diff --git a/README.md b/README.md
new file mode 100644
index 0000000..c1fe063
--- /dev/null
+++ b/README.md
@@ -0,0 +1,594 @@
+This document is a copy of http://code.google.com/p/scala-migrations/;
+it may be easier to read it in a browser.
+
+Scala Migrations is a library to manage upgrades and rollbacks to
+database schemas. Migrations allow a source control system to manage
+the database schema together with the code that uses it. The library
+is designed to let multiple developers working on a project with a
+database backend design schema modifications independently, apply the
+migrations to their local databases for debugging and, when complete,
+check them into a source control system to be managed like normal
+source code. Other developers then check out the new migrations and
+apply them to their local databases. Finally, the migrations are used
+to migrate the production databases to the latest schema version.
+
+The Scala Migrations library is written in Scala, and the migrations
+themselves are also written in Scala, which keeps them concise and
+easy to understand. Scala Migrations provides a database abstraction
+layer that allows migrations to target any supported database vendor.
+
+### History
+
+Scala Migrations is developed at Sony Pictures Imageworks to manage
+database versioning for internal applications. The design is based on
+Ruby on Rails Migrations and shares the same `schema_migrations`
+table to manage the list of installed migrations.
+
+### Sample Migration
+
+Here is a migration used by VnP3, an internal Imageworks project.
+
+```scala
+package com.imageworks.vnp.dao.migrations
+
+import com.imageworks.migration.{Limit,
+ Migration,
+ Name,
+ NotNull,
+ OnDelete,
+ Restrict,
+ Unique}
+
+/**
+ * Create the 'facility_set_membership' table, which is a many-to-many
+ * join table between the 'facility' and 'facility_set' tables. It
+ * represents the sets that a facility is a member of and the
+ * facilities that are in a set. Rows do not have their own primary
+ * key.
+ */
+class Migrate_20081216235329_FacilitySetMembership
+ extends Migration
+{
+ val tableName = "facility_set_membership"
+
+ def up() {
+ createTable(tableName) { t =>
+ t.varbinary("pk_facility", NotNull, Limit(16))
+ t.varbinary("pk_facility_set", NotNull, Limit(16))
+ t.bigint("created_micros", NotNull)
+ t.bigint("modified_micros", NotNull)
+ }
+
+ // There should only be one pair of (pk_facility_set, pk_facility)
+ // tuples in the entire table, i.e., for one facility set, the
+ // facility should only appear once.
+ addIndex(tableName,
+ Array("pk_facility_set", "pk_facility"),
+ Unique,
+ Name("idx_fac_set_mmbrshp_uniq_pks"))
+
+ addForeignKey(on(tableName -> "pk_facility"),
+ references("facility" -> "pk_facility"),
+ OnDelete(Restrict),
+ Name("fk_fac_set_mmbrshp_pk_fac"))
+
+ addForeignKey(on(tableName -> "pk_facility_set"),
+ references("facility_set" -> "pk_facility_set"),
+ OnDelete(Restrict),
+ Name("fk_fac_set_mmbrshp_pk_fac_set"))
+ }
+
+ def down() {
+ dropTable(tableName)
+ }
+}
+```
+
+To migrate a database to the latest version, use code similar to:
+
+```scala
+import com.imageworks.migration.{DatabaseAdapter,
+                                 InstallAllMigrations,
+                                 Migrator,
+                                 Vendor}
+
+object Test
+{
+  def main(args: Array[String]) {
+    val driver_class_name = "org.postgresql.Driver"
+    val vendor = Vendor.forDriver(driver_class_name)
+    val migration_adapter = DatabaseAdapter.forVendor(vendor, None)
+    // Obtain a DataSource however the application normally does.
+    val data_source: javax.sql.DataSource = ...
+    val migrator = new Migrator(data_source, migration_adapter)
+
+    // Now apply all migrations that are in the
+    // com.imageworks.vnp.dao.migrations package.
+    migrator.migrate(InstallAllMigrations, "com.imageworks.vnp.dao.migrations", false)
+  }
+}
+```
+
+To roll back a database to its pristine state:
+
+```scala
+ migrator.migrate(RemoveAllMigrations, "com.imageworks.vnp.dao.migrations", false)
+```
+
+To roll back two migrations:
+
+```scala
+ migrator.migrate(RollbackMigration(2), "com.imageworks.vnp.dao.migrations", false)
+```
+
+And to migrate to a specific migration, rolling back migrations that
+are newer than the requested migration version and installing
+migrations older than the requested version:
+
+```scala
+ migrator.migrate(MigrateToVersion(20090731), "com.imageworks.vnp.dao.migrations", false)
+```
+
+### Supported Databases
+
+Scala Migrations currently supports
+
+* Derby
+* MySQL
+* Oracle
+* PostgreSQL
+
+Patches for other databases are welcome; however, you will need to
+submit a [Contributor License Agreement](http://opensource.imageworks.com/cla/).
+
+### Start using Scala Migrations
+
+Maven Central hosts compiled jars for Scala 2.8.0 and greater,
+compiled on JDK 1.6/JDBC 4. All Scala Migrations artifacts have a
+`groupId` of `com.imageworks.scala-migrations`. Each Scala version
+gets its own compiled and published artifact, with a distinct
+artifactId of the form `scala-migrations_X.Y.Z`, where `X.Y.Z` is the
+Scala version used to compile Scala Migrations.
+
+Direct links to jars compiled against 2.8.0 or greater can be found at
+[Maven Central](http://search.maven.org/#search%7Cga%7C1%7Cg%3Acom.imageworks.scala-migrations).
+Jars for Scala 2.7.7 for JDBC 3 and JDBC 4 are on the
+[Downloads](http://code.google.com/p/scala-migrations/downloads/list) page.
+
+#### sbt
+
+Add the following to your `build.sbt`:
+
+```scala
+libraryDependencies ++= Seq("com.imageworks.scala-migrations" %% "scala-migrations" % "1.1.1")
+```
+
+#### Ivy
+
+Add the following to the dependencies section of the `ivy.xml` file,
+replacing `X.Y.Z` with your Scala version.
+
+```xml
+<dependency org="com.imageworks.scala-migrations"
+            name="scala-migrations_X.Y.Z"
+            rev="1.1.1"/>
+```
+
+#### Maven
+
+Add the following snippet to the `<dependencies>` section of the
+project's `pom.xml` file, replacing `X.Y.Z` with your Scala version.
+
+```xml
+<dependency>
+  <groupId>com.imageworks.scala-migrations</groupId>
+  <artifactId>scala-migrations_X.Y.Z</artifactId>
+  <version>1.1.1</version>
+</dependency>
+```
+
+### Dependencies and Setup
+
+Scala Migrations depends upon:
+
+* The Simple Logging Facade for Java (SLF4J).
+
+ http://www.slf4j.org/
+
+  The Simple Logging Facade for Java (SLF4J) serves as a simple
+  facade or abstraction for various logging frameworks, e.g. log4j
+  and java.util.logging, allowing the end user to plug in the
+  desired logging framework at deployment time.
+
+  Scala Migrations has a library dependency upon SLF4J's slf4j-api
+  jar, which only provides an interface to a logging API. The
+  application must choose a concrete logging implementation by
+  ensuring that one of the following jars is available in the
+  classpath; an sbt sketch at the end of this section shows one way
+  to do this. If no implementation jar is provided, the no-operation
+  logging implementation is used.
+
+ * slf4j-log4j12
+
+ Binding for log4j version 1.2, a widely used logging
+ framework. You also need to place log4j.jar on your
+ classpath.
+
+ * slf4j-jcl
+
+ Binding for Jakarta Commons Logging. This binding will
+ delegate all SLF4J logging to JCL.
+
+ * slf4j-jdk14
+
+ Binding for java.util.logging, also referred to as JDK 1.4
+ logging.
+
+ * slf4j-nop
+
+ Binding for NOP, silently discarding all logging.
+
+ * slf4j-simple
+
+ Binding for Simple implementation, which outputs all events to
+ System.err. Only messages of level INFO and higher are
+ printed. This binding may be useful in the context of small
+ applications.
+
+ See http://www.slf4j.org/manual.html for more information.
+
+* The log4jdbc JDBC wrapper, which logs all JDBC operations.
+
+ http://code.google.com/p/log4jdbc/
+
+  Since running a migration on a production database is a dangerous
+  operation that can cause irreversible damage if anything goes
+ wrong, the JDBC connection given to all migrations is a log4jdbc
+ `net.sf.log4jdbc.ConnectionSpy` that wraps the real connection.
+ This logs all method calls so that any migration errors can be
+ fully debugged. log4jdbc uses SLF4J; see the log4jdbc website on
+ how to set up the loggers and logging level for log4jdbc messages.
+
+ As of 1.0.3, Scala Migrations will use log4jdbc to wrap the real
+ database connection if log4jdbc is found at runtime in the
+ classpath, otherwise it will use the raw database connection and
+ not do any SQL specific logging. No special work needs to be done
+ by the migration author to use log4jdbc, besides making it
+ available in the classpath. Before 1.0.3, Scala Migrations
+ required that log4jdbc be in the classpath.
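+
+For illustration, here is a hedged sbt sketch that pulls in a concrete
+SLF4J binding and the optional log4jdbc jar. The choice of binding and
+the version numbers are assumptions, and the log4jdbc coordinates
+mirror this repository's own `build.sbt`:
+
+```scala
+// A sketch only: choose one concrete SLF4J binding (here, the log4j
+// 1.2 binding) and optionally add log4jdbc so Scala Migrations can
+// wrap the JDBC connection.  Versions are illustrative.
+libraryDependencies ++= Seq(
+  "org.slf4j" % "slf4j-log4j12" % "1.7.7",
+  "log4j" % "log4j" % "1.2.17",
+  "log4jdbc" % "log4jdbc" % "1.1" from "http://log4jdbc.googlecode.com/files/log4jdbc4-1.1.jar")
+```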
+
+### Migration Naming
+
+In Scala Migrations, the migrations need to be compiled and their
+`*.class` files need to be made available at runtime; the source
+files are not available at runtime.
+
+Scala Migrations then needs to know an ordering on the migrations, so
+the timestamp needs to be in the class name. Scala does not support
+naming a symbol such as `20080717013526_YourMigrationName` because the
+name begins with a digit (unless one were to quote the name, which
+would look odd), so Scala Migrations looks for classes named
+
+```
+  Migrate_(\d+)_([_a-zA-Z0-9]*)
+```
+
+The time stamp can be generated using the following command on Unix systems:
+
+```
+ $ date -u +%Y%m%d%H%M%S
+```
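+
+For example, a class compiled from that timestamp might look like the
+following minimal sketch; the package name is reused from the sample
+migration, while the class suffix and table name are hypothetical, and
+the calls are the same ones used in the sample migration above:
+
+```scala
+package com.imageworks.vnp.dao.migrations
+
+import com.imageworks.migration.{Migration, NotNull}
+
+// The class name embeds the UTC timestamp 2008-07-17 01:35:26.
+class Migrate_20080717013526_CreateExampleTable
+  extends Migration
+{
+  def up() {
+    createTable("example") { t =>
+      t.bigint("pk_example", NotNull)
+    }
+  }
+
+  def down() {
+    dropTable("example")
+  }
+}
+```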
+
+This is different from Ruby on Rails migrations, which are stored in
+filenames of the form
+
+```
+ 20080717013526_your_migration_name.rb
+```
+
+and have a corresponding class name such as `YourMigrationName`. Ruby
+on Rails finds all of a project's migration `*.rb` files, loads them
+at runtime and derives the class name from each filename. The order
+in which to apply the migrations is contained in the filename, not
+the class name.
+
+### Unsupported Database Features
+
+It is not a goal of Scala Migrations to check and report on the
+compatibility of a Scala Migrations specific feature with a database.
+For example, Oracle does not support the `"ON UPDATE SET NULL"` clause
+on a foreign key constraint. If an `OnUpdate(SetNull)` is specified
+for a foreign key constraint, then Scala Migrations will generate that
+clause and ask the database to execute it.
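+
+As a hedged illustration of that example, reusing the `addForeignKey`
+call from the sample migration (the table and column names here are
+hypothetical):
+
+```scala
+// Scala Migrations emits the ON UPDATE SET NULL clause even on
+// databases, such as Oracle, that will reject it.
+addForeignKey(on("child" -> "pk_parent"),
+              references("parent" -> "pk_parent"),
+              OnUpdate(SetNull),
+              Name("fk_child_pk_parent"))
+```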
+
+If Scala Migrations did attempt to check the compatibility of each
+feature, then it would need to grow much larger to know which features
+worked on which database, and even worse, potentially know which
+features appear in which database versions. This is not something
+that the authors of Scala Migrations want to maintain.
+
+### Data Types
+
+The following data types are supported, listed with their mappings.
+If a database name is not specified, then the default mapping is
+used. More information on the mappings is below.
+
+* Bigint
+ * Default: `BIGINT`
+ * Oracle: `NUMBER(19, 0)`
+
+* Blob
+ * Default: `BLOB`
+ * MySQL: `LONGBLOB`
+ * PostgreSQL: `BYTEA`
+
+* Boolean
+ * Default: `BOOLEAN`
+  * Derby: Unsupported; even though Derby 10.7 supports a `BOOLEAN`
+ type, Scala Migrations currently always throws an
+ `UnsupportedColumnTypeException`
+ * Oracle: Unsupported; an `UnsupportedColumnTypeException` is
+ thrown if Boolean is used
+
+* Char
+ * Default: `CHAR`
+
+* Decimal
+ * Default: `DECIMAL`
+ * Oracle: `NUMBER`
+
+* Integer
+ * Default: `INTEGER`
+ * Oracle: `NUMBER(10, 0)`
+
+* Smallint
+ * Default: `SMALLINT`
+ * Oracle: `NUMBER(5, 0)`
+
+* Timestamp
+ * Default: `TIMESTAMP`
+ * MySQL: `TIMESTAMP` but does not support fractional precision
+
+* Varbinary
+ * Default: `VARBINARY`
+ * Derby: `VARCHAR FOR BIT DATA`
+ * Oracle: `RAW`
+ * PostgreSQL: `BYTEA`
+
+* Varchar
+ * Default: `VARCHAR`
+ * Oracle: `VARCHAR2`
+
+### Boolean Mapping
+
+Scala Migrations does not define a mapping for the Boolean data type
+in databases that do not have a native Boolean data type. The reason
+is that there are many ways of representing a Boolean value in a
+database and Scala Migrations is not an ORM layer, so this decision
+is left to the application developer.
+
+Different representations that have been used in schemas include the
+following (a sketch follows the list):
+
+* A `CHAR(1)` column containing a 'Y' or 'N' value. The column may
+  have a `CHECK` constraint to ensure that the values are only 'Y'
+  or 'N'.
+
+* An `INTEGER` column with 0 representing false and all other
+  values representing true.
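+
+As a sketch of the first representation, assuming a `t.char` column
+method that follows the same pattern as `t.varbinary` in the sample
+migration above (the table and column names are hypothetical):
+
+```scala
+createTable("account") { t =>
+  // Holds 'Y' or 'N'; a CHECK constraint could additionally restrict
+  // the allowed values on databases that support it.
+  t.char("is_active", NotNull, Limit(1))
+}
+```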
+
+### BLOB and VARBINARY Mappings
+
+Each database treats BLOB and VARBINARY differently.
+
+| Database | Scala Migrations Type | SQL Type | Maximum Length (bytes) | Specify Length? | Specify Default? | References | Notes |
+|:-----------|:------------------------|:------------------------|:-------------------------|:------------------|:-------------------|:-------------|:--------|
+| Derby | Blob | `BLOB` | 2,147,483,647 | Optional, defaults to 2 GB | No | [1](http://db.apache.org/derby/docs/10.9/ref/rrefblob.html) | |
+| | Varbinary | `VARCHAR FOR BIT DATA` | 32,672 | Required | Yes | [2](http://db.apache.org/derby/docs/10.9/ref/rrefsqlj32714.html) | |
+| MySQL | Blob | `LONGBLOB` | 4,294,967,295 | No | No | [3](http://dev.mysql.com/doc/refman/5.5/en/blob.html) | |
+| | Varbinary | `VARBINARY` | 21,844 to 65,535, depending on row size | Required | Yes | [4](http://dev.mysql.com/doc/refman/5.5/en/storage-requirements.html) | Stored in row |
+| Oracle | Blob | `BLOB` | 4,294,967,296 in Oracle 8, larger in newer versions | No | ?? | [5](http://docs.oracle.com/cd/B28359_01/server.111/b28286/sql_elements001.htm#i54330) [6](http://ss64.com/ora/syntax-datatypes.html)| |
+| | Varbinary | `RAW` | 2,000 | Required | ?? | | |
+| PostgreSQL | Blob | `BYTEA` | 1,073,741,823 | No | Yes | [7](http://www.postgresql.org/docs/9.1/static/storage-toast.html)| |
+| | Varbinary | `BYTEA` | 1,073,741,823 | No | Yes | | |
+
+### Oracle and SMALLINT, INTEGER and BIGINT
+
+Oracle does not have `SMALLINT`, `INTEGER` or `BIGINT` SQL types
+comparable to other databases, such as Derby, MySQL and PostgreSQL.
+These other databases use a fixed-size signed integer with a limited
+range of values that can be stored in the column.
+
+| Type | Storage | Min value | Max value |
+|:---------|:----------------------|:--------------------|:--------------------|
+| SMALLINT | 2-byte signed integer | -32768 | 32767 |
+| INTEGER | 4-byte signed integer | -2147483648 | 2147483647 |
+| BIGINT | 8-byte signed integer | -9223372036854775808| 9223372036854775807 |
+
+Oracle does support an `INTEGER` column type but it uses a `NUMBER(38)`
+to store it.
+
+On Oracle, a column declared with any of the `SMALLINT`, `INTEGER`
+and `BIGINT` migration types is mapped to a `NUMBER` with a precision
+smaller than 38.
+
+| Migration Type | Oracle Type |
+|:---------------|:--------------|
+| SMALLINT | NUMBER(5, 0) |
+| INTEGER | NUMBER(10, 0) |
+| BIGINT | NUMBER(19, 0) |
+
+This helps ensure that code running against an Oracle database does
+not assume it can use 38-digit integer values, in case the data needs
+to be exported to another database or the code needs to work with
+other databases. Columns that need a `NUMBER(38)` should use a
+`DecimalType` column.
+
+### NUMERIC and DECIMAL
+
+There is a minor difference in the definition of the `NUMERIC` and
+`DECIMAL` types according to the SQL
+1992 standard:
+
+```
+17) NUMERIC specifies the data type exact numeric, with the decimal
+    precision and scale specified by the <precision> and <scale>.
+
+18) DECIMAL specifies the data type exact numeric, with the decimal
+    scale specified by the <scale> and the implementation-defined
+    decimal precision equal to or greater than the value of the
+    specified <precision>.
+
+However, in practice, all databases we looked at implement them
+identically.
+
+* Derby
+
+ "NUMERIC is a synonym for DECIMAL and behaves the same way. See
+ DECIMAL data type."
+
+ http://db.apache.org/derby/docs/10.4/ref/rrefsqlj12362.html
+
+ http://db.apache.org/derby/docs/10.4/ref/rrefsqlj15260.html
+
+* MySQL
+
+ "NUMERIC implemented as DECIMAL."
+
+ http://dev.mysql.com/doc/refman/5.1/en/numeric-types.html
+
+* Oracle
+
+ Only has the `NUMBER` type.
+
+ http://download-west.oracle.com/docs/cd/B19306_01/server.102/b14200/sql_elements001.htm
+
+ http://download-west.oracle.com/docs/cd/B19306_01/server.102/b14200/sql_elements001.htm#sthref218
+
+* PostgreSQL
+
+ "The types decimal and numeric are equivalent. Both types are
+ part of the SQL standard."
+
+ The documentation uses `NUMERIC` more and lists `DECIMAL` as an alias.
+
+ http://www.postgresql.org/docs/8.3/interactive/datatype-numeric.html
+
+ http://www.postgresql.org/docs/8.3/interactive/datatype.html#DATATYPE-TABLE
+
+### Auto-incrementing Column Default Values
+
+Several databases natively support integer columns whose default
+value is the next value from an automatically incrementing sequence.
+The `AutoIncrement` column option enables this feature for a column.
+
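+For example, a hedged sketch of an auto-incrementing surrogate key
+column; the `PrimaryKey` option and the `t.varchar` method are
+assumptions that follow the column-option pattern of the sample
+migration above:
+
+```scala
+createTable("widget") { t =>
+  // The database supplies the next sequence value when no explicit
+  // value is given on insert.
+  t.bigint("pk_widget", NotNull, AutoIncrement, PrimaryKey)
+  t.varchar("name", NotNull, Limit(255))
+}
+```
+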
+Here are the database mappings:
+
+* Derby
+
+ Only supported on `SMALLINT`, `INT` and `BIGINT` data types using
+ Derby's `GENERATED BY DEFAULT AS IDENTITY`. The alternate setting
+  `GENERATED ALWAYS AS IDENTITY` is not used, as it is not consistent
+  with MySQL and PostgreSQL, which permit the application to
+  explicitly specify the column's value.
+
+ http://db.apache.org/derby/docs/10.9/ref/rrefsqlj37836.html
+
+* MySQL
+
+ Only supported on `SMALLINT`, `INT` and `BIGINT` data types using
+ MySQL's `AUTO_INCREMENT` keyword.
+
+ http://dev.mysql.com/doc/refman/5.5/en/create-table.html
+ http://dev.mysql.com/doc/refman/5.5/en/example-auto-increment.html
+
+* PostgreSQL
+
+ Only supported on `SMALLINT`, `INT` and `BIGINT` data types by
+ replacing the data type name with `SMALLSERIAL`, `SERIAL` and
+ `BIGSERIAL`, respectively. Support for `SMALLSERIAL` is only
+ available in PostgreSQL 9.2 and greater.
+
+ http://www.postgresql.org/docs/9.2/static/datatype-numeric.html#DATATYPE-SERIAL
+
+* Oracle
+
+  No support is currently provided, as it appears that equivalent
+  functionality can only be achieved by using triggers.
+
+### Character Set Encoding
+
+Scala Migrations supports specifying the character set for `Char` and
+`Varchar` columns with the `CharacterSet()` column option, which takes
+the name of the character set as an argument. Currently, the only
+supported character set name is Unicode.
+
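+For example, a minimal sketch that requests Unicode for a `Varchar`
+column; the `t.varchar` method is assumed to follow the same pattern
+as `t.varbinary` in the sample migration above:
+
+```scala
+createTable("person") { t =>
+  t.varchar("full_name", NotNull, Limit(255), CharacterSet(Unicode))
+}
+```
+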
+Here is how different databases handle character set encoding.
+
+* Derby
+
+ "Character data types are represented as Unicode 2.0 sequences in
+ Derby."
+
+ So specifying `CharacterSet(Unicode)` does not change its
+ behavior. Using any character set name besides Unicode as the
+ argument to `CharacterSet()` raises a warning and is ignored.
+
+ http://db.apache.org/derby/docs/10.4/devguide/cdevcollation.html
+
+* MySQL
+
+  MySQL supports 30+ character sets and all of them can be
+ simultaneously used; in fact, a table can have multiple character
+ type columns, each with a different character set. See
+ http://dev.mysql.com/doc/refman/5.5/en/charset-database.html for
+ reference.
+
+ If no `CharacterSet` is used, then MySQL will use the database's
+ or the server's default character set and the default character
+ set's default collation. If `CharacterSet(Unicode)` is used, then
+ Scala Migrations uses the `utf8` character set with the
+ `utf8_unicode_ci` collation, which is not MySQL's default
+ `utf8_general_ci` collation for `utf8`, as `utf8_unicode_ci` is
+  [not incorrect](http://stackoverflow.com/questions/766809/).
+
+  Users wishing to have more control over specifying character sets
+  and collations can discuss this on the developers mailing list.
+
+* PostgreSQL
+
+ The character set encoding is chosen when a database is created
+ with the "createdb" command line utility or the
+
+```
+  CREATE DATABASE name ENCODING [=] encoding
+```
+
+ SQL statement. So specifying any `CharacterSet` has no effect.
+
+* Oracle
+
+  Oracle only supports two character sets. The first is the database
+  character set, which was chosen when the database was created.
+  This encoding is used for `CHAR`, `VARCHAR2` and `CLOB`
+ columns. The second character set is called the national
+ character set and is Unicode, which is used for `NCHAR`,
+ `NVARCHAR2` and `NCLOB` columns. There are two encodings
+ available for the national character set, `AL16UTF16` and `UTF8`.
+ By default, Oracle uses `AL16UTF16`.
+
+ http://download-west.oracle.com/docs/cd/B19306_01/server.102/b14225/ch6unicode.htm
+
+ Specifying no `CharacterSet` column option defaults the `Char`
+ type to `CHAR` and the `Varchar` type to `VARCHAR2`. If
+ `CharacterSet(Unicode)` is given, then `Char` uses `NCHAR` and
+ `Varchar` uses `NVARCHAR2`. Using any character set name besides
+ `Unicode` as the argument to `CharacterSet()` raises a warning and
+ is ignored, resulting in `CHAR` and `VARCHAR2` column types.
+
+### Caveats
+
+* Index and foreign key names do not use the same naming convention
+  as Ruby on Rails migrations, so a port of Ruby on Rails migrations
+  to Scala Migrations should specify the index name using the
+  `Name()` case class as an option to `addIndex()` or
+  `removeIndex()`.
diff --git a/build.sbt b/build.sbt
index 0d49844..644ab7e 100644
--- a/build.sbt
+++ b/build.sbt
@@ -18,7 +18,7 @@ licenses += "New BSD License" -> url("http://opensource.org/licenses/BSD-3-Claus
version := "1.1.2-SNAPSHOT"
-scalaVersion := "2.10.2"
+scalaVersion := "2.11.4"
// For a single major Scala release, e.g. 2.x.y, include at most one
// Scala release candidate in crossScalaVersions, e.g. "2.x.y-RC3".
@@ -27,8 +27,8 @@ scalaVersion := "2.10.2"
crossScalaVersions := Seq("2.9.0", "2.9.0-1",
"2.9.1", "2.9.1-1",
"2.9.2", "2.9.3",
- "2.10.2",
- "2.11.0-M3")
+ "2.10.4",
+ "2.11.4")
// Increase warnings generated by the Scala compiler.
//
@@ -54,6 +54,7 @@ scalacOptions <++= scalaVersion map { v: String =>
}
libraryDependencies ++= Seq(
+ "com.google.code.findbugs" % "jsr305" % "2.0.3",
"com.novocode" % "junit-interface" % "0.10-M4" % "test",
"log4jdbc" % "log4jdbc" % "1.1" from "http://log4jdbc.googlecode.com/files/log4jdbc4-1.1.jar",
"mysql" % "mysql-connector-java" % "[5.1.0,5.2)" % "test",
@@ -62,8 +63,10 @@ libraryDependencies ++= Seq(
"org.jmock" % "jmock-junit4" % "[2.5.1,3.0)" % "test",
"org.slf4j" % "slf4j-api" % "[1.5.8,2.0)",
"org.slf4j" % "slf4j-log4j12" % "[1.5.8,2.0)" % "test",
+ "com.h2database" % "h2" % "1.3.176" % "test",
"postgresql" % "postgresql" % "9.1-901.jdbc4" % "test")
+
// Run unit tests serially otherwise races can occur between two
// threads checking if the 'schema_migrations' table exists and
// trying to create it.
diff --git a/project/build.properties b/project/build.properties
new file mode 100644
index 0000000..748703f
--- /dev/null
+++ b/project/build.properties
@@ -0,0 +1 @@
+sbt.version=0.13.7
diff --git a/project/plugins.sbt b/project/plugins.sbt
index 64b27e5..f0ee84d 100644
--- a/project/plugins.sbt
+++ b/project/plugins.sbt
@@ -1,5 +1,5 @@
-addSbtPlugin("com.github.mpeltonen" % "sbt-idea" % "1.1.0")
+addSbtPlugin("com.github.mpeltonen" % "sbt-idea" % "1.5.1")
-addSbtPlugin("com.jsuereth" % "xsbt-gpg-plugin" % "0.6")
+addSbtPlugin("com.typesafe.sbt" % "sbt-pgp" % "0.8.1")
-addSbtPlugin("com.typesafe.sbt" % "sbt-scalariform" % "1.0.1")
+addSbtPlugin("com.typesafe.sbt" % "sbt-scalariform" % "1.2.0")
diff --git a/src/main/scala/com/imageworks/migration/DatabaseAdapter.scala b/src/main/scala/com/imageworks/migration/DatabaseAdapter.scala
index 75b7260..e0b8556 100644
--- a/src/main/scala/com/imageworks/migration/DatabaseAdapter.scala
+++ b/src/main/scala/com/imageworks/migration/DatabaseAdapter.scala
@@ -61,6 +61,9 @@ object DatabaseAdapter {
case Postgresql =>
new PostgresqlDatabaseAdapter(schemaNameOpt)
+ case H2 =>
+ new H2DatabaseAdapter(schemaNameOpt)
+
case null =>
throw new IllegalArgumentException("Must pass a non-null vendor to " +
"this function.")
diff --git a/src/main/scala/com/imageworks/migration/H2DatabaseAdapter.scala b/src/main/scala/com/imageworks/migration/H2DatabaseAdapter.scala
new file mode 100644
index 0000000..7f489af
--- /dev/null
+++ b/src/main/scala/com/imageworks/migration/H2DatabaseAdapter.scala
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2015 Sony Pictures Imageworks Inc.
+ *
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution. Neither the name of Sony Pictures Imageworks nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.imageworks.migration
+
+trait H2AutoIncrementingColumnDefinitionMixin
+ extends ColumnDefinition
+ with ColumnSupportsAutoIncrement {
+ override protected abstract def sql: String = {
+ if (isAutoIncrement) super.sql + " AUTO_INCREMENT"
+ else super.sql
+ }
+}
+
+class H2BigintColumnDefinition
+ extends DefaultBigintColumnDefinition
+ with H2AutoIncrementingColumnDefinitionMixin
+
+class H2IntegerColumnDefinition
+ extends DefaultIntegerColumnDefinition
+ with H2AutoIncrementingColumnDefinitionMixin
+
+class H2SmallintColumnDefinition
+ extends DefaultSmallintColumnDefinition
+ with H2AutoIncrementingColumnDefinitionMixin
+
+// H2 does not support size specifiers for the TIMESTAMP data type.
+class H2TimestampColumnDefinition
+ extends ColumnDefinition
+ with ColumnSupportsDefault {
+ override val sql = "TIMESTAMP"
+}
+
+class H2DatabaseAdapter(override val schemaNameOpt: Option[String])
+ extends DatabaseAdapter(schemaNameOpt) {
+ override val vendor = H2
+
+ override val quoteCharacter = '`'
+
+ override val unquotedNameConverter = UppercaseUnquotedNameConverter
+
+ override val userFactory = PlainUserFactory
+
+ override val alterTableDropForeignKeyConstraintPhrase = "CONSTRAINT"
+
+ override val addingForeignKeyConstraintCreatesIndex = true
+
+ override val supportsCheckConstraints = false
+
+ override def columnDefinitionFactory(columnType: SqlType,
+ characterSetOpt: Option[CharacterSet]): ColumnDefinition = {
+ columnType match {
+ case BigintType =>
+ new H2BigintColumnDefinition
+ case BlobType =>
+ new DefaultBlobColumnDefinition
+ case BooleanType =>
+ new DefaultBooleanColumnDefinition
+ case CharType =>
+ new DefaultCharColumnDefinition
+ case DecimalType =>
+ new DefaultDecimalColumnDefinition
+ case IntegerType =>
+ new H2IntegerColumnDefinition
+ case SmallintType =>
+ new H2SmallintColumnDefinition
+ case TimestampType =>
+ new H2TimestampColumnDefinition
+ case VarbinaryType =>
+ new DefaultVarbinaryColumnDefinition
+ case VarcharType =>
+ new DefaultVarcharColumnDefinition
+ }
+ }
+
+ override def lockTableSql(schemaNameOpt: Option[String],
+ tableName: String): String = {
+ "SELECT * FROM " + quoteTableName(schemaNameOpt, tableName) + " FOR UPDATE"
+ }
+
+ override protected def alterColumnSql(schemaNameOpt: Option[String],
+ columnDefinition: ColumnDefinition): String = {
+ new java.lang.StringBuilder(512)
+ .append("ALTER TABLE ")
+ .append(quoteTableName(schemaNameOpt, columnDefinition.getTableName))
+ .append(" MODIFY COLUMN ")
+ .append(quoteColumnName(columnDefinition.getColumnName))
+ .append(columnDefinition.toSql)
+ .toString
+ }
+
+ override def removeIndexSql(schemaNameOpt: Option[String],
+ tableName: String,
+ indexName: String): String = {
+ new java.lang.StringBuilder(128)
+ .append("ALTER TABLE ")
+ .append(quoteTableName(schemaNameOpt, tableName))
+ .append(" DROP INDEX ")
+ .append(quoteIndexName(None, indexName))
+ .toString
+ }
+}
diff --git a/src/main/scala/com/imageworks/migration/JavaDatabaseAdapter.scala b/src/main/scala/com/imageworks/migration/JavaDatabaseAdapter.scala
index e839e52..ee37b87 100644
--- a/src/main/scala/com/imageworks/migration/JavaDatabaseAdapter.scala
+++ b/src/main/scala/com/imageworks/migration/JavaDatabaseAdapter.scala
@@ -114,4 +114,23 @@ object JavaDatabaseAdapter {
def getPostgresqlDatabaseAdapter(schemaName: String): PostgresqlDatabaseAdapter = {
new PostgresqlDatabaseAdapter(Some(schemaName))
}
+
+ /**
+ * Create a H2 Database Adapter.
+ *
+ * @return newly constructed H2DatabaseAdapter
+ */
+ def getH2DatabaseAdapter: H2DatabaseAdapter = {
+ new H2DatabaseAdapter(None)
+ }
+
+ /**
+ * Create a H2 Database Adapter.
+ *
+ * @param schemaName the default schema name in the adapter
+ * @return newly constructed H2DatabaseAdapter
+ */
+ def getH2DatabaseAdapter(schemaName: String): H2DatabaseAdapter = {
+ new H2DatabaseAdapter(Some(schemaName))
+ }
}
diff --git a/src/main/scala/com/imageworks/migration/Vendor.scala b/src/main/scala/com/imageworks/migration/Vendor.scala
index 67a916c..850b4a5 100644
--- a/src/main/scala/com/imageworks/migration/Vendor.scala
+++ b/src/main/scala/com/imageworks/migration/Vendor.scala
@@ -46,6 +46,8 @@ case object Oracle
extends Vendor
case object Postgresql
extends Vendor
+case object H2
+ extends Vendor
object Vendor {
/**
@@ -77,6 +79,9 @@ object Vendor {
case "org.postgresql.Driver" =>
Postgresql
+ case "org.h2.Driver" =>
+ H2
+
case null =>
throw new IllegalArgumentException("Must pass a non-null JDBC " +
"driver class name to this " +
diff --git a/src/test/scala/com/imageworks/migration/tests/DatabaseAdapterTests.scala b/src/test/scala/com/imageworks/migration/tests/DatabaseAdapterTests.scala
index 641fd99..6727ba4 100644
--- a/src/test/scala/com/imageworks/migration/tests/DatabaseAdapterTests.scala
+++ b/src/test/scala/com/imageworks/migration/tests/DatabaseAdapterTests.scala
@@ -32,18 +32,7 @@
*/
package com.imageworks.migration.tests
-import com.imageworks.migration.{
- DatabaseAdapter,
- Derby,
- DerbyDatabaseAdapter,
- Mysql,
- MysqlDatabaseAdapter,
- Oracle,
- OracleDatabaseAdapter,
- Postgresql,
- PostgresqlDatabaseAdapter
-}
-
+import com.imageworks.migration._
import org.junit.Assert._
import org.junit.Test
@@ -61,6 +50,9 @@ class DatabaseAdapterTests {
assertEquals(classOf[PostgresqlDatabaseAdapter],
DatabaseAdapter.forVendor(Postgresql, None).getClass)
+
+ assertEquals(classOf[H2DatabaseAdapter],
+ DatabaseAdapter.forVendor(H2, None).getClass)
}
@Test(expected = classOf[IllegalArgumentException])
diff --git a/src/test/scala/com/imageworks/migration/tests/MigrationTests.scala b/src/test/scala/com/imageworks/migration/tests/MigrationTests.scala
index 9efc688..a918332 100644
--- a/src/test/scala/com/imageworks/migration/tests/MigrationTests.scala
+++ b/src/test/scala/com/imageworks/migration/tests/MigrationTests.scala
@@ -32,22 +32,7 @@
*/
package com.imageworks.migration.tests
-import com.imageworks.migration.{
- AutoCommit,
- Derby,
- DuplicateMigrationDescriptionException,
- DuplicateMigrationVersionException,
- InstallAllMigrations,
- MigrateToVersion,
- Migration,
- Migrator,
- Mysql,
- Oracle,
- Postgresql,
- RemoveAllMigrations,
- RollbackMigration,
- With
-}
+import com.imageworks.migration._
import org.jmock.{
Expectations,
@@ -371,6 +356,7 @@ class MigrationTests {
case Mysql => true
case Oracle => false
case Postgresql => false
+ case H2 => true
}
var autoPk = 1
@@ -497,7 +483,7 @@ class MigrationTests {
// With JDK 1.6 or later, a java.sql.SQLSyntaxErrorException
// could be caught here, but for 1.5 compatibility, only a
// java.sql.SQLException is caught.
- case _: SQLException => // expected
+ case e: SQLException => // expected
}
// perform grants
diff --git a/src/test/scala/com/imageworks/migration/tests/TestDatabase.scala b/src/test/scala/com/imageworks/migration/tests/TestDatabase.scala
index 17de29c..cbd6f12 100644
--- a/src/test/scala/com/imageworks/migration/tests/TestDatabase.scala
+++ b/src/test/scala/com/imageworks/migration/tests/TestDatabase.scala
@@ -32,15 +32,7 @@
*/
package com.imageworks.migration.tests
-import com.imageworks.migration.{
- AutoCommit,
- ConnectionBuilder,
- DatabaseAdapter,
- DerbyDatabaseAdapter,
- MysqlDatabaseAdapter,
- PostgresqlDatabaseAdapter,
- With
-}
+import com.imageworks.migration._
import java.sql.{
DriverManager,
@@ -165,9 +157,6 @@ object DerbyTestDatabase
}
}
catch {
- // For JDBC3 (JDK 1.5)
- case _: org.apache.derby.impl.jdbc.EmbedSQLException =>
-
// For JDBC4 (JDK 1.6), a
// java.sql.SQLNonTransientConnectionException is thrown, but this
// exception class does not exist in JDK 1.5, so catch a
@@ -351,6 +340,69 @@ object PostgresqlTestDatabase
new PostgresqlDatabaseAdapter(Some(getSchemaName))
}
}
+object H2TestDatabase
+ extends TestDatabase {
+ // Username of the admin account, which will be the owner of the
+ // database.
+ private val adminUsername = {
+ System.getProperty(TestDatabase.adminUserNameProperty, "admin")
+ }
+
+ override def getAdminAccountName = adminUsername
+
+ // Password for the admin account.
+ private val adminPassword = {
+ System.getProperty(TestDatabase.adminUserPasswordProperty, "admin")
+ }
+
+ // Username of the user account.
+ private val userUsername = {
+ System.getProperty(TestDatabase.userUserNameProperty, "user")
+ }
+
+ override def getUserAccountName = userUsername
+
+ // Password for the user account.
+ private val userPassword = {
+ System.getProperty(TestDatabase.userUserPasswordProperty, "user")
+ }
+
+  // H2 uses the default PUBLIC schema.
+ override def getSchemaName: String = {
+ System.getProperty(TestDatabase.databaseNameProperty, "PUBLIC")
+ }
+
+ // The base JDBC URL.
+ private val url = {
+ //"jdbc:h2:mem:" + getSchemaName
+ "jdbc:h2:mem:mytest"
+ }
+  // Admin JDBC URL; DB_CLOSE_DELAY=-1 keeps the in-memory database
+  // alive until the JVM exits.
+  private val adminUrl = url + ";DB_CLOSE_DELAY=-1"
+
+ // Load the h2 database driver.
+ Class.forName("org.h2.Driver")
+
+  // Create the user account used by the tests.
+  With.autoClosingConnection(DriverManager.getConnection(
+    adminUrl,
+    adminUsername,
+    adminPassword)) { c =>
+    TestDatabase.execute(
+      getAdminConnectionBuilder,
+      "CREATE USER " + getUserAccountName + " PASSWORD '" + userPassword + "'")
+  }
+
+ override def getAdminConnectionBuilder: ConnectionBuilder = {
+ new ConnectionBuilder(adminUrl, adminUsername, adminPassword)
+ }
+
+ override def getUserConnectionBuilder: ConnectionBuilder = {
+ new ConnectionBuilder(url, userUsername, userPassword)
+ }
+
+ override def getDatabaseAdapter: DatabaseAdapter = {
+ new H2DatabaseAdapter(None)
+ }
+}
/**
* Object which builds the correct TestDatabase according to the
@@ -375,6 +427,8 @@ object TestDatabase
MysqlTestDatabase
case "postgresql" =>
PostgresqlTestDatabase
+ case "h2" =>
+ H2TestDatabase
case v =>
throw new RuntimeException("Unexpected value for \"" +
vendorNameProperty +
diff --git a/src/test/scala/com/imageworks/migration/tests/vendor/Migrate_20121104011043_CheckVendor.scala b/src/test/scala/com/imageworks/migration/tests/vendor/Migrate_20121104011043_CheckVendor.scala
index 4bc9e30..1196471 100644
--- a/src/test/scala/com/imageworks/migration/tests/vendor/Migrate_20121104011043_CheckVendor.scala
+++ b/src/test/scala/com/imageworks/migration/tests/vendor/Migrate_20121104011043_CheckVendor.scala
@@ -32,13 +32,7 @@
*/
package com.imageworks.migration.tests.vendor
-import com.imageworks.migration.{
- Derby,
- Migration,
- Mysql,
- Oracle,
- Postgresql
-}
+import com.imageworks.migration._
class Migrate_20121104011043_CheckVendor
extends Migration {
@@ -48,6 +42,7 @@ class Migrate_20121104011043_CheckVendor
case Mysql =>
case Oracle =>
case Postgresql =>
+ case H2 =>
case v => throw new AssertionError("Database vendor '" +
v +
"' not handled.")