diff --git a/future/symmetric-jdbc/.classpath b/future/symmetric-jdbc/.classpath
new file mode 100644
index 0000000000..7ac258f3b2
--- /dev/null
+++ b/future/symmetric-jdbc/.classpath
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
diff --git a/future/symmetric-jdbc/.project b/future/symmetric-jdbc/.project
new file mode 100644
index 0000000000..cc7edb124e
--- /dev/null
+++ b/future/symmetric-jdbc/.project
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>symmetric-jdbc</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.eclipse.jdt.core.javabuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.eclipse.jdt.core.javanature</nature>
+	</natures>
+</projectDescription>
diff --git a/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/AbstractJdbcPlatform.java b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/AbstractJdbcPlatform.java
new file mode 100644
index 0000000000..c41ba6352c
--- /dev/null
+++ b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/AbstractJdbcPlatform.java
@@ -0,0 +1,31 @@
+package org.jumpmind.symmetric.jdbc.db;
+
+import javax.sql.DataSource;
+
+import org.jumpmind.symmetric.data.db.AbstractPlatform;
+import org.jumpmind.symmetric.data.model.Table;
+
+abstract public class AbstractJdbcPlatform extends AbstractPlatform {
+
+ protected DataSource dataSource;
+
+ protected JdbcModelReader jdbcModelReader;
+
+ @Override
+ public Table findTable(String catalog, String schema, String tableName) {
+ return null;
+ }
+
+    public java.util.List<Table> findTables(String catalog, String schema) {
+ return null;
+ };
+
+ protected void setDataSource(DataSource dataSource) {
+ this.dataSource = dataSource;
+ }
+
+ public DataSource getDataSource() {
+ return dataSource;
+ }
+
+}
diff --git a/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/DatabaseMetaDataWrapper.java b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/DatabaseMetaDataWrapper.java
new file mode 100644
index 0000000000..df26548708
--- /dev/null
+++ b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/DatabaseMetaDataWrapper.java
@@ -0,0 +1,194 @@
+package org.jumpmind.symmetric.jdbc.db;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.sql.DatabaseMetaData;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+/**
+ * Wrapper class for database meta data that stores additional info.
+ *
+ * @version $Revision: 329426 $
+ */
+public class DatabaseMetaDataWrapper
+{
+ /** The database meta data. */
+ private DatabaseMetaData _metaData;
+    /** The catalog to access in the database. */
+ private String _catalog;
+    /** The schema(s) to access in the database. */
+ private String _schemaPattern;
+ /** The table types to process. */
+ private String[] _tableTypes;
+
+ /**
+ * Returns the database meta data.
+ *
+ * @return The meta data
+ */
+ public DatabaseMetaData getMetaData()
+ {
+ return _metaData;
+ }
+
+ /**
+ * Sets the database meta data.
+ *
+ * @param metaData The meta data
+ */
+ public void setMetaData(DatabaseMetaData metaData)
+ {
+ _metaData = metaData;
+ }
+
+ /**
+ * Returns the catalog in the database to read.
+ *
+ * @return The catalog
+ */
+ public String getCatalog()
+ {
+ return _catalog;
+ }
+
+ /**
+ * Sets the catalog in the database to read.
+ *
+ * @param catalog The catalog
+ */
+ public void setCatalog(String catalog)
+ {
+ _catalog = catalog;
+ }
+
+ /**
+ * Returns the schema in the database to read.
+ *
+ * @return The schema
+ */
+ public String getSchemaPattern()
+ {
+ return _schemaPattern;
+ }
+
+ /**
+ * Sets the schema in the database to read.
+ *
+ * @param schema The schema
+ */
+ public void setSchemaPattern(String schema)
+ {
+ _schemaPattern = schema;
+ }
+
+ /**
+ * Returns the table types to recognize.
+ *
+ * @return The table types
+ */
+ public String[] getTableTypes()
+ {
+ return _tableTypes;
+ }
+
+ /**
+ * Sets the table types to recognize.
+ *
+ * @param types The table types
+ */
+ public void setTableTypes(String[] types)
+ {
+ _tableTypes = types;
+ }
+
+ /**
+ * Convenience method to return the table meta data using the configured catalog,
+ * schema pattern and table types.
+ *
+ * @param tableNamePattern The pattern identifying for which tables to return info
+ * @return The table meta data
+ * @throws SQLException If an error occurred retrieving the meta data
+ * @see DatabaseMetaData#getTables(java.lang.String, java.lang.String, java.lang.String, java.lang.String[])
+ */
+ public ResultSet getTables(String tableNamePattern) throws SQLException
+ {
+ return getMetaData().getTables(getCatalog(), getSchemaPattern(), tableNamePattern, getTableTypes());
+ }
+
+ /**
+ * Convenience method to return the column meta data using the configured catalog and
+ * schema pattern.
+ *
+ * @param tableNamePattern The pattern identifying for which tables to return info
+ * @param columnNamePattern The pattern identifying for which columns to return info
+ * @return The column meta data
+ * @throws SQLException If an error occurred retrieving the meta data
+ * @see DatabaseMetaData#getColumns(java.lang.String, java.lang.String, java.lang.String, java.lang.String)
+ */
+ public ResultSet getColumns(String tableNamePattern, String columnNamePattern) throws SQLException
+ {
+ return getMetaData().getColumns(getCatalog(), getSchemaPattern(), tableNamePattern, columnNamePattern);
+ }
+
+ /**
+ * Convenience method to return the primary key meta data using the configured catalog and
+ * schema pattern.
+ *
+ * @param tableNamePattern The pattern identifying for which tables to return info
+ * @return The primary key meta data
+ * @throws SQLException If an error occurred retrieving the meta data
+ * @see DatabaseMetaData#getPrimaryKeys(java.lang.String, java.lang.String, java.lang.String)
+ */
+ public ResultSet getPrimaryKeys(String tableNamePattern) throws SQLException
+ {
+ return getMetaData().getPrimaryKeys(getCatalog(), getSchemaPattern(), tableNamePattern);
+ }
+
+ /**
+ * Convenience method to return the foreign key meta data using the configured catalog and
+ * schema pattern.
+ *
+ * @param tableNamePattern The pattern identifying for which tables to return info
+ * @return The foreign key meta data
+ * @throws SQLException If an error occurred retrieving the meta data
+ * @see DatabaseMetaData#getImportedKeys(java.lang.String, java.lang.String, java.lang.String)
+ */
+ public ResultSet getForeignKeys(String tableNamePattern) throws SQLException
+ {
+ return getMetaData().getImportedKeys(getCatalog(), getSchemaPattern(), tableNamePattern);
+ }
+
+ /**
+ * Convenience method to return the index meta data using the configured catalog and
+ * schema pattern.
+ *
+ * @param tableNamePattern The pattern identifying for which tables to return info
+ * @param unique Whether to return only indices for unique values
+ * @param approximate Whether the result is allowed to reflect approximate or out of data values
+ * @return The index meta data
+ * @throws SQLException If an error occurred retrieving the meta data
+ * @see DatabaseMetaData#getIndexInfo(java.lang.String, java.lang.String, java.lang.String, boolean, boolean)
+ */
+ public ResultSet getIndices(String tableNamePattern, boolean unique, boolean approximate) throws SQLException
+ {
+ return getMetaData().getIndexInfo(getCatalog(), getSchemaPattern(), tableNamePattern, unique, approximate);
+ }
+}
diff --git a/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/JdbcModelReader.java b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/JdbcModelReader.java
new file mode 100644
index 0000000000..82e9bc0898
--- /dev/null
+++ b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/JdbcModelReader.java
@@ -0,0 +1,1265 @@
+package org.jumpmind.symmetric.jdbc.db;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Types;
+import java.text.Collator;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.sql.DataSource;
+
+import org.jumpmind.symmetric.data.common.Log;
+import org.jumpmind.symmetric.data.common.LogFactory;
+import org.jumpmind.symmetric.data.common.LogLevel;
+import org.jumpmind.symmetric.data.common.StringUtils;
+import org.jumpmind.symmetric.data.db.IPlatform;
+import org.jumpmind.symmetric.data.db.PlatformInfo;
+import org.jumpmind.symmetric.data.model.Column;
+import org.jumpmind.symmetric.data.model.Database;
+import org.jumpmind.symmetric.data.model.ForeignKey;
+import org.jumpmind.symmetric.data.model.Index;
+import org.jumpmind.symmetric.data.model.IndexColumn;
+import org.jumpmind.symmetric.data.model.NonUniqueIndex;
+import org.jumpmind.symmetric.data.model.Reference;
+import org.jumpmind.symmetric.data.model.Table;
+import org.jumpmind.symmetric.data.model.UniqueIndex;
+import org.jumpmind.symmetric.data.process.sql.DataException;
+import org.jumpmind.symmetric.jdbc.sql.IConnectionCallback;
+import org.jumpmind.symmetric.jdbc.sql.Template;
+
+/**
+ * A utility class to create a Database model from a live database.
+ *
+ * @version $Revision: 543392 $
+ */
+public class JdbcModelReader {
+ /** The Log to which logging calls will be made. */
+ protected final Log _log = LogFactory.getLog(getClass());
+
+ /** The descriptors for the relevant columns in the table meta data. */
+ private final List _columnsForTable;
+ /** The descriptors for the relevant columns in the table column meta data. */
+ private final List _columnsForColumn;
+ /** The descriptors for the relevant columns in the primary key meta data. */
+ private final List _columnsForPK;
+ /** The descriptors for the relevant columns in the foreign key meta data. */
+ private final List _columnsForFK;
+ /** The descriptors for the relevant columns in the index meta data. */
+ private final List _columnsForIndex;
+
+ /** The platform that this model reader belongs to. */
+ private IPlatform _platform;
+ /**
+ * Contains default column sizes (minimum sizes that a JDBC-compliant db
+ * must support).
+ */
+ private HashMap _defaultSizes = new HashMap();
+ /** The default database catalog to read. */
+ private String _defaultCatalogPattern = "%";
+ /** The default database schema(s) to read. */
+ private String _defaultSchemaPattern = "%";
+ /** The default pattern for reading all tables. */
+ private String _defaultTablePattern = "%";
+ /** The default pattern for reading all columns. */
+ private String _defaultColumnPattern;
+ /** The table types to recognize per default. */
+ private String[] _defaultTableTypes = { "TABLE" };
+ /** The active connection while reading a database model. */
+ private Connection _connection;
+
+ private DataSource dataSource;
+
+ /**
+ * Creates a new model reader instance.
+ *
+ * @param platform
+     *            The platform this builder belongs to
+ */
+ public JdbcModelReader(IPlatform platform, DataSource dataSource) {
+ _platform = platform;
+ this.dataSource = dataSource;
+
+ _defaultSizes.put(new Integer(Types.CHAR), "254");
+ _defaultSizes.put(new Integer(Types.VARCHAR), "254");
+ _defaultSizes.put(new Integer(Types.LONGVARCHAR), "254");
+ _defaultSizes.put(new Integer(Types.BINARY), "254");
+ _defaultSizes.put(new Integer(Types.VARBINARY), "254");
+ _defaultSizes.put(new Integer(Types.LONGVARBINARY), "254");
+ _defaultSizes.put(new Integer(Types.INTEGER), "32");
+ _defaultSizes.put(new Integer(Types.BIGINT), "64");
+ _defaultSizes.put(new Integer(Types.REAL), "7,0");
+ _defaultSizes.put(new Integer(Types.FLOAT), "15,0");
+ _defaultSizes.put(new Integer(Types.DOUBLE), "15,0");
+ _defaultSizes.put(new Integer(Types.DECIMAL), "15,15");
+ _defaultSizes.put(new Integer(Types.NUMERIC), "15,15");
+
+ _columnsForTable = initColumnsForTable();
+ _columnsForColumn = initColumnsForColumn();
+ _columnsForPK = initColumnsForPK();
+ _columnsForFK = initColumnsForFK();
+ _columnsForIndex = initColumnsForIndex();
+ }
+
+ /**
+ * Returns the platform that this model reader belongs to.
+ *
+ * @return The platform
+ */
+ public IPlatform getPlatform() {
+ return _platform;
+ }
+
+ /**
+ * Returns the platform specific settings.
+ *
+ * @return The platform settings
+ */
+ public PlatformInfo getPlatformInfo() {
+ return _platform.getPlatformInfo();
+ }
+
+ /**
+ * Returns descriptors for the columns that shall be read from the result
+ * set when reading the meta data for a table. Note that the columns are
+ * read in the order defined by this list.
+ * Redefine this method if you want more columns or a different order.
+ *
+ * @return The descriptors for the result set columns
+ */
+ protected List initColumnsForTable() {
+ List result = new ArrayList();
+
+ result.add(new MetaDataColumnDescriptor("TABLE_NAME", Types.VARCHAR));
+ result.add(new MetaDataColumnDescriptor("TABLE_TYPE", Types.VARCHAR, "UNKNOWN"));
+ result.add(new MetaDataColumnDescriptor("TABLE_CAT", Types.VARCHAR));
+ result.add(new MetaDataColumnDescriptor("TABLE_SCHEM", Types.VARCHAR));
+ result.add(new MetaDataColumnDescriptor("REMARKS", Types.VARCHAR));
+
+ return result;
+ }
+
+ /**
+ * Returns descriptors for the columns that shall be read from the result
+ * set when reading the meta data for table columns. Note that the columns
+ * are read in the order defined by this list.
+ * Redefine this method if you want more columns or a different order.
+ *
+ * @return The map column name -> descriptor for the result set columns
+ */
+ protected List initColumnsForColumn() {
+ List result = new ArrayList();
+
+ // As suggested by Alexandre Borgoltz, we're reading the COLUMN_DEF
+ // first because Oracle
+ // has problems otherwise (it seemingly requires a LONG column to be the
+ // first to be read)
+ // See also DDLUTILS-29
+ result.add(new MetaDataColumnDescriptor("COLUMN_DEF", Types.VARCHAR));
+ // we're also reading the table name so that a model reader impl can
+ // filter manually
+ result.add(new MetaDataColumnDescriptor("TABLE_NAME", Types.VARCHAR));
+ result.add(new MetaDataColumnDescriptor("COLUMN_NAME", Types.VARCHAR));
+ result.add(new MetaDataColumnDescriptor("DATA_TYPE", Types.INTEGER, new Integer(
+ java.sql.Types.OTHER)));
+ result.add(new MetaDataColumnDescriptor("NUM_PREC_RADIX", Types.INTEGER, new Integer(10)));
+ result.add(new MetaDataColumnDescriptor("DECIMAL_DIGITS", Types.INTEGER, new Integer(0)));
+ result.add(new MetaDataColumnDescriptor("COLUMN_SIZE", Types.VARCHAR));
+ result.add(new MetaDataColumnDescriptor("IS_NULLABLE", Types.VARCHAR, "YES"));
+ result.add(new MetaDataColumnDescriptor("REMARKS", Types.VARCHAR));
+
+ return result;
+ }
+
+ /**
+ * Returns descriptors for the columns that shall be read from the result
+ * set when reading the meta data for primary keys. Note that the columns
+ * are read in the order defined by this list.
+ * Redefine this method if you want more columns or a different order.
+ *
+ * @return The map column name -> descriptor for the result set columns
+ */
+ protected List initColumnsForPK() {
+ List result = new ArrayList();
+
+ result.add(new MetaDataColumnDescriptor("COLUMN_NAME", Types.VARCHAR));
+ // we're also reading the table name so that a model reader impl can
+ // filter manually
+ result.add(new MetaDataColumnDescriptor("TABLE_NAME", Types.VARCHAR));
+ // the name of the primary key is currently only interesting to the pk
+ // index name resolution
+ result.add(new MetaDataColumnDescriptor("PK_NAME", Types.VARCHAR));
+
+ return result;
+ }
+
+ /**
+ * Returns descriptors for the columns that shall be read from the result
+ * set when reading the meta data for foreign keys originating from a table.
+ * Note that the columns are read in the order defined by this list.
+ * Redefine this method if you want more columns or a different order.
+ *
+ * @return The map column name -> descriptor for the result set columns
+ */
+ protected List initColumnsForFK() {
+ List result = new ArrayList();
+
+ result.add(new MetaDataColumnDescriptor("PKTABLE_NAME", Types.VARCHAR));
+ // we're also reading the table name so that a model reader impl can
+ // filter manually
+ result.add(new MetaDataColumnDescriptor("FKTABLE_NAME", Types.VARCHAR));
+ result.add(new MetaDataColumnDescriptor("KEY_SEQ", Types.TINYINT, new Short((short) 0)));
+ result.add(new MetaDataColumnDescriptor("FK_NAME", Types.VARCHAR));
+ result.add(new MetaDataColumnDescriptor("PKCOLUMN_NAME", Types.VARCHAR));
+ result.add(new MetaDataColumnDescriptor("FKCOLUMN_NAME", Types.VARCHAR));
+
+ return result;
+ }
+
+ /**
+ * Returns descriptors for the columns that shall be read from the result
+ * set when reading the meta data for indices. Note that the columns are
+ * read in the order defined by this list.
+ * Redefine this method if you want more columns or a different order.
+ *
+ * @return The map column name -> descriptor for the result set columns
+ */
+ protected List initColumnsForIndex() {
+ List result = new ArrayList();
+
+ result.add(new MetaDataColumnDescriptor("INDEX_NAME", Types.VARCHAR));
+ // we're also reading the table name so that a model reader impl can
+ // filter manually
+ result.add(new MetaDataColumnDescriptor("TABLE_NAME", Types.VARCHAR));
+ result.add(new MetaDataColumnDescriptor("NON_UNIQUE", Types.BIT, Boolean.TRUE));
+ result.add(new MetaDataColumnDescriptor("ORDINAL_POSITION", Types.TINYINT, new Short(
+ (short) 0)));
+ result.add(new MetaDataColumnDescriptor("COLUMN_NAME", Types.VARCHAR));
+ result.add(new MetaDataColumnDescriptor("TYPE", Types.TINYINT));
+
+ return result;
+ }
+
+ /**
+ * Returns the catalog(s) in the database to read per default.
+ *
+ * @return The default catalog(s)
+ */
+ public String getDefaultCatalogPattern() {
+ return _defaultCatalogPattern;
+ }
+
+ /**
+ * Sets the catalog(s) in the database to read per default.
+ *
+ * @param catalogPattern
+ * The catalog(s)
+ */
+ public void setDefaultCatalogPattern(String catalogPattern) {
+ _defaultCatalogPattern = catalogPattern;
+ }
+
+ /**
+ * Returns the schema(s) in the database to read per default.
+ *
+ * @return The default schema(s)
+ */
+ public String getDefaultSchemaPattern() {
+ return _defaultSchemaPattern;
+ }
+
+ /**
+ * Sets the schema(s) in the database to read per default.
+ *
+ * @param schemaPattern
+ * The schema(s)
+ */
+ public void setDefaultSchemaPattern(String schemaPattern) {
+ _defaultSchemaPattern = schemaPattern;
+ }
+
+ /**
+ * Returns the default pattern to read the relevant tables from the
+ * database.
+ *
+ * @return The table pattern
+ */
+ public String getDefaultTablePattern() {
+ return _defaultTablePattern;
+ }
+
+ /**
+ * Sets the default pattern to read the relevant tables from the database.
+ *
+ * @param tablePattern
+ * The table pattern
+ */
+ public void setDefaultTablePattern(String tablePattern) {
+ _defaultTablePattern = tablePattern;
+ }
+
+ /**
+ * Returns the default pattern to read the relevant columns from the
+ * database.
+ *
+ * @return The column pattern
+ */
+ public String getDefaultColumnPattern() {
+ return _defaultColumnPattern;
+ }
+
+ /**
+ * Sets the default pattern to read the relevant columns from the database.
+ *
+ * @param columnPattern
+ * The column pattern
+ */
+ public void setDefaultColumnPattern(String columnPattern) {
+ _defaultColumnPattern = columnPattern;
+ }
+
+ /**
+ * Returns the table types to recognize per default.
+ *
+ * @return The default table types
+ */
+ public String[] getDefaultTableTypes() {
+ return _defaultTableTypes;
+ }
+
+ /**
+ * Sets the table types to recognize per default. Typical types are "TABLE",
+ * "VIEW", "SYSTEM TABLE", "GLOBAL TEMPORARY", "LOCAL TEMPORARY", "ALIAS",
+ * "SYNONYM".
+ *
+ * @param types
+ * The table types
+ */
+ public void setDefaultTableTypes(String[] types) {
+ _defaultTableTypes = types;
+ }
+
+ /**
+ * Returns the descriptors for the columns to be read from the table meta
+ * data result set.
+ *
+ * @return The column descriptors
+ */
+ protected List getColumnsForTable() {
+ return _columnsForTable;
+ }
+
+ /**
+ * Returns the descriptors for the columns to be read from the column meta
+ * data result set.
+ *
+ * @return The column descriptors
+ */
+ protected List getColumnsForColumn() {
+ return _columnsForColumn;
+ }
+
+ /**
+ * Returns the descriptors for the columns to be read from the primary key
+ * meta data result set.
+ *
+ * @return The column descriptors
+ */
+ protected List getColumnsForPK() {
+ return _columnsForPK;
+ }
+
+ /**
+ * Returns the descriptors for the columns to be read from the foreign key
+ * meta data result set.
+ *
+ * @return The column descriptors
+ */
+ protected List getColumnsForFK() {
+ return _columnsForFK;
+ }
+
+ /**
+ * Returns the descriptors for the columns to be read from the index meta
+ * data result set.
+ *
+ * @return The column descriptors
+ */
+ protected List getColumnsForIndex() {
+ return _columnsForIndex;
+ }
+
+ /**
+ * Returns the active connection. Note that this is only set during a call
+ * to {@link #readTables(String, String, String[])}.
+ *
+     * @return The connection or <code>null</code> if there is no active
+ * connection
+ */
+ protected Connection getConnection() {
+ return _connection;
+ }
+
+ /**
+ * Reads the database model from the given connection.
+ *
+ * @param connection
+ * The connection
+ * @param name
+     *            The name of the resulting database; <code>null</code> when the
+ * default name (the catalog) is desired which might be
+     *            <code>null</code> itself though
+ * @return The database model
+ */
+ public Database getDatabase(Connection connection, String name) throws SQLException {
+ return getDatabase(connection, name, null, null, null);
+ }
+
+ /**
+ * Reads the database model from the given connection.
+ *
+ * @param connection
+ * The connection
+ * @param name
+     *            The name of the resulting database; <code>null</code> when the
+ * default name (the catalog) is desired which might be
+     *            <code>null</code> itself though
+ * @param catalog
+     *            The catalog to access in the database; use <code>null</code>
+ * for the default value
+ * @param schema
+     *            The schema to access in the database; use <code>null</code> for
+ * the default value
+ * @param tableTypes
+     *            The table types to process; use <code>null</code> or an empty
+ * list for the default ones
+ * @return The database model
+ */
+ public Database getDatabase(Connection connection, String name, String catalog, String schema,
+ String[] tableTypes) throws SQLException {
+ Database db = new Database();
+
+ if (name == null) {
+ try {
+ db.setName(connection.getCatalog());
+ if (catalog == null) {
+ catalog = db.getName();
+ }
+ } catch (Exception ex) {
+ _log.log(LogLevel.INFO, ex, "Cannot determine the catalog name from connection.");
+ }
+ } else {
+ db.setName(name);
+ }
+ try {
+ _connection = connection;
+ db.addTables(readTables(catalog, schema, tableTypes));
+ // Note that we do this here instead of in readTable since platforms
+ // may redefine the
+ // readTable method whereas it is highly unlikely that this method
+ // gets redefined
+ if (getPlatform().getPlatformInfo().isForeignKeysSorted()) {
+ sortForeignKeys(db);
+ }
+ } finally {
+ _connection = null;
+ }
+ db.initialize();
+ return db;
+ }
+
+ /**
+ * Reads the tables from the database metadata.
+ *
+ * @param catalog
+     *            The catalog to access in the database; use <code>null</code>
+ * for the default value
+ * @param schemaPattern
+     *            The schema(s) to access in the database; use <code>null</code>
+ * for the default value
+ * @param tableTypes
+     *            The table types to process; use <code>null</code> or an empty
+ * list for the default ones
+ * @return The tables
+ */
+ protected Collection readTables(String catalog, String schemaPattern, String[] tableTypes)
+ throws SQLException {
+ ResultSet tableData = null;
+
+ try {
+ DatabaseMetaDataWrapper metaData = new DatabaseMetaDataWrapper();
+
+ metaData.setMetaData(_connection.getMetaData());
+ metaData.setCatalog(catalog == null ? getDefaultCatalogPattern() : catalog);
+ metaData.setSchemaPattern(schemaPattern == null ? getDefaultSchemaPattern()
+ : schemaPattern);
+ metaData.setTableTypes((tableTypes == null) || (tableTypes.length == 0) ? getDefaultTableTypes()
+ : tableTypes);
+
+ tableData = metaData.getTables(getDefaultTablePattern());
+
+ List tables = new ArrayList();
+
+ while (tableData.next()) {
+ Map values = readColumns(tableData, getColumnsForTable());
+ Table table = readTable(metaData, values);
+
+ if (table != null) {
+ tables.add(table);
+ }
+ }
+
+ final Collator collator = Collator.getInstance();
+
+            Collections.sort(tables, new Comparator<Table>() {
+ public int compare(Table obj1, Table obj2) {
+ return collator.compare(obj1.getName().toUpperCase(), obj2.getName()
+ .toUpperCase());
+ }
+ });
+ return tables;
+ } finally {
+ if (tableData != null) {
+ tableData.close();
+ }
+ }
+ }
+
+ protected String getPlatformTableName(String catalogName, String schemaName, String tblName) {
+ return tblName;
+ }
+
+ protected String getTableNamePattern(String tableName) {
+ return tableName;
+ }
+
+ /**
+ * Returns a new {@link Table} object.
+ */
+ protected Table readTable(String catalogName, String schemaName, String tblName,
+ boolean caseSensitive, boolean makeAllColumnsPKsIfNoneFound) {
+ Table table = readTableCaseSensitive(catalogName, schemaName, tblName, makeAllColumnsPKsIfNoneFound);
+
+ if (table == null && !caseSensitive) {
+ table = readTableCaseSensitive(StringUtils.upperCase(catalogName), StringUtils
+ .upperCase(schemaName), StringUtils.upperCase(tblName), makeAllColumnsPKsIfNoneFound);
+ if (table == null) {
+ table = readTableCaseSensitive(StringUtils.lowerCase(catalogName), StringUtils
+ .lowerCase(schemaName), StringUtils.lowerCase(tblName), makeAllColumnsPKsIfNoneFound);
+ if (table == null) {
+ table = readTableCaseSensitive(catalogName, schemaName, StringUtils
+ .upperCase(tblName), makeAllColumnsPKsIfNoneFound);
+ if (table == null) {
+ table = readTableCaseSensitive(catalogName, schemaName, StringUtils
+ .lowerCase(tblName), makeAllColumnsPKsIfNoneFound);
+ if (table == null) {
+ table = readTableCaseSensitive(catalogName, schemaName,
+ getPlatformTableName(catalogName, schemaName, tblName), makeAllColumnsPKsIfNoneFound);
+ }
+ }
+ }
+ }
+ }
+ return table;
+ }
+
+ protected Table readTableCaseSensitive(String catalogName, String schemaName,
+ final String tblName, final boolean makeAllColumnsPKsIfNoneFound) {
+ Table table = null;
+ try {
+ // If we don't provide a default schema or catalog, then on some
+ // databases multiple results will be found in the metadata from
+ // multiple schemas/catalogs
+ final String schema = StringUtils.isBlank(schemaName) ? _platform.getDefaultSchema() : schemaName;
+ final String catalog = StringUtils.isBlank(catalogName) ? _platform.getDefaultCatalog()
+ : catalogName;
+ table = (Table) new Template(dataSource).execute(new IConnectionCallback() {
+ public Table execute(Connection c) throws SQLException {
+ Table table = null;
+ DatabaseMetaDataWrapper metaData = new DatabaseMetaDataWrapper();
+ metaData.setMetaData(c.getMetaData());
+ metaData.setCatalog(catalog);
+ metaData.setSchemaPattern(schema);
+ metaData.setTableTypes(null);
+ String tableName = tblName;
+ if (_platform.getPlatformInfo().isStoresUpperCaseNamesInCatalog()) {
+ tableName = tblName.toUpperCase();
+ } else if (_platform.getPlatformInfo().isStoresLowerCaseNamesInCatalog()) {
+ tableName = tblName.toLowerCase();
+ }
+
+ ResultSet tableData = null;
+ try {
+ tableData = metaData.getTables(getTableNamePattern(tableName));
+ while (tableData != null && tableData.next()) {
+ Map values = readColumns(tableData,
+ initColumnsForTable());
+ table = readTable(metaData, values);
+ }
+ } finally {
+ Template.close(tableData);
+ }
+
+ if (makeAllColumnsPKsIfNoneFound) {
+ makeAllColumnsPrimaryKeysIfNoPrimaryKeysFound(table);
+ }
+
+ return table;
+ }
+ });
+ } catch (DataException ex) {
+ _log.log(LogLevel.WARN, ex);
+ }
+
+ return table;
+ }
+
+ /**
+ * Treat tables with no primary keys as a table with all primary keys.
+ */
+ protected void makeAllColumnsPrimaryKeysIfNoPrimaryKeysFound(Table table) {
+ if (table != null && table.getPrimaryKeyColumns() != null
+ && table.getPrimaryKeyColumns().size() == 0) {
+ Column[] allCoumns = table.getColumns();
+ for (Column column : allCoumns) {
+ if (!column.isOfBinaryType()) {
+ column.setPrimaryKey(true);
+ }
+ }
+ }
+ }
+
+
+ /**
+ * Reads the next table from the meta data.
+ *
+ * @param metaData
+ * The database meta data
+ * @param values
+ * The table metadata values as defined by
+ * {@link #getColumnsForTable()}
+     * @return The table or <code>null</code> if the result set row did not
+ * contain a valid table
+ */
+ protected Table readTable(DatabaseMetaDataWrapper metaData, Map values)
+ throws SQLException {
+ String tableName = (String) values.get("TABLE_NAME");
+ Table table = null;
+
+ if ((tableName != null) && (tableName.length() > 0)) {
+ table = new Table();
+
+ table.setName(tableName);
+ table.setType((String) values.get("TABLE_TYPE"));
+ table.setCatalog((String) values.get("TABLE_CAT"));
+ table.setSchema((String) values.get("TABLE_SCHEM"));
+ table.setDescription((String) values.get("REMARKS"));
+
+ table.addColumns(readColumns(metaData, tableName));
+ table.addForeignKeys(readForeignKeys(metaData, tableName));
+ table.addIndices(readIndices(metaData, tableName));
+
+ Collection primaryKeys = readPrimaryKeyNames(metaData, tableName);
+
+            for (Iterator<String> it = primaryKeys.iterator(); it.hasNext();) {
+ table.findColumn(it.next(), true).setPrimaryKey(true);
+ }
+
+ if (getPlatformInfo().isSystemIndicesReturned()) {
+ removeSystemIndices(metaData, table);
+ }
+ }
+ return table;
+ }
+
+ /**
+ * Removes system indices (generated by the database for primary and foreign
+ * keys) from the table.
+ *
+ * @param metaData
+ * The database meta data
+ * @param table
+ * The table
+ */
+ protected void removeSystemIndices(DatabaseMetaDataWrapper metaData, Table table)
+ throws SQLException {
+ removeInternalPrimaryKeyIndex(metaData, table);
+
+ for (int fkIdx = 0; fkIdx < table.getForeignKeyCount(); fkIdx++) {
+ removeInternalForeignKeyIndex(metaData, table, table.getForeignKey(fkIdx));
+ }
+ }
+
+ /**
+ * Tries to remove the internal index for the table's primary key.
+ *
+ * @param metaData
+ * The database meta data
+ * @param table
+ * The table
+ */
+ protected void removeInternalPrimaryKeyIndex(DatabaseMetaDataWrapper metaData, Table table)
+ throws SQLException {
+ List pks = table.getPrimaryKeyColumns();
+ List columnNames = new ArrayList();
+
+ for (Column col : pks) {
+ columnNames.add(col.getName());
+ }
+
+ for (int indexIdx = 0; indexIdx < table.getIndexCount();) {
+ Index index = table.getIndex(indexIdx);
+
+ if (index.isUnique() && matches(index, columnNames)
+ && isInternalPrimaryKeyIndex(metaData, table, index)) {
+ table.removeIndex(indexIdx);
+ } else {
+ indexIdx++;
+ }
+ }
+ }
+
+ /**
+ * Tries to remove the internal index for the given foreign key.
+ *
+ * @param metaData
+ * The database meta data
+ * @param table
+ * The table where the table is defined
+ * @param fk
+ * The foreign key
+ */
    protected void removeInternalForeignKeyIndex(DatabaseMetaDataWrapper metaData, Table table,
            ForeignKey fk) throws SQLException {
        List columnNames = new ArrayList();
        // If the platform generates unique FK indices, the generated index is only
        // unique when every local column is part of the primary key; the loop below
        // downgrades the expectation as soon as a non-PK column is seen.
        boolean mustBeUnique = !getPlatformInfo().isSystemForeignKeyIndicesAlwaysNonUnique();

        for (int columnIdx = 0; columnIdx < fk.getReferenceCount(); columnIdx++) {
            String name = fk.getReference(columnIdx).getLocalColumnName();
            Column localColumn = table
                    .findColumn(name, getPlatform().getPlatformInfo().isDelimitedIdentifierModeOn());

            if (mustBeUnique && !localColumn.isPrimaryKey()) {
                mustBeUnique = false;
            }
            columnNames.add(name);
        }

        // Walk the indices without advancing after a removal, since removing an
        // index shifts the remaining entries down by one.
        for (int indexIdx = 0; indexIdx < table.getIndexCount();) {
            Index index = table.getIndex(indexIdx);

            if ((mustBeUnique == index.isUnique()) && matches(index, columnNames)
                    && isInternalForeignKeyIndex(metaData, table, fk, index)) {
                // Record that the FK had an auto-generated backing index before
                // dropping it from the model.
                fk.setAutoIndexPresent(true);
                table.removeIndex(indexIdx);
            } else {
                indexIdx++;
            }
        }
    }
+
+ /**
+ * Checks whether the given index matches the column list.
+ *
+ * @param index
+ * The index
+ * @param columnsToSearchFor
+ * The names of the columns that the index should be for
+ * @return <code>true</code> if the index matches the columns
+ */
+ protected boolean matches(Index index, List columnsToSearchFor) {
+ if (index.getColumnCount() != columnsToSearchFor.size()) {
+ return false;
+ }
+ for (int columnIdx = 0; columnIdx < index.getColumnCount(); columnIdx++) {
+ if (!columnsToSearchFor.get(columnIdx).equals(index.getColumn(columnIdx).getName())) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Tries to determine whether the index is the internal database-generated
+ * index for the given table's primary key. Note that only unique indices
+ * with the correct columns are fed to this method. Redefine this method for
+ * specific platforms if there are better ways to determine internal
+ * indices.
+ *
+ * @param metaData
+ * The database meta data
+ * @param table
+ * The table owning the index
+ * @param index
+ * The index to check
+ * @return <code>true</code> if the index seems to be an internal primary
+ * key one
+ */
    protected boolean isInternalPrimaryKeyIndex(DatabaseMetaDataWrapper metaData, Table table,
            Index index) throws SQLException {
        // Conservative default: nothing is considered internal. Platform-specific
        // subclasses override this where generated indices can be recognized.
        return false;
    }
+
+ /**
+ * Tries to determine whether the index is the internal database-generated
+ * index for the given foreign key. Note that only non-unique indices with
+ * the correct columns are fed to this method. Redefine this method for
+ * specific platforms if there are better ways to determine internal
+ * indices.
+ *
+ * @param metaData
+ * The database meta data
+ * @param table
+ * The table owning the index and foreign key
+ * @param fk
+ * The foreign key
+ * @param index
+ * The index to check
+ * @return <code>true</code> if the index seems to be an internal foreign
+ * key one
+ */
    protected boolean isInternalForeignKeyIndex(DatabaseMetaDataWrapper metaData, Table table,
            ForeignKey fk, Index index) throws SQLException {
        // Conservative default: nothing is considered internal. Platform-specific
        // subclasses override this where generated indices can be recognized.
        return false;
    }
+
+ /**
+ * Reads the column definitions for the indicated table.
+ *
+ * @param metaData
+ * The database meta data
+ * @param tableName
+ * The name of the table
+ * @return The columns
+ */
+ protected Collection readColumns(DatabaseMetaDataWrapper metaData, String tableName)
+ throws SQLException {
+ ResultSet columnData = null;
+
+ try {
+ columnData = metaData.getColumns(tableName, getDefaultColumnPattern());
+
+ List columns = new ArrayList();
+
+ while (columnData.next()) {
+ Map values = readColumns(columnData, getColumnsForColumn());
+
+ columns.add(readColumn(metaData, values));
+ }
+ return columns;
+ } finally {
+ if (columnData != null) {
+ columnData.close();
+ }
+ }
+ }
+
+ /**
+ * Extracts a column definition from the result set.
+ *
+ * @param metaData
+ * The database meta data
+ * @param values
+ * The column meta data values as defined by
+ * {@link #getColumnsForColumn()}
+ * @return The column
+ */
    protected Column readColumn(DatabaseMetaDataWrapper metaData, Map values)
            throws SQLException {
        Column column = new Column();

        column.setName((String) values.get("COLUMN_NAME"));
        column.setDefaultValue((String) values.get("COLUMN_DEF"));
        column.setTypeCode(((Integer) values.get("DATA_TYPE")).intValue());
        column.setPrecisionRadix(((Integer) values.get("NUM_PREC_RADIX")).intValue());

        String size = (String) values.get("COLUMN_SIZE");
        int scale = ((Integer) values.get("DECIMAL_DIGITS")).intValue();

        // Fall back to a per-type default size when the driver did not report one.
        if (size == null) {
            size = (String) _defaultSizes.get(new Integer(column.getTypeCode()));
        }
        // we're setting the size after the precision and radix in case
        // the database prefers to return them in the size value
        column.setSize(size);
        if (scale != 0) {
            // if there is a scale value, set it after the size (which probably
            // did not contain a scale specification)
            column.setScale(scale);
        }
        // ISO SQL metadata reports nullability as YES/NO text.
        column.setRequired("NO".equalsIgnoreCase(((String) values.get("IS_NULLABLE")).trim()));
        column.setDescription((String) values.get("REMARKS"));
        return column;
    }
+
+ /**
+ * Retrieves the names of the columns that make up the primary key for a
+ * given table.
+ *
+ * @param metaData
+ * The database meta data
+ * @param tableName
+ * The name of the table from which to retrieve PK information
+ * @return The primary key column names
+ */
+ protected Collection readPrimaryKeyNames(DatabaseMetaDataWrapper metaData,
+ String tableName) throws SQLException {
+ List pks = new ArrayList();
+ ResultSet pkData = null;
+
+ try {
+ pkData = metaData.getPrimaryKeys(tableName);
+ while (pkData.next()) {
+ Map values = readColumns(pkData, getColumnsForPK());
+
+ pks.add(readPrimaryKeyName(metaData, values));
+ }
+ } finally {
+ if (pkData != null) {
+ pkData.close();
+ }
+ }
+ return pks;
+ }
+
+ /**
+ * Extracts a primary key name from the result set.
+ *
+ * @param metaData
+ * The database meta data
+ * @param values
+ * The primary key meta data values as defined by
+ * {@link #getColumnsForPK()}
+ * @return The primary key name
+ */
    protected String readPrimaryKeyName(DatabaseMetaDataWrapper metaData, Map values)
            throws SQLException {
        // The PK metadata row identifies the key column by name; nothing else is needed.
        return (String) values.get("COLUMN_NAME");
    }
+
+ /**
+ * Retrieves the foreign keys of the indicated table.
+ *
+ * @param metaData
+ * The database meta data
+ * @param tableName
+ * The name of the table from which to retrieve FK information
+ * @return The foreign keys
+ */
+ protected Collection readForeignKeys(DatabaseMetaDataWrapper metaData,
+ String tableName) throws SQLException {
+ Map fks = new LinkedHashMap();
+ ResultSet fkData = null;
+
+ try {
+ fkData = metaData.getForeignKeys(tableName);
+
+ while (fkData.next()) {
+ Map values = readColumns(fkData, getColumnsForFK());
+
+ readForeignKey(metaData, values, fks);
+ }
+ } finally {
+ if (fkData != null) {
+ fkData.close();
+ }
+ }
+ return fks.values();
+ }
+
+ /**
+ * Reads the next foreign key spec from the result set.
+ *
+ * @param metaData
+ * The database meta data
+ * @param values
+ * The foreign key meta data as defined by
+ * {@link #getColumnsForFK()}
+ * @param knownFks
+ * The already read foreign keys for the current table
+ */
    protected void readForeignKey(DatabaseMetaDataWrapper metaData, Map values,
            Map knownFks) throws SQLException {
        String fkName = (String) values.get("FK_NAME");
        ForeignKey fk = (ForeignKey) knownFks.get(fkName);

        // A multi-column foreign key spans several metadata rows sharing the same
        // FK_NAME; create the key on the first row, append references afterwards.
        if (fk == null) {
            fk = new ForeignKey(fkName);
            fk.setForeignTableName((String) values.get("PKTABLE_NAME"));
            knownFks.put(fkName, fk);
        }

        Reference ref = new Reference();

        ref.setForeignColumnName((String) values.get("PKCOLUMN_NAME"));
        ref.setLocalColumnName((String) values.get("FKCOLUMN_NAME"));
        // KEY_SEQ is the 1-based position of the column within the key; it lets the
        // references be sorted back into declaration order later.
        if (values.containsKey("KEY_SEQ")) {
            ref.setSequenceValue(((Short) values.get("KEY_SEQ")).intValue());
        }
        fk.addReference(ref);
    }
+
+ /**
+ * Determines the indices for the indicated table.
+ *
+ * @param metaData
+ * The database meta data
+ * @param tableName
+ * The name of the table
+ * @return The list of indices
+ */
+ protected Collection readIndices(DatabaseMetaDataWrapper metaData, String tableName)
+ throws SQLException {
+ Map indices = new LinkedHashMap();
+ ResultSet indexData = null;
+
+ try {
+ indexData = metaData.getIndices(tableName, false, false);
+
+ while (indexData.next()) {
+ Map values = readColumns(indexData, getColumnsForIndex());
+
+ readIndex(metaData, values, indices);
+ }
+ } finally {
+ if (indexData != null) {
+ indexData.close();
+ }
+ }
+ return indices.values();
+ }
+
+ /**
+ * Reads the next index spec from the result set.
+ *
+ * @param metaData
+ * The database meta data
+ * @param values
+ * The index meta data as defined by
+ * {@link #getColumnsForIndex()}
+ * @param knownIndices
+ * The already read indices for the current table
+ */
    protected void readIndex(DatabaseMetaDataWrapper metaData, Map values,
            Map knownIndices) throws SQLException {
        Short indexType = (Short) values.get("TYPE");

        // we're ignoring statistic indices
        if ((indexType != null) && (indexType.shortValue() == DatabaseMetaData.tableIndexStatistic)) {
            return;
        }

        String indexName = (String) values.get("INDEX_NAME");

        if (indexName != null) {
            Index index = (Index) knownIndices.get(indexName);

            // A multi-column index spans several metadata rows sharing the same
            // name; create the index on the first row, append columns afterwards.
            if (index == null) {
                // NON_UNIQUE decides which concrete Index implementation to build.
                if (((Boolean) values.get("NON_UNIQUE")).booleanValue()) {
                    index = new NonUniqueIndex();
                } else {
                    index = new UniqueIndex();
                }

                index.setName(indexName);
                knownIndices.put(indexName, index);
            }

            IndexColumn indexColumn = new IndexColumn();

            indexColumn.setName((String) values.get("COLUMN_NAME"));
            // ORDINAL_POSITION keeps the columns in their declared order.
            if (values.containsKey("ORDINAL_POSITION")) {
                indexColumn.setOrdinalPosition(((Short) values.get("ORDINAL_POSITION")).intValue());
            }
            index.addColumn(indexColumn);
        }
    }
+
+ /**
+ * Reads the indicated columns from the result set.
+ *
+ * @param resultSet
+ * The result set
+ * @param columnDescriptors
+ * The dscriptors of the columns to read
+ * @return The read values keyed by the column name
+ */
+ protected Map readColumns(ResultSet resultSet,
+ List columnDescriptors) throws SQLException {
+ HashMap values = new HashMap();
+
+ for (Iterator it = columnDescriptors.iterator(); it.hasNext();) {
+ MetaDataColumnDescriptor descriptor = it.next();
+
+ values.put(descriptor.getName(), descriptor.readColumn(resultSet));
+ }
+ return values;
+ }
+
    protected void determineAutoIncrementFromResultSetMetaData(Table table,
            final Column columnsToCheck[]) throws SQLException {
        // Convenience overload that uses this reader's own connection.
        determineAutoIncrementFromResultSetMetaData(getConnection(), table, columnsToCheck);
    }
+
    protected void determineAutoIncrementFromResultSetMetaData(Connection conn, Table table,
            final Column columnsToCheck[]) throws SQLException {
        // Convenience overload that uses "." as the default catalog separator.
        determineAutoIncrementFromResultSetMetaData(conn, table, columnsToCheck, ".");
    }
+
+ /**
+ * Helper method that determines the auto increment status for the given
+ * columns via the {@link ResultSetMetaData#isAutoIncrement(int)} method.
+ *
+ * This also fixes the following problems: 1) identifiers that use keywords,
+ * 2) different catalog and schema, 3) a different catalog separator character.
+ *
+ * @param conn
+ * The connection to use
+ * @param table
+ * The table
+ * @param columnsToCheck
+ * The columns to check (e.g. the primary key columns)
+ * @param catalogSeparator
+ * The separator between catalog and table name
+ */
    public void determineAutoIncrementFromResultSetMetaData(Connection conn, Table table,
            final Column columnsToCheck[], String catalogSeparator) throws SQLException {
        StringBuilder query = new StringBuilder();
        try {
            if (columnsToCheck == null || columnsToCheck.length == 0) {
                return;
            }
            // Build a query that selects only the columns of interest but returns
            // no rows ("WHERE 1 = 0"); its ResultSetMetaData then exposes the
            // driver's identity flag for each column.
            query.append("SELECT ");
            for (int idx = 0; idx < columnsToCheck.length; idx++) {
                if (idx > 0) {
                    query.append(",");
                }
                query.append("t.");
                appendIdentifier(query, columnsToCheck[idx].getName());
            }
            query.append(" FROM ");

            // Qualify the table with catalog/schema when present, quoting each
            // identifier as needed (handles identifiers that are keywords).
            if (table.getCatalog() != null && !table.getCatalog().trim().equals("")) {
                appendIdentifier(query, table.getCatalog());
                query.append(catalogSeparator);
            }
            if (table.getSchema() != null && !table.getSchema().trim().equals("")) {
                appendIdentifier(query, table.getSchema()).append(".");
            }
            appendIdentifier(query, table.getName()).append(" t WHERE 1 = 0");

            Statement stmt = null;
            try {
                stmt = conn.createStatement();
                ResultSet rs = stmt.executeQuery(query.toString());
                ResultSetMetaData rsMetaData = rs.getMetaData();

                // JDBC result set columns are 1-based.
                for (int idx = 0; idx < columnsToCheck.length; idx++) {
                    if (rsMetaData.isAutoIncrement(idx + 1)) {
                        columnsToCheck[idx].setAutoIncrement(true);
                    }
                }
            } finally {
                // Closing the statement also releases its ResultSet.
                if (stmt != null) {
                    stmt.close();
                }
            }
        } catch (SQLException ex) {
            // Deliberately best-effort: the identity flag is informational and some
            // drivers cannot answer this query, so log a warning and continue.
            StringBuilder msg = new StringBuilder(
                    "Failed to determine auto increment columns using this query: '" + query
                            + "'. This is probably not harmful, but should be fixed. ");
            msg.append("\n");
            msg.append(table.toString());
            if (columnsToCheck != null) {
                for (Column col : columnsToCheck) {
                    msg.append("\n");
                    msg.append(col.toString());
                }
            }
            _log.log(LogLevel.WARN, msg.toString());
        }
    }
+
+ public StringBuilder appendIdentifier(StringBuilder query, String identifier) {
+ if (getPlatform().getPlatformInfo().isDelimitedIdentifierModeOn()) {
+ query.append(getPlatformInfo().getDelimiterToken());
+ }
+ query.append(identifier);
+ if (getPlatform().getPlatformInfo().isDelimitedIdentifierModeOn()) {
+ query.append(getPlatformInfo().getDelimiterToken());
+ }
+ return query;
+ }
+
+ /**
+ * Sorts the foreign keys in the tables of the model.
+ *
+ * @param model
+ * The model
+ */
+ protected void sortForeignKeys(Database model) {
+ for (int tableIdx = 0; tableIdx < model.getTableCount(); tableIdx++) {
+ model.getTable(tableIdx).sortForeignKeys(getPlatform().getPlatformInfo().isDelimitedIdentifierModeOn());
+ }
+ }
+
+ /**
+ * Replaces a specific character sequence in the given text with the
+ * character sequence whose escaped version it is.
+ *
+ * @param text
+ * The text
+ * @param unescaped
+ * The unescaped string, e.g. "'"
+ * @param escaped
+ * The escaped version, e.g. "''"
+ * @return The resulting text
+ */
+ protected String unescape(String text, String unescaped, String escaped) {
+ String result = text;
+
+ // we need special handling if the single quote is escaped via a double
+ // single quote
+ if (result != null) {
+ if (escaped.equals("''")) {
+ if ((result.length() > 2) && result.startsWith("'") && result.endsWith("'")) {
+ result = "'"
+ + StringUtils.replace(result.substring(1, result.length() - 1),
+ escaped, unescaped) + "'";
+ } else {
+ result = StringUtils.replace(result, escaped, unescaped);
+ }
+ } else {
+ result = StringUtils.replace(result, escaped, unescaped);
+ }
+ }
+ return result;
+ }
+
+}
diff --git a/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/JdbcPlatformFactory.java b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/JdbcPlatformFactory.java
new file mode 100644
index 0000000000..2ced6f0f0d
--- /dev/null
+++ b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/JdbcPlatformFactory.java
@@ -0,0 +1,95 @@
+package org.jumpmind.symmetric.jdbc.db;
+
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.sql.DataSource;
+
+import org.jumpmind.symmetric.data.db.IPlatform;
+import org.jumpmind.symmetric.data.process.sql.DataException;
+import org.jumpmind.symmetric.jdbc.db.oracle.OraclePlatform;
+
+public class JdbcPlatformFactory {
+
+ private static Map> platforms = null;
+
+ public static IPlatform createPlatform(DataSource dataSource) {
+ String platformId = lookupPlatformId(dataSource, true);
+ AbstractJdbcPlatform platform = createNewPlatformInstance(platformId);
+ if (platform == null) {
+ platformId = lookupPlatformId(dataSource, false);
+ platform = createNewPlatformInstance(platformId);
+ }
+
+ if (platform != null) {
+ platform.setDataSource(dataSource);
+ }
+ return platform;
+ }
+
+ /**
+ * Creates a new platform for the given (case insensitive) platform
+ * identifier or returns null if the database is not recognized.
+ *
+ * @param databaseName
+ * The name of the database (case is not important)
+ * @return The platform or null
if the database is not
+ * supported
+ */
+ public static AbstractJdbcPlatform createNewPlatformInstance(String databaseName) {
+ Class extends IPlatform> platformClass = getPlatforms().get(databaseName.toLowerCase());
+
+ try {
+ return platformClass != null ? (AbstractJdbcPlatform) platformClass.newInstance() : null;
+ } catch (Exception ex) {
+ throw new DataException("Could not create platform for database " + databaseName, ex);
+ }
+ }
+
+ public static String lookupPlatformId(DataSource dataSource, boolean includeVersion)
+ throws DataException {
+ Connection connection = null;
+
+ try {
+ connection = dataSource.getConnection();
+ DatabaseMetaData metaData = connection.getMetaData();
+ String productString = metaData.getDatabaseProductName();
+ if (includeVersion) {
+ int majorVersion = metaData.getDatabaseMajorVersion();
+ if (majorVersion > 0) {
+ productString += majorVersion;
+ }
+ }
+
+ return productString;
+ } catch (SQLException ex) {
+ throw new DataException(
+ "Error while reading the database metadata: " + ex.getMessage(), ex);
+ } finally {
+ if (connection != null) {
+ try {
+ connection.close();
+ } catch (SQLException ex) {
+ // we ignore this one
+ }
+ }
+ }
+ }
+
+ private static synchronized Map> getPlatforms() {
+ if (platforms == null) {
+ platforms = registerPlatforms();
+ }
+ return platforms;
+ }
+
+ private static synchronized Map> registerPlatforms() {
+ Map> platforms = new HashMap>();
+ platforms.put(OraclePlatform.PLATFORMID, OraclePlatform.class);
+ return platforms;
+ }
+
+}
diff --git a/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/MetaDataColumnDescriptor.java b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/MetaDataColumnDescriptor.java
new file mode 100644
index 0000000000..ab01993450
--- /dev/null
+++ b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/MetaDataColumnDescriptor.java
@@ -0,0 +1,135 @@
+package org.jumpmind.symmetric.jdbc.db;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Types;
+
+/**
+ * Describes a column in a metadata result set.
+ *
+ * @version $Revision: $
+ */
/**
 * Describes a column in a metadata result set: which column to read, how to
 * convert it, and what to return when the column is absent.
 */
public class MetaDataColumnDescriptor {
    /** The name of the column (always stored upper-cased). */
    private String _columnName;
    /** The jdbc type to read from the result set. */
    private int _jdbcType;
    /** The default value if the column is not present in the result set. */
    private Object _defaultValue;

    /**
     * Creates a new descriptor instance with a <code>null</code> default value.
     *
     * @param columnName The name of the column
     * @param jdbcType   The jdbc type for reading from the result set, one of
     *                   VARCHAR, INTEGER, TINYINT, BIT
     */
    public MetaDataColumnDescriptor(String columnName, int jdbcType) {
        this(columnName, jdbcType, null);
    }

    /**
     * Creates a new descriptor instance.
     *
     * @param columnName   The name of the column
     * @param jdbcType     The jdbc type for reading from the result set, one of
     *                     VARCHAR, INTEGER, TINYINT, BIT
     * @param defaultValue The default value if the column is not present in the
     *                     result set
     */
    public MetaDataColumnDescriptor(String columnName, int jdbcType, Object defaultValue) {
        // Metadata result sets are matched case-insensitively, so normalize once here.
        _columnName = columnName.toUpperCase();
        _jdbcType = jdbcType;
        _defaultValue = defaultValue;
    }

    /**
     * Returns the name.
     *
     * @return The name (upper-cased)
     */
    public String getName() {
        return _columnName;
    }

    /**
     * Returns the default value.
     *
     * @return The default value
     */
    public Object getDefaultValue() {
        return _defaultValue;
    }

    /**
     * Returns the jdbc type to read from the result set.
     *
     * @return The jdbc type
     */
    public int getJdbcType() {
        return _jdbcType;
    }

    /**
     * Reads the column from the result set.
     *
     * @param resultSet The result set
     * @return The column value, converted according to the descriptor's jdbc
     *         type, or the default value if the column is not present in the
     *         result set
     */
    public Object readColumn(ResultSet resultSet) throws SQLException {
        ResultSetMetaData metaData = resultSet.getMetaData();
        int foundIdx = -1;

        // Locate the column by case-insensitive name comparison.
        for (int idx = 1; idx <= metaData.getColumnCount(); idx++) {
            if (_columnName.equals(metaData.getColumnName(idx).toUpperCase())) {
                foundIdx = idx;
                break;
            }
        }
        if (foundIdx < 0) {
            return _defaultValue;
        }
        switch (_jdbcType) {
            case Types.BIT:
                return Boolean.valueOf(resultSet.getBoolean(foundIdx));
            case Types.INTEGER:
                return Integer.valueOf(resultSet.getInt(foundIdx));
            case Types.TINYINT:
                return Short.valueOf(resultSet.getShort(foundIdx));
            default:
                return resultSet.getString(foundIdx);
        }
    }
}
diff --git a/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/oracle/OracleJdbcModelReader.java b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/oracle/OracleJdbcModelReader.java
new file mode 100644
index 0000000000..e963a5024f
--- /dev/null
+++ b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/oracle/OracleJdbcModelReader.java
@@ -0,0 +1,346 @@
+package org.jumpmind.symmetric.jdbc.db.oracle;
+
+import java.sql.DatabaseMetaData;
+import java.sql.Date;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.sql.Types;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import javax.sql.DataSource;
+
+import org.jumpmind.symmetric.data.common.LogLevel;
+import org.jumpmind.symmetric.data.db.IPlatform;
+import org.jumpmind.symmetric.data.model.Column;
+import org.jumpmind.symmetric.data.model.Index;
+import org.jumpmind.symmetric.data.model.Table;
+import org.jumpmind.symmetric.data.model.TypeMap;
+import org.jumpmind.symmetric.jdbc.db.DatabaseMetaDataWrapper;
+import org.jumpmind.symmetric.jdbc.db.JdbcModelReader;
+
public class OracleJdbcModelReader extends JdbcModelReader {

    /** The regular expression pattern for the Oracle conversion of ISO dates. */
    private Pattern oracleIsoDatePattern;

    /** The regular expression pattern for the Oracle conversion of ISO times. */
    private Pattern oracleIsoTimePattern;

    /**
     * The regular expression pattern for the Oracle conversion of ISO
     * timestamps.
     */
    private Pattern oracleIsoTimestampPattern;

    /**
     * Creates a new model reader for Oracle 8 databases.
     *
     * @param platform
     *            The platform that this model reader belongs to
     * @param dataSource
     *            The data source to read the schema metadata from
     */
    public OracleJdbcModelReader(IPlatform platform, DataSource dataSource) {
        super(platform, dataSource);
        setDefaultCatalogPattern(null);
        setDefaultSchemaPattern(null);
        setDefaultTablePattern("%");

        // Patterns that recognize the TO_DATE(...) wrappers used for ISO-formatted
        // default values, so readColumn can map them back to plain timestamps.
        oracleIsoDatePattern = Pattern.compile("TO_DATE\\('([^']*)'\\, 'YYYY\\-MM\\-DD'\\)");
        oracleIsoTimePattern = Pattern.compile("TO_DATE\\('([^']*)'\\, 'HH24:MI:SS'\\)");
        oracleIsoTimestampPattern = Pattern
                .compile("TO_DATE\\('([^']*)'\\, 'YYYY\\-MM\\-DD HH24:MI:SS'\\)");
    }

    /**
     * {@inheritDoc}
     *
     * Post-processes the generic JDBC column info with Oracle specifics:
     * back-maps NUMBER/FLOAT sizes to JDBC types, reverses the TO_DATE
     * adaptation of default values, and unescapes text defaults.
     */
    protected Column readColumn(DatabaseMetaDataWrapper metaData, Map values) throws SQLException {
        Column column = super.readColumn(metaData, values);

        if (column.getDefaultValue() != null) {
            // Oracle pads the default value with spaces
            column.setDefaultValue(column.getDefaultValue().trim());
        }
        if (column.getTypeCode() == Types.DECIMAL) {
            // We're back-mapping the NUMBER columns returned by Oracle
            // Note that the JDBC driver returns DECIMAL for these NUMBER
            // columns
            switch (column.getSizeAsInt()) {
            case 1:
                if (column.getScale() == 0) {
                    column.setTypeCode(Types.BIT);
                }
                break;
            case 3:
                if (column.getScale() == 0) {
                    column.setTypeCode(Types.TINYINT);
                }
                break;
            case 5:
                if (column.getScale() == 0) {
                    column.setTypeCode(Types.SMALLINT);
                }
                break;
            case 18:
                column.setTypeCode(Types.REAL);
                break;
            case 22:
                if (column.getScale() == 0) {
                    column.setTypeCode(Types.INTEGER);
                }
                break;
            case 38:
                if (column.getScale() == 0) {
                    column.setTypeCode(Types.BIGINT);
                } else {
                    column.setTypeCode(Types.DOUBLE);
                }
                break;
            }
        } else if (column.getTypeCode() == Types.FLOAT) {
            // Same for REAL, FLOAT, DOUBLE PRECISION, which all back-map to
            // FLOAT but with different sizes
            // (63 for REAL, 126 for FLOAT/DOUBLE PRECISION)
            switch (column.getSizeAsInt()) {
            case 63:
                column.setTypeCode(Types.REAL);
                break;
            case 126:
                column.setTypeCode(Types.DOUBLE);
                break;
            }
        } else if ((column.getTypeCode() == Types.DATE)
                || (column.getTypeCode() == Types.TIMESTAMP)) {
            // Oracle has only one DATE/TIME type, so we can't know which it is
            // and thus map it back to TIMESTAMP
            column.setTypeCode(Types.TIMESTAMP);

            // we also reverse the ISO-format adaptation, and adjust the default
            // value to timestamp
            if (column.getDefaultValue() != null) {
                Timestamp timestamp = null;

                // Try the timestamp form first, then date, then time.
                Matcher matcher = oracleIsoTimestampPattern.matcher(column.getDefaultValue());
                if (matcher.matches()) {
                    String timestampVal = matcher.group(1);
                    timestamp = Timestamp.valueOf(timestampVal);
                } else {
                    matcher = oracleIsoDatePattern.matcher(column.getDefaultValue());
                    if (matcher.matches()) {
                        String dateVal = matcher.group(1);
                        timestamp = new Timestamp(Date.valueOf(dateVal).getTime());
                    } else {
                        matcher = oracleIsoTimePattern.matcher(column.getDefaultValue());
                        if (matcher.matches()) {
                            String dateVal = matcher.group(1);
                            timestamp = new Timestamp(Time.valueOf(dateVal).getTime());
                        }
                    }
                }

                if (timestamp != null) {
                    column.setDefaultValue(timestamp.toString());
                }
            }
        } else if (TypeMap.isTextType(column.getTypeCode())) {
            column.setDefaultValue(unescape(column.getDefaultValue(), "'", "''"));
        }
        return column;
    }

    /**
     * Helper method that determines the auto increment status using Oracle's
     * system tables (user_triggers / user_sequences).
     *
     * @param table
     *            The table
     */
    protected void determineAutoIncrementColumns(Table table) throws SQLException {
        Column[] columns = table.getColumns();

        for (int idx = 0; idx < columns.length; idx++) {
            columns[idx].setAutoIncrement(isAutoIncrement(table, columns[idx]));
        }
    }

    /**
     * Tries to determine whether the given column is an identity column.
     *
     * @param table
     *            The table
     * @param column
     *            The column
     * @return <code>true</code> if the column is an identity column
     */
    protected boolean isAutoIncrement(Table table, Column column) throws SQLException {
        // TODO: For now, we only check whether there is a sequence & trigger as
        // generated by DdlUtils
        // But once sequence/trigger support is in place, it might be possible
        // to 'parse' the trigger body (via SELECT trigger_name, trigger_body
        // FROM user_triggers) in order to determine whether it fits our
        // auto-increment definition
        PreparedStatement prepStmt = null;
        String triggerName = getPlatform().getConstraintName("trg", table, column.getName(), null);
        String seqName = getPlatform().getConstraintName("seq", table, column.getName(), null);

        // Oracle stores undelimited identifiers upper-cased in its catalog views.
        if (!getPlatform().getPlatformInfo().isDelimitedIdentifierModeOn()) {
            triggerName = triggerName.toUpperCase();
            seqName = seqName.toUpperCase();
        }
        try {
            prepStmt = getConnection().prepareStatement(
                    "SELECT * FROM user_triggers WHERE trigger_name = ?");
            prepStmt.setString(1, triggerName);

            ResultSet resultSet = prepStmt.executeQuery();

            if (!resultSet.next()) {
                return false;
            }
            // we have a trigger, so lets check the sequence
            // (closing the statement also closes its result set)
            prepStmt.close();

            prepStmt = getConnection().prepareStatement(
                    "SELECT * FROM user_sequences WHERE sequence_name = ?");
            prepStmt.setString(1, seqName);

            resultSet = prepStmt.executeQuery();
            return resultSet.next();
        } finally {
            if (prepStmt != null) {
                prepStmt.close();
            }
        }
    }

    /**
     * {@inheritDoc}
     */
    protected Collection readIndices(DatabaseMetaDataWrapper metaData, String tableName)
            throws SQLException {
        // Oracle bug 4999817 causes a table analyze to execute in response to a
        // call to DatabaseMetaData#getIndexInfo.
        // The bug is fixed in driver version 10.2.0.4. The bug is present in at
        // least driver versions 10.2.0.1.0, 10.1.0.2.0, and 9.2.0.5.
        // To avoid this bug, we will access user_indexes view.
        // This also allows us to filter system-generated indices which are
        // identified by either having GENERATED='Y' in the query result, or by
        // their index names being equal to the name of the primary key of the
        // table

        StringBuffer query = new StringBuffer();

        query.append("SELECT a.INDEX_NAME, a.INDEX_TYPE, a.UNIQUENESS, b.COLUMN_NAME, b.COLUMN_POSITION FROM USER_INDEXES a, USER_IND_COLUMNS b WHERE ");
        query.append("a.TABLE_NAME=? AND a.GENERATED=? AND a.TABLE_TYPE=? AND a.TABLE_NAME=b.TABLE_NAME AND a.INDEX_NAME=b.INDEX_NAME AND ");
        query.append("a.INDEX_NAME NOT IN (SELECT DISTINCT c.CONSTRAINT_NAME FROM USER_CONSTRAINTS c WHERE c.CONSTRAINT_TYPE=? AND c.TABLE_NAME=a.TABLE_NAME");
        if (metaData.getSchemaPattern() != null) {
            query.append(" AND c.OWNER LIKE ?) AND a.TABLE_OWNER LIKE ?");
        } else {
            query.append(")");
        }

        Map indices = new LinkedHashMap();
        PreparedStatement stmt = null;

        try {
            stmt = getConnection().prepareStatement(query.toString());
            // Undelimited identifiers are stored upper-cased in Oracle's catalog.
            stmt.setString(1,
                    getPlatform().getPlatformInfo().isDelimitedIdentifierModeOn() ? tableName
                            : tableName.toUpperCase());
            stmt.setString(2, "N");
            stmt.setString(3, "TABLE");
            stmt.setString(4, "P");
            if (metaData.getSchemaPattern() != null) {
                stmt.setString(5, metaData.getSchemaPattern().toUpperCase());
                stmt.setString(6, metaData.getSchemaPattern().toUpperCase());
            }

            ResultSet rs = stmt.executeQuery();
            // The values map is reused across rows; readIndex copies what it needs.
            Map values = new HashMap();

            while (rs.next()) {
                String name = rs.getString(1);
                String type = rs.getString(2);
                // Only read in normal oracle indexes
                if (type.startsWith("NORMAL")) {
                    values.put("INDEX_TYPE", new Short(DatabaseMetaData.tableIndexOther));
                    values.put("INDEX_NAME", name);
                    values.put("NON_UNIQUE",
                            "UNIQUE".equalsIgnoreCase(rs.getString(3)) ? Boolean.FALSE
                                    : Boolean.TRUE);
                    values.put("COLUMN_NAME", rs.getString(4));
                    values.put("ORDINAL_POSITION", new Short(rs.getShort(5)));

                    readIndex(metaData, values, indices);
                } else {
                    _log.log(LogLevel.WARN, "Skipping index %s of type %s", name, type);
                }
            }
        } finally {
            if (stmt != null) {
                stmt.close();
            }
        }
        return indices.values();
    }

    /**
     * {@inheritDoc}
     *
     * Filters out tables that sit in the Oracle recycle bin (dropped but not
     * yet purged) and system tables, then determines identity columns.
     */
    protected Table readTable(DatabaseMetaDataWrapper metaData, Map values)
            throws SQLException {
        // Oracle 10 added the recycle bin which contains dropped database
        // objects not yet purged
        // Since we don't want entries from the recycle bin, we filter them out
        PreparedStatement stmt = null;
        boolean deletedObj = false;

        try {
            stmt = getConnection().prepareStatement("SELECT * FROM RECYCLEBIN WHERE OBJECT_NAME=?");
            stmt.setString(1, (String) values.get("TABLE_NAME"));

            ResultSet rs = stmt.executeQuery();

            if (rs.next()) {
                // we found the table in the recycle bin, so its a deleted one
                // which we ignore
                deletedObj = true;
            }
            rs.close();
        } finally {
            if (stmt != null) {
                stmt.close();
            }
        }

        if (!deletedObj) {
            String tableName = (String) values.get("TABLE_NAME");

            // system table ?
            // NOTE(review): a '$' at position 0 is not treated as a system table
            // here (indexOf > 0) - confirm that is intended.
            if (tableName.indexOf('$') > 0) {
                return null;
            }

            Table table = super.readTable(metaData, values);

            if (table != null) {
                determineAutoIncrementColumns(table);
            }

            return table;

        } else {
            return null;
        }
    }

}
diff --git a/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/oracle/OraclePlatform.java b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/oracle/OraclePlatform.java
new file mode 100644
index 0000000000..1b0a275bce
--- /dev/null
+++ b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/db/oracle/OraclePlatform.java
@@ -0,0 +1,69 @@
+package org.jumpmind.symmetric.jdbc.db.oracle;
+
+import java.sql.Types;
+
+import org.jumpmind.symmetric.data.common.StringUtils;
+import org.jumpmind.symmetric.data.db.PlatformInfo;
+import org.jumpmind.symmetric.jdbc.db.AbstractJdbcPlatform;
+import org.jumpmind.symmetric.jdbc.sql.Template;
+
+public class OraclePlatform extends AbstractJdbcPlatform {
+
+ public static String PLATFORMID = "Oracle";
+
+ public OraclePlatform() {
+ PlatformInfo info = getPlatformInfo();
+
+ info.setMaxIdentifierLength(30);
+ info.setIdentityStatusReadingSupported(false);
+
+ // Note that the back-mappings are partially done by the model reader,
+ // not the driver
+ info.addNativeTypeMapping(Types.ARRAY, "BLOB", Types.BLOB);
+ info.addNativeTypeMapping(Types.BIGINT, "NUMBER(38)");
+ info.addNativeTypeMapping(Types.BINARY, "RAW", Types.VARBINARY);
+ info.addNativeTypeMapping(Types.BIT, "NUMBER(1)");
+ info.addNativeTypeMapping(Types.DATE, "DATE", Types.TIMESTAMP);
+ info.addNativeTypeMapping(Types.DECIMAL, "NUMBER");
+ info.addNativeTypeMapping(Types.DISTINCT, "BLOB", Types.BLOB);
+ info.addNativeTypeMapping(Types.DOUBLE, "DOUBLE PRECISION");
+ info.addNativeTypeMapping(Types.FLOAT, "FLOAT", Types.DOUBLE);
+ info.addNativeTypeMapping(Types.JAVA_OBJECT, "BLOB", Types.BLOB);
+ info.addNativeTypeMapping(Types.LONGVARBINARY, "BLOB", Types.BLOB);
+ info.addNativeTypeMapping(Types.LONGVARCHAR, "CLOB", Types.CLOB);
+ info.addNativeTypeMapping(Types.NULL, "BLOB", Types.BLOB);
+ info.addNativeTypeMapping(Types.NUMERIC, "NUMBER", Types.DECIMAL);
+ info.addNativeTypeMapping(Types.INTEGER, "NUMBER", Types.DECIMAL);
+ info.addNativeTypeMapping(Types.OTHER, "BLOB", Types.BLOB);
+ info.addNativeTypeMapping(Types.REF, "BLOB", Types.BLOB);
+ info.addNativeTypeMapping(Types.SMALLINT, "NUMBER(5)");
+ info.addNativeTypeMapping(Types.STRUCT, "BLOB", Types.BLOB);
+ info.addNativeTypeMapping(Types.TIME, "DATE", Types.TIMESTAMP);
+ info.addNativeTypeMapping(Types.TIMESTAMP, "DATE");
+ info.addNativeTypeMapping(Types.TINYINT, "NUMBER(3)");
+ info.addNativeTypeMapping(Types.VARBINARY, "RAW");
+ info.addNativeTypeMapping(Types.VARCHAR, "VARCHAR2");
+ info.addNativeTypeMapping(Types.TIMESTAMP, "TIMESTAMP");
+ info.addNativeTypeMapping("BOOLEAN", "NUMBER(1)", "BIT");
+ info.addNativeTypeMapping("DATALINK", "BLOB", "BLOB");
+
+ info.setDefaultSize(Types.CHAR, 254);
+ info.setDefaultSize(Types.VARCHAR, 254);
+ info.setDefaultSize(Types.BINARY, 254);
+ info.setDefaultSize(Types.VARBINARY, 254);
+
+ info.setStoresUpperCaseNamesInCatalog(true);
+
+ this.jdbcModelReader = new OracleJdbcModelReader(this, dataSource);
+
+ }
+
+ @Override
+ public String getDefaultSchema() {
+ if (StringUtils.isBlank(this.defaultSchema)) {
+ this.defaultSchema = (String) new Template(dataSource).queryForObject(
+ "SELECT sys_context('USERENV', 'CURRENT_SCHEMA') FROM dual", String.class);
+ }
+ return defaultSchema;
+ }
+}
diff --git a/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/sql/IConnectionCallback.java b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/sql/IConnectionCallback.java
new file mode 100644
index 0000000000..725aefb2d3
--- /dev/null
+++ b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/sql/IConnectionCallback.java
@@ -0,0 +1,10 @@
+package org.jumpmind.symmetric.jdbc.sql;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+
/**
 * Callback that performs a unit of work against a live JDBC
 * {@link Connection} supplied by the caller. The caller owns the connection
 * and is responsible for closing it.
 *
 * @param <T> the type of result produced by the callback
 */
public interface IConnectionCallback<T> {

    /**
     * Performs the work using the given connection.
     *
     * @param con the connection to work with; never closed by this method
     * @return the result of the work
     * @throws SQLException if a database access error occurs
     */
    public T execute(Connection con) throws SQLException;

}
diff --git a/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/sql/StatementCreatorUtil.java b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/sql/StatementCreatorUtil.java
new file mode 100644
index 0000000000..4d534b648c
--- /dev/null
+++ b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/sql/StatementCreatorUtil.java
@@ -0,0 +1,336 @@
+package org.jumpmind.symmetric.jdbc.sql;
+
+/*
+ * Copyright 2002-2009 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.StringWriter;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.sql.Blob;
+import java.sql.Clob;
+import java.sql.DatabaseMetaData;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.Calendar;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.jumpmind.symmetric.data.common.Log;
+import org.jumpmind.symmetric.data.common.LogFactory;
+import org.jumpmind.symmetric.data.common.LogLevel;
+
+public abstract class StatementCreatorUtil {
+
+ private static final Log log = LogFactory.getLog(StatementCreatorUtil.class);
+
+ private static final int UNKNOWN_TYPE = Integer.MIN_VALUE;
+
+ private static Map, Integer> javaTypeToSqlTypeMap = new HashMap, Integer>(32);
+
+ static {
+ /*
+ * JDBC 3.0 only - not compatible with e.g. MySQL at present
+ * javaTypeToSqlTypeMap.put(boolean.class, new Integer(Types.BOOLEAN));
+ * javaTypeToSqlTypeMap.put(Boolean.class, new Integer(Types.BOOLEAN));
+ */
+ javaTypeToSqlTypeMap.put(byte.class, Types.TINYINT);
+ javaTypeToSqlTypeMap.put(Byte.class, Types.TINYINT);
+ javaTypeToSqlTypeMap.put(short.class, Types.SMALLINT);
+ javaTypeToSqlTypeMap.put(Short.class, Types.SMALLINT);
+ javaTypeToSqlTypeMap.put(int.class, Types.INTEGER);
+ javaTypeToSqlTypeMap.put(Integer.class, Types.INTEGER);
+ javaTypeToSqlTypeMap.put(long.class, Types.BIGINT);
+ javaTypeToSqlTypeMap.put(Long.class, Types.BIGINT);
+ javaTypeToSqlTypeMap.put(BigInteger.class, Types.BIGINT);
+ javaTypeToSqlTypeMap.put(float.class, Types.FLOAT);
+ javaTypeToSqlTypeMap.put(Float.class, Types.FLOAT);
+ javaTypeToSqlTypeMap.put(double.class, Types.DOUBLE);
+ javaTypeToSqlTypeMap.put(Double.class, Types.DOUBLE);
+ javaTypeToSqlTypeMap.put(BigDecimal.class, Types.DECIMAL);
+ javaTypeToSqlTypeMap.put(java.sql.Date.class, Types.DATE);
+ javaTypeToSqlTypeMap.put(java.sql.Time.class, Types.TIME);
+ javaTypeToSqlTypeMap.put(java.sql.Timestamp.class, Types.TIMESTAMP);
+ javaTypeToSqlTypeMap.put(Blob.class, Types.BLOB);
+ javaTypeToSqlTypeMap.put(Clob.class, Types.CLOB);
+ }
+
+ public static void setValues(PreparedStatement ps, Object[] args) throws SQLException {
+ if (args != null) {
+ for (int i = 0; i < args.length; i++) {
+ Object arg = args[i];
+ doSetValue(ps, i + 1, arg);
+ }
+ }
+ }
+
+ /**
+ * Set the value for prepared statements specified parameter index using the
+ * passed in value. This method can be overridden by sub-classes if needed.
+ *
+ * @param ps
+ * the PreparedStatement
+ * @param parameterPosition
+ * index of the parameter position
+ * @param argValue
+ * the value to set
+ * @throws SQLException
+ */
+ public static void doSetValue(PreparedStatement ps, int parameterPosition, Object argValue)
+ throws SQLException {
+ setParameterValue(ps, parameterPosition, UNKNOWN_TYPE, argValue);
+ }
+
+ /**
+ * Derive a default SQL type from the given Java type.
+ *
+ * @param javaType
+ * the Java type to translate
+ * @return the corresponding SQL type, or null
if none found
+ */
+ public static int javaTypeToSqlParameterType(Class> javaType) {
+ Integer sqlType = javaTypeToSqlTypeMap.get(javaType);
+ if (sqlType != null) {
+ return sqlType;
+ }
+ if (Number.class.isAssignableFrom(javaType)) {
+ return Types.NUMERIC;
+ }
+ if (isStringValue(javaType)) {
+ return Types.VARCHAR;
+ }
+ if (isDateValue(javaType) || Calendar.class.isAssignableFrom(javaType)) {
+ return Types.TIMESTAMP;
+ }
+ return UNKNOWN_TYPE;
+ }
+
+ /**
+ * Set the value for a parameter. The method used is based on the SQL type
+ * of the parameter and we can handle complex types like arrays and LOBs.
+ *
+ * @param ps
+ * the prepared statement or callable statement
+ * @param paramIndex
+ * index of the parameter we are setting
+ * @param sqlType
+ * the SQL type of the parameter
+ * @param inValue
+ * the value to set (plain value or a SqlTypeValue)
+ * @throws SQLException
+ * if thrown by PreparedStatement methods
+ * @see SqlTypeValue
+ */
+ public static void setParameterValue(PreparedStatement ps, int paramIndex, int sqlType,
+ Object inValue) throws SQLException {
+
+ setParameterValueInternal(ps, paramIndex, sqlType, null, null, inValue);
+ }
+
+ /**
+ * Set the value for a parameter. The method used is based on the SQL type
+ * of the parameter and we can handle complex types like arrays and LOBs.
+ *
+ * @param ps
+ * the prepared statement or callable statement
+ * @param paramIndex
+ * index of the parameter we are setting
+ * @param sqlType
+ * the SQL type of the parameter
+ * @param typeName
+ * the type name of the parameter (optional, only used for SQL
+ * NULL and SqlTypeValue)
+ * @param inValue
+ * the value to set (plain value or a SqlTypeValue)
+ * @throws SQLException
+ * if thrown by PreparedStatement methods
+ * @see SqlTypeValue
+ */
+ public static void setParameterValue(PreparedStatement ps, int paramIndex, int sqlType,
+ String typeName, Object inValue) throws SQLException {
+
+ setParameterValueInternal(ps, paramIndex, sqlType, typeName, null, inValue);
+ }
+
+ /**
+ * Set the value for a parameter. The method used is based on the SQL type
+ * of the parameter and we can handle complex types like arrays and LOBs.
+ *
+ * @param ps
+ * the prepared statement or callable statement
+ * @param paramIndex
+ * index of the parameter we are setting
+ * @param sqlType
+ * the SQL type of the parameter
+ * @param typeName
+ * the type name of the parameter (optional, only used for SQL
+ * NULL and SqlTypeValue)
+ * @param scale
+ * the number of digits after the decimal point (for DECIMAL and
+ * NUMERIC types)
+ * @param inValue
+ * the value to set (plain value or a SqlTypeValue)
+ * @throws SQLException
+ * if thrown by PreparedStatement methods
+ * @see SqlTypeValue
+ */
+ private static void setParameterValueInternal(PreparedStatement ps, int paramIndex,
+ int sqlType, String typeName, Integer scale, Object inValue) throws SQLException {
+
+ String typeNameToUse = typeName;
+ int sqlTypeToUse = sqlType;
+ Object inValueToUse = inValue;
+
+ if (inValueToUse == null) {
+ setNull(ps, paramIndex, sqlTypeToUse, typeNameToUse);
+ } else {
+ setValue(ps, paramIndex, sqlTypeToUse, typeNameToUse, scale, inValueToUse);
+ }
+ }
+
+ /**
+ * Set the specified PreparedStatement parameter to null, respecting
+ * database-specific peculiarities.
+ */
+ private static void setNull(PreparedStatement ps, int paramIndex, int sqlType, String typeName)
+ throws SQLException {
+
+ if (sqlType == UNKNOWN_TYPE) {
+ boolean useSetObject = false;
+ sqlType = Types.NULL;
+ try {
+ DatabaseMetaData dbmd = ps.getConnection().getMetaData();
+ String databaseProductName = dbmd.getDatabaseProductName();
+ String jdbcDriverName = dbmd.getDriverName();
+ if (databaseProductName.startsWith("Informix")
+ || jdbcDriverName.startsWith("Microsoft SQL Server")) {
+ useSetObject = true;
+ } else if (databaseProductName.startsWith("DB2")
+ || jdbcDriverName.startsWith("jConnect")
+ || jdbcDriverName.startsWith("SQLServer")
+ || jdbcDriverName.startsWith("Apache Derby")) {
+ sqlType = Types.VARCHAR;
+ }
+ } catch (Throwable ex) {
+ log.log(LogLevel.DEBUG, "Could not check database or driver name", ex);
+ }
+ if (useSetObject) {
+ ps.setObject(paramIndex, null);
+ } else {
+ ps.setNull(paramIndex, sqlType);
+ }
+ } else if (typeName != null) {
+ ps.setNull(paramIndex, sqlType, typeName);
+ } else {
+ ps.setNull(paramIndex, sqlType);
+ }
+ }
+
+ private static void setValue(PreparedStatement ps, int paramIndex, int sqlType,
+ String typeName, Integer scale, Object inValue) throws SQLException {
+
+ if (sqlType == Types.VARCHAR || sqlType == Types.LONGVARCHAR
+ || (sqlType == Types.CLOB && isStringValue(inValue.getClass()))) {
+ ps.setString(paramIndex, inValue.toString());
+ } else if (sqlType == Types.DECIMAL || sqlType == Types.NUMERIC) {
+ if (inValue instanceof BigDecimal) {
+ ps.setBigDecimal(paramIndex, (BigDecimal) inValue);
+ } else if (scale != null) {
+ ps.setObject(paramIndex, inValue, sqlType, scale);
+ } else {
+ ps.setObject(paramIndex, inValue, sqlType);
+ }
+ } else if (sqlType == Types.DATE) {
+ if (inValue instanceof java.util.Date) {
+ if (inValue instanceof java.sql.Date) {
+ ps.setDate(paramIndex, (java.sql.Date) inValue);
+ } else {
+ ps.setDate(paramIndex, new java.sql.Date(((java.util.Date) inValue).getTime()));
+ }
+ } else if (inValue instanceof Calendar) {
+ Calendar cal = (Calendar) inValue;
+ ps.setDate(paramIndex, new java.sql.Date(cal.getTime().getTime()), cal);
+ } else {
+ ps.setObject(paramIndex, inValue, Types.DATE);
+ }
+ } else if (sqlType == Types.TIME) {
+ if (inValue instanceof java.util.Date) {
+ if (inValue instanceof java.sql.Time) {
+ ps.setTime(paramIndex, (java.sql.Time) inValue);
+ } else {
+ ps.setTime(paramIndex, new java.sql.Time(((java.util.Date) inValue).getTime()));
+ }
+ } else if (inValue instanceof Calendar) {
+ Calendar cal = (Calendar) inValue;
+ ps.setTime(paramIndex, new java.sql.Time(cal.getTime().getTime()), cal);
+ } else {
+ ps.setObject(paramIndex, inValue, Types.TIME);
+ }
+ } else if (sqlType == Types.TIMESTAMP) {
+ if (inValue instanceof java.util.Date) {
+ if (inValue instanceof java.sql.Timestamp) {
+ ps.setTimestamp(paramIndex, (java.sql.Timestamp) inValue);
+ } else {
+ ps.setTimestamp(paramIndex,
+ new java.sql.Timestamp(((java.util.Date) inValue).getTime()));
+ }
+ } else if (inValue instanceof Calendar) {
+ Calendar cal = (Calendar) inValue;
+ ps.setTimestamp(paramIndex, new java.sql.Timestamp(cal.getTime().getTime()), cal);
+ } else {
+ ps.setObject(paramIndex, inValue, Types.TIMESTAMP);
+ }
+ } else if (sqlType == UNKNOWN_TYPE) {
+ if (isStringValue(inValue.getClass())) {
+ ps.setString(paramIndex, inValue.toString());
+ } else if (isDateValue(inValue.getClass())) {
+ ps.setTimestamp(paramIndex,
+ new java.sql.Timestamp(((java.util.Date) inValue).getTime()));
+ } else if (inValue instanceof Calendar) {
+ Calendar cal = (Calendar) inValue;
+ ps.setTimestamp(paramIndex, new java.sql.Timestamp(cal.getTime().getTime()), cal);
+ } else {
+ // Fall back to generic setObject call without SQL type
+ // specified.
+ ps.setObject(paramIndex, inValue);
+ }
+ } else {
+ // Fall back to generic setObject call with SQL type specified.
+ ps.setObject(paramIndex, inValue, sqlType);
+ }
+ }
+
+ /**
+ * Check whether the given value can be treated as a String value.
+ */
+ private static boolean isStringValue(Class> inValueType) {
+ // Consider any CharSequence (including StringBuffer and StringBuilder)
+ // as a String.
+ return (CharSequence.class.isAssignableFrom(inValueType) || StringWriter.class
+ .isAssignableFrom(inValueType));
+ }
+
+ /**
+ * Check whether the given value is a java.util.Date
(but not
+ * one of the JDBC-specific subclasses).
+ */
+ private static boolean isDateValue(Class> inValueType) {
+ return (java.util.Date.class.isAssignableFrom(inValueType) && !(java.sql.Date.class
+ .isAssignableFrom(inValueType) || java.sql.Time.class.isAssignableFrom(inValueType) || java.sql.Timestamp.class
+ .isAssignableFrom(inValueType)));
+ }
+
+}
diff --git a/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/sql/Template.java b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/sql/Template.java
new file mode 100644
index 0000000000..13be8f8b7a
--- /dev/null
+++ b/future/symmetric-jdbc/src/main/java/org/jumpmind/symmetric/jdbc/sql/Template.java
@@ -0,0 +1,89 @@
+package org.jumpmind.symmetric.jdbc.sql;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+import javax.sql.DataSource;
+
+import org.jumpmind.symmetric.data.process.sql.DataException;
+
+public class Template {
+
+ DataSource dataSource;
+
+ public Template(DataSource dataSource) {
+ this.dataSource = dataSource;
+ }
+
+ public T queryForObject(final String sql, Class clazz, final Object... args) {
+ return execute(new IConnectionCallback() {
+ @SuppressWarnings("unchecked")
+ @Override
+ public T execute(Connection con) throws SQLException {
+ T result = null;
+ PreparedStatement ps = null;
+ ResultSet rs = null;
+ try {
+ ps = con.prepareStatement(sql);
+ StatementCreatorUtil.setValues(ps, args);
+ rs = ps.executeQuery(sql);
+ if (rs.next()) {
+ result = (T) rs.getObject(1);
+ }
+ } finally {
+ close(rs);
+ close(ps);
+ }
+ return result;
+ }
+ });
+ }
+
+
+ public T execute(IConnectionCallback callback) {
+ Connection c = null;
+ try {
+ c = dataSource.getConnection();
+ return callback.execute(c);
+ } catch (SQLException ex) {
+ throw translate(ex);
+ } finally {
+ close(c);
+ }
+ }
+
+ public static void close(ResultSet rs) {
+ try {
+ if (rs != null) {
+ rs.close();
+ }
+ } catch (SQLException ex) {
+ }
+ }
+
+ public static void close(PreparedStatement ps) {
+ try {
+ if (ps != null) {
+ ps.close();
+ }
+ } catch (SQLException ex) {
+ }
+ }
+
+ public static void close(Connection c) {
+ try {
+ if (c != null) {
+ c.close();
+ }
+ } catch (SQLException ex) {
+ }
+ }
+
+ public DataException translate(SQLException ex) {
+ // TODO check for integrity error
+ return new DataException(ex);
+ }
+
+}