diff --git a/symmetric-assemble/asciidoc.gradle b/symmetric-assemble/asciidoc.gradle
index 0c4a508673..d83212a9a2 100644
--- a/symmetric-assemble/asciidoc.gradle
+++ b/symmetric-assemble/asciidoc.gradle
@@ -133,6 +133,7 @@ task publishDoc {
             ant.scp(
                     todir: "$publishUser:$publishPassword@$publishServer:~/www/doc/$majorMinorVersion",
+                    port: "$publishPort",
                     trust: 'true',
                     verbose: 'true') {
                 fileset(dir: "$buildDir/doc") {
                     include(name: '**/**')
diff --git a/symmetric-assemble/gradle.properties b/symmetric-assemble/gradle.properties
index 7074b0ffb1..de8b604fd0 100644
--- a/symmetric-assemble/gradle.properties
+++ b/symmetric-assemble/gradle.properties
@@ -7,6 +7,7 @@ deployUploadUrl=?
 docFormat=html5
 publishUser=?
 publishPassword=?
+publishPort=?
 publishServer=symmetricds.org
 sourceforgeUser=?
 sourceforgePassword=?
diff --git a/symmetric-assemble/src/asciidoc/advanced-topics.ad b/symmetric-assemble/src/asciidoc/advanced-topics.ad
index bbb5b2c154..5645a35b32 100644
--- a/symmetric-assemble/src/asciidoc/advanced-topics.ad
+++ b/symmetric-assemble/src/asciidoc/advanced-topics.ad
@@ -296,6 +296,18 @@ keytool -keystore cacerts -import -alias sym -file sym.cer
 * Copy the cacerts file that is generated by this process to the `security` directory of each client's SymmetricDS installation.
 
+==== Importing Signed Certificates from PKCS 12 Files
+
+Use the following commands to import a PKCS 12 (.p12) certificate into the SymmetricDS keystore, replacing the bracketed values with your certificate file, password, and alias:
+
+[source, cli]
+----
+keytool -delete -alias sym -noprompt -keystore keystore -storetype jceks -storepass changeit
+
+keytool -importkeystore -deststorepass changeit -destkeypass changeit -destkeystore keystore -storetype jceks -srckeystore {yourcert.p12} -srcstoretype PKCS12 -srcstorepass {pkcs12 password} -srcalias {pkcs12 alias} -destalias sym
+----
+
+
 ==== Changing Keystore Password
 
 The keystore and each key entry is protected with a password that defaults to "changeit". To change the password, use the following steps:
diff --git a/symmetric-assemble/src/asciidoc/appendix/db2.ad b/symmetric-assemble/src/asciidoc/appendix/db2.ad
index 0d69ef0ac7..5a6b4a7a2c 100644
--- a/symmetric-assemble/src/asciidoc/appendix/db2.ad
+++ b/symmetric-assemble/src/asciidoc/appendix/db2.ad
@@ -47,7 +47,13 @@ CREATE SYSTEM TEMPORARY TABLESPACE tmp_tbsp
 |DecFloat|No
 |Binary, VarBinary|No
 |===
-
+
+By default DB2 will not capture the transaction id associated with the captured data. This can be turned on by setting the following parameter to true (it defaults to false):
+
+----
+db2.capture.transaction.id=true
+----
+
 === DB2 for IBM i
 
 The DB2 for IBM i dialect can connect to a database on IBM iSeries (AS/400) machines. It was tested with the http://jt400.sourceforge.net/[jt400 JDBC driver], which is already included in the SymmetricDS download.
diff --git a/symmetric-assemble/src/assembly/assembly-android.xml b/symmetric-assemble/src/assembly/assembly-android.xml
deleted file mode 100644
index c2a876d1ed..0000000000
--- a/symmetric-assemble/src/assembly/assembly-android.xml
+++ /dev/null
@@ -1,58 +0,0 @@
- android - false - zip - . - / - COPYING - CREDITS - NOTICE - org.jumpmind.symmetric:symmetric-android - false - /libs - false - runtime
-\ No newline at end of file
diff --git a/symmetric-assemble/src/assembly/assembly-client.xml b/symmetric-assemble/src/assembly/assembly-client.xml
deleted file mode 100644
index efd11f2684..0000000000
--- a/symmetric-assemble/src/assembly/assembly-client.xml
+++ /dev/null
@@ -1,63 +0,0 @@
- client - zip - .
- / - COPYING - CREDITS - NOTICE - org.jumpmind.symmetric:symmetric-client - false - /lib - false - runtime - /optional - false - provided - false
-\ No newline at end of file
diff --git a/symmetric-assemble/src/assembly/assembly-docbook.xml b/symmetric-assemble/src/assembly/assembly-docbook.xml
deleted file mode 100644
index f13574e26e..0000000000
--- a/symmetric-assemble/src/assembly/assembly-docbook.xml
+++ /dev/null
@@ -1,38 +0,0 @@
- docbook - zip - target/docbook - / - **
-\ No newline at end of file
diff --git a/symmetric-assemble/src/assembly/assembly-server.xml b/symmetric-assemble/src/assembly/assembly-server.xml
deleted file mode 100644
index 407d4a6b7e..0000000000
--- a/symmetric-assemble/src/assembly/assembly-server.xml
+++ /dev/null
@@ -1,151 +0,0 @@
- server - zip - . - / - COPYING - CREDITS - NOTICE
- ../symmetric-server/src/main/deploy - / - bin/sym - bin/symadmin - bin/dbexport - bin/dbimport - bin/dbfill - bin/jmx - bin/dbsql - bin/sym_service
- target/doc - /doc
- ../symmetric-server/src/main/deploy/bin/sym - /bin - 755
- ../symmetric-server/src/main/deploy/bin/symadmin - /bin - 755
- ../symmetric-server/src/main/deploy/bin/setenv - /bin - 755
- ../symmetric-server/src/main/deploy/bin/dbexport - /bin - 755
- ../symmetric-server/src/main/deploy/bin/dbimport - /bin - 755
- ../symmetric-server/src/main/deploy/bin/dbfill - /bin - 755
- ../symmetric-server/src/main/deploy/bin/jmx - /bin - 755
- ../symmetric-server/src/main/deploy/bin/dbsql - /bin - 755
- ../symmetric-server/src/main/deploy/bin/sym_service - /bin - 755
- org.jumpmind.symmetric:symmetric-server
- /web/WEB-INF/lib - org.jumpmind.symmetric:symmetric-server - false
- /web/WEB-INF/lib - false - runtime - true - log4j:log4j
- /lib - false - provided - false - true - junit:junit - org.jumpmind.symmetric:symmetric-wrapper
- /lib - false - provided - symmetric-wrapper.jar - false - true - org.jumpmind.symmetric:symmetric-wrapper
-\ No newline at end of file
diff --git a/symmetric-assemble/src/docbook/advanced-topics.xml b/symmetric-assemble/src/docbook/advanced-topics.xml
deleted file mode 100644
index 1fc3d51748..0000000000
--- a/symmetric-assemble/src/docbook/advanced-topics.xml
+++ /dev/null
@@ -1,808 +0,0 @@
- Advanced Topics
- This chapter focuses on a variety of topics, including deployment options, jobs, clustering, encryption, synchronization control, and configuration of SymmetricDS.
- Advanced Synchronization -
- Bi-Directional Synchronization - - SymmetricDS allows tables to be synchronized bi-directionally. Note that an outgoing - synchronization does not process changes during an incoming synchronization on the same node unless the trigger - was created with the sync_on_incoming_batch flag set. If the sync_on_incoming_batch flag - is set, then update loops are prevented by a feature that is available in most database dialects. - More specifically, during an incoming synchronization the source node_id is put into a database session variable that is - available to the database trigger. Data events are not generated if the target node_id - on an outgoing synchronization is equal to the source node_id. - - - By default, only the columns that changed will be updated in the target system. - - - Conflict resolution strategies can be configured for specific links and/or sets of tables. - -
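- As a concrete example, the sync_on_incoming_batch flag is simply a column on sym_trigger, so a trigger for a bi-directionally synchronized table might be configured with a statement like the following sketch (trigger id, table, and channel are illustrative):
- insert into SYM_TRIGGER (trigger_id, source_table_name, channel_id, sync_on_incoming_batch, last_update_time, create_time) values ('item', 'item', 'item', 1, current_timestamp, current_timestamp);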
- -
- Multi-Tiered Synchronization
- There may be scenarios where data needs to flow through multiple tiers of nodes that are organized in a tree-like network, with each tier requiring a different subset of data. For example, you may have a system where the lowest tier is a computer or device located in a store. Those devices may connect to a server located physically at that store, and the store server may in turn communicate with a corporate server. In this case, the three tiers would be device, store, and corporate. Each tier is typically represented by a node group, and each node in the tier would belong to the node group representing that tier.
- A node can only pull and push data to other nodes that are represented in the node's sym_node table, and only in cases where that node's sync_enabled column is set to 1. Because of this, a tree-like hierarchy of nodes can be created by having only a subset of nodes belonging to the same node group represented at the different branches of the tree.
- If auto registration is turned off, then this setup must occur manually by opening registration for the desired nodes at the desired parent node and by configuring each node's registration.url to be the parent node's URL. The parent node is always tracked by the setting of the parent's node_id in the created_at_node_id column of the new node. When a node registers and downloads its configuration, it is always provided the configuration for nodes that might register with the node itself, based on the Node Group Links defined in the parent node.
- Registration Redirect
- When deploying a multi-tiered system it may be advantageous to have only one registration server, even though the parent node of a registering node could be any of a number of nodes in the system. In SymmetricDS the parent node is always the node that a child registers with. The sym_registration_redirect table allows a single node, usually the root server in the network, to redirect registering nodes to their true parents. It does so based on a mapping found in the table from the external id (registrant_external_id) to the parent's node id (registration_node_id).
- For example, if it is desired to have a series of regional servers that workstations at retail stores get assigned to based on their external_id, the store number, then you might insert the store number as the registrant_external_id and the node_id of the assigned region as the registration_node_id (see the sketch below). When a workstation at the store registers, the root server sends an HTTP redirect to the sync_url of the node that matches the registration_node_id.
- Please see the discussion of initial loads for important details around initial loads and registration when using registration redirect.
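- A sketch of the mapping just described (the store number and region node id are illustrative):
- insert into SYM_REGISTRATION_REDIRECT (registrant_external_id, registration_node_id) values ('store0001', 'region01');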
-
-
- - -
- Deployment Options
- An instance of SymmetricDS can be deployed in several ways:
- Web application archive (WAR) deployed to an application server - This option means packaging a WAR file and deploying to your favorite web server, like Apache Tomcat. It's a little more work, but you can configure the web server to do whatever you need. SymmetricDS can also be embedded in an existing web application, if desired.
- Standalone service that embeds Jetty web server - This option means running the sym command line, which launches the built-in Jetty web server. This is a simple option because it is already provided, but you lose the flexibility to configure the web server any further.
- Embedded as a Java library in an application - This option means you must write a wrapper Java program that runs SymmetricDS. You would probably use the Jetty web server, which is also embeddable. You could bring up an embedded database like Derby or H2. You could configure the web server, database, or SymmetricDS to do whatever you needed, but it's also the most work of the three options discussed thus far.
- The deployment model you choose depends on how much flexibility you need versus how easy you want it to be. Both Jetty and Tomcat are excellent, scalable web servers that compete with each other and have great performance. Most people choose either the Standalone or Web Archive option with Tomcat 5.5 or 6. Deploying to Tomcat is a good middle-of-the-road decision that requires a little more work for more flexibility.
- Next, we will go into a little more detail on each of the three deployment options listed above.
- Web Archive (WAR)
- As a web application archive, a WAR is deployed to an application server, such as Tomcat, Jetty, or JBoss. The structure of the archive will have a web.xml file in the WEB-INF folder, an appropriately configured symmetric.properties file in the WEB-INF/classes folder, and the required JAR files in the WEB-INF/lib folder.
- A war file can be generated using the standalone installation's symadmin utility and the create-war subcommand. The command requires the name of the war file to generate. It essentially packages up the web directory, the conf directory and includes an optional properties file. Note that if a properties file is included, it will be copied to WEB-INF/classes/symmetric.properties. This is the same location conf/symmetric.properties would have been copied to. The generated war distribution uses the same web.xml as the standalone deployment.
- ../bin/symadmin -p my-symmetric-ds.properties create-war /some/path/to/symmetric-ds.war
-
- Embedded
- A Java application with the SymmetricDS Java Archive (JAR) library on its classpath can use the SymmetricWebServer to start the server.
- This example starts the SymmetricDS server on port 8080. The configuration properties file, my-application.properties, is packaged in the application to provide properties that override the SymmetricDS default values. The second parameter to the constructor points to the web directory. The default location is ../web. In this example the web directory is located at conf/web_dir. The web.xml is expected to be found at conf/web_dir/WEB-INF/web.xml.
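- A minimal sketch of that wrapper program (the class name is illustrative; SymmetricWebServer is the embeddable server class described above):
- import org.jumpmind.symmetric.SymmetricWebServer;
- public class StartSymmetricEngine {
-     public static void main(String[] args) throws Exception {
-         // properties on the classpath override the SymmetricDS defaults;
-         // the second argument points at the web directory containing WEB-INF/web.xml
-         SymmetricWebServer node = new SymmetricWebServer("classpath://my-application.properties", "conf/web_dir");
-         // creates database objects, syncs triggers, and starts jobs on port 8080
-         node.start(8080);
-     }
- }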
-
- Standalone
- The sym command line utility starts a standalone web server with SymmetricDS pre-deployed. The standalone server uses an embedded instance of the Jetty application server to handle web requests. The web server can be configured using command line options, or by changing properties in the conf/symmetric-server.properties file.
- The following example starts the SymmetricDS server on port 8080 with the startup properties found in the root.properties file:
- bin/sym --properties root.properties --port 8080 --server
- Even though the port and properties settings can be passed in on the command line, the preferred configuration approach is to put each hosted node's properties file in the engines directory and to modify port settings and enable secure mode using conf/symmetric-server.properties.
- It is also suggested that SymmetricDS be configured to run as a service according to the instructions for your platform, as documented in the following section.
-
- -
- Running SymmetricDS as a Service - - SymmetricDS can be configured to start automatically when the system boots, running as a Windows service or Linux/Unix daemon. - A wrapper process starts SymmetricDS and monitors it, so it can be restarted if it runs out of memory or exits unexpectedly. - The wrapper writes standard output and standard error to the logs/wrapper.log file. - -
- Running as a Windows Service - - To install the service, run the following command as Administrator: - bin\sym_service.bat install - - - Most configuration changes do not require the service to be re-installed. - To un-install the service, run the following command as Administrator: - bin\sym_service.bat uninstall - - - To start and stop the service manually, run the following commands as Administrator: - bin\sym_service.bat start -bin\sym_service.bat stop - -
-
- Running as a Linux/Unix daemon
- An init script is written to the system /etc/init.d directory. Symbolic links are created for starting on run levels 2, 3, and 5 and stopping on run levels 0, 1, and 6. To install the script, run the following command as root:
- bin/sym_service install
- Most configuration changes do not require the service to be re-installed. To un-install the service, run the following command as root:
- bin/sym_service uninstall
- To start and stop the service manually, run the following commands:
- bin/sym_service start
- bin/sym_service stop
-
-
- Clustering
- A single SymmetricDS node may be clustered across a series of instances, creating a web farm. A node might be clustered to provide load balancing and failover, for example.
- When clustered, a hardware load balancer is typically used to round robin client requests to the cluster. The load balancer should be configured for stateless connections. Also, the sync.url SymmetricDS property should be set to the URL of the load balancer.
- If the cluster will be running any of the SymmetricDS jobs, then the cluster.lock.enabled property should be set to true. By setting this property to true, SymmetricDS will use a row in the sym_lock table as a semaphore to make sure that only one instance at a time runs a job. When a lock is acquired, a row is updated in the lock table with the time of the lock and the server id of the locking job. The lock time is set back to null when the job is finished running. Another instance of SymmetricDS cannot acquire a lock until the locking instance (according to the server id) releases the lock. If an instance is terminated while the lock is still held, an instance with the same server id is allowed to reacquire the lock. If the locking instance remains down, the lock can be broken after a period of time, specified by the cluster.lock.timeout.ms property, has expired. Note that if the job is still running and the lock expires, two jobs could be running at the same time, which could cause database deadlocks.
- By default, the locking server id is the hostname of the server. If two clustered instances are running on the same server, then the cluster.server.id property may be set to indicate the name that the instance should use for its server id.
- When deploying SymmetricDS to an application server like Tomcat or JBoss, no special session clustering needs to be configured for the application server.
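- A sketch of the clustering-related entries in an engine properties file (all values are illustrative):
- cluster.lock.enabled=true
- cluster.lock.timeout.ms=1800000
- cluster.server.id=node1-instance-a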
-
- Encrypted Passwords - - The db.user and db.password properties will accept encrypted text, which protects - against casual observation. The text is prefixed with enc: to indicate - that it is encrypted. To encrypt text, use the following command: - - symadmin -e {engine name} encrypt-text text-to-encrypt - or - symadmin -p {properties file} encrypt-text text-to-encrypt - - The text is encrypted using a secret key named "sym.secret" that is retrieved from a keystore file. - By default, the keystore is located in security/keystore. - The location and filename of the keystore can be overridden by setting the "sym.keystore.file" system property. - If the secret key is not found, the system will generate and install a secret key for use with Triple DES cipher. - - - Generate a new secret key for encryption using the keytool - command that comes with the JRE. If there is an existing key in the keystore, first remove it: - - keytool -keystore keystore -storepass changeit -storetype jceks \ - -alias sym.secret -delete - - Then generate a secret key, specifying a cipher algorithm and key size. - Commonly used algorithms that are supported include aes, blowfish, desede, and rc4. - - keytool -keystore keystore -storepass changeit -storetype jceks \ - -alias sym.secret -genseckey -keyalg aes -keysize 128 - - If using an alternative provider, place the provider JAR file in the SymmetricDS lib folder. - The provider class name should be installed in the JRE security properties or specified on the command line. - To install in the JRE, edit the JRE lib/security/java.security file - and set a security.provider.i property for the provider class name. - Or, the provider can be specified on the command line instead. - Both keytool and sym accept command line arguments for the provider class name. - For example, using the Bouncy Castle provider, the command line options would look like: - - keytool -keystore keystore -storepass changeit -storetype jceks \ - -alias sym.secret -genseckey -keyalg idea -keysize 56 \ - -providerClass org.bouncycastle.jce.provider.BouncyCastleProvider \ - -providerPath ..\lib\bcprov-ext.jar - symadmin -providerClass org.bouncycastle.jce.provider.BouncyCastleProvider -e secret - - To customize the encryption, write a Java class that implements the ISecurityService or extends the default SecurityService, and place - the class on the classpath in either lib or - web/WEB-INF/lib folders. - Then, in the symmetric.properties specify your class name for the security service. - - security.service.class.name=org.jumpmind.security.SecurityService - - Remember to specify your properties file when encrypting passwords, so it will use your custom ISecurityService. - - symadmin -p symmetric.properties -e secret - -
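- Once generated, the encrypted text is pasted into the engine properties file in place of the clear-text value, for example (the ciphertext shown is a placeholder, not real output):
- db.user=enc:4Fg7Kj9lQ2w=
- db.password=enc:9sB2mPx31vchA8=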
-
- Secure Transport - - By specifying the "https" protocol for a URL, SymmetricDS will communicate over - Secure Sockets Layer (SSL) for an encrypted transport. The following properties - need to be set with "https" in the URL: - - - - sync.url - - - - This is the URL of the current node, so if you want to force other - nodes to communicate over SSL with this node, you specify "https" in the URL. - - - - - - registration.url - - - - This is the URL where the node will connect for registration when it - first starts up. To protect the registration with SSL, you specify - "https" in the URL. - - - - - For incoming HTTPS connections, SymmetricDS depends on the webserver where - it is deployed, so the webserver must be configured for HTTPS. - As a standalone deployment, the "sym" launcher command provides options for - enabling HTTPS support. - -
- Sym Launcher - - The "sym" launch command uses Jetty as an embedded web server. - Using command line options, the web server can be told to listen for - HTTP, HTTPS, or both. - - - sym --port 8080 --server - - - sym --secure-port 8443 --secure-server - - - sym --port 8080 --secure-port 8443 --mixed-server - -
-
- Tomcat
- If you deploy SymmetricDS to Apache Tomcat, it can be secured by editing the TOMCAT_HOME/conf/server.xml configuration file. There is already a connector entry that can be uncommented and changed to something like the following (paths, passwords, and keystore type will vary by installation):
- <Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true"
-     maxThreads="150" scheme="https" secure="true"
-     clientAuth="false" sslProtocol="TLS"
-     keystoreFile="/symmetric-ds-1.x.x/security/keystore"
-     keystorePass="changeit" keystoreType="JCEKS"
-     truststoreFile="/symmetric-ds-1.x.x/security/cacerts" />
-
- Keystores - - When SymmetricDS connects to a URL with HTTPS, Java checks the validity of the - certificate using the built-in trusted keystore located at - JRE_HOME/lib/security/cacerts. - The "sym" launcher command overrides the trusted keystore to use its own - trusted keystore instead, which is located at - security/cacerts. - This keystore contains the certificate aliased as "sym" for use in testing - and easing deployments. - The trusted keystore can be overridden - by specifying the javax.net.ssl.trustStore system property. - - - When SymmetricDS is run as a secure server with the "sym" launcher, - it accepts incoming requests using the key installed in the keystore - located at - security/keystore. - The default key is provided for convenience of testing, but should be - re-generated for security. - -
-
- Generating Keys
- To generate new keys and install a server certificate, use the following steps:
- Open a command prompt and navigate to the security subdirectory of your SymmetricDS installation on the server to which communication will be secured (typically the "root" or "central office" server).
- Delete the old key pair and certificate.
- keytool -keystore keystore -delete -alias sym
- keytool -keystore cacerts -delete -alias sym
- Generate a new key pair. Note that the first name/last name (the "CN") must match the fully qualified hostname the client will be using to communicate to the server.
- keytool -keystore keystore -alias sym -genkey -keyalg RSA -validity 10950
- (When prompted for the key password, press RETURN to use the same value as the keystore password.)
- Export the certificate from the private keystore.
- keytool -keystore keystore -export -alias sym -rfc -file sym.cer
- Install the certificate in the trusted keystore.
- keytool -keystore cacerts -import -alias sym -file sym.cer
- Copy the cacerts file that is generated by this process to the security directory of each client's SymmetricDS installation.
-
-
- Basic Authentication
- SymmetricDS supports basic authentication for client and server nodes.
- To configure a client node to use basic authentication when communicating with a server node, specify the following startup parameters:
- http.basic.auth.username - username for client node basic authentication. [ Default: ]
- http.basic.auth.password - password for client node basic authentication. [ Default: ]
- The SymmetricDS Standalone Web Server also supports Basic Authentication. It can be enabled by passing the following arguments to the startup program:
- --http-basic-auth-user - username for basic authentication [ Default: ]
- --http-basic-auth-password - password for basic authentication [ Default: ]
- If the server node is deployed to Tomcat or another application server as a WAR or EAR file, then basic authentication is set up with the standard configuration in the web.xml file.
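- For example, a client node's engine properties file might contain entries like the following (credentials are placeholders):
- http.basic.auth.username=syncuser
- http.basic.auth.password=syncsecret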
-
- Data Loaders
- SymmetricDS supports the concept of pluggable data loaders. A data loader defines how data is loaded into a target datasource. The default data loader for SymmetricDS loads data to the relational database that is represented by the SymmetricDS node. Data loaders do not always have to load into the target relational database, however. They can write to a file, a web service, or any other type of non-relational data source. Data loaders can also use other techniques to increase the performance of data loads into the target relational database. Data loaders are pluggable at the channel level. They are configured by setting the data_loader_type column on the channel table.
- Bulk Data Loaders - - To use the preconfigured bulk data loaders, - you set the data_loader_type on a channel to one of the following: - - mysql_bulk - mssql_bulk - postgres_bulk - oracle_bulk - - Tables that should be data loaded should be configured to use this channel. Many times, a reload channel will - be set to bulk load to increase the performance of an initial load. - -
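- For example, switching the reload channel to the PostgreSQL bulk loader might look like the following sketch (the channel id is an assumption for the example):
- update SYM_CHANNEL set data_loader_type = 'postgres_bulk' where channel_id = 'reload';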
-
- MongoDB
- The MongoDB data loader maps relational database rows to MongoDB documents in collections. To use the preconfigured MongoDB data loader, you set the data_loader_type to mongodb on a channel. Tables that should be synchronized to MongoDB should be configured to use this channel. In order to point it at a MongoDB instance, set connection properties similar to the following in the engine's properties file (the property names follow the mongodb.* convention used by this loader and should be verified against your version):
- mongodb.username=xxxx
- mongodb.password=xxxx
- mongodb.host=xxxx
- mongodb.port=xxxx
- mongodb.default.databasename=default
- By default, the catalog or schema passed by SymmetricDS will be used for the MongoDB database name. The table passed by SymmetricDS will be used as the MongoDB collection name. If the catalog or schema are not set, the default database name property is used as the database name.
- The _id of the MongoDB document will be the primary key of the database record. If the table has a composite primary key, then the _id will be an embedded document that has name value pairs of the composite key. The body of the document will be name value pairs of the table column name and table row value.
- SymmetricDS uses the MongoDB Java Driver to upsert documents.
- SymmetricDS transforms can be used to transform the data. If a complex mapping is required that is not supported by transforms, then the IDBObjectMapper can be implemented and a new MongoDataLoaderFactory can be wired up as an extension point.
-
- -
- Java Management Extensions
- Monitoring and administrative operations can be performed using Java Management Extensions (JMX). SymmetricDS uses MX4J to expose JMX attributes and operations that can be accessed from the built-in web console, Java's jconsole, or an application server. By default, the web management console can be opened at an address of the form:
- http://localhost:31416/
- In order to use jconsole, you must enable JMX remote management in the JVM. You can edit the startup scripts to set system parameters such as the following (the port is illustrative):
- -Dcom.sun.management.jmxremote.port=31417
- -Dcom.sun.management.jmxremote.authenticate=false
- -Dcom.sun.management.jmxremote.ssl=false
- More details about enabling JMX for JConsole can be found in Oracle's JConsole documentation.
- Using the Java jconsole command, SymmetricDS is listed as a local process named SymmetricLauncher. In jconsole, SymmetricDS appears under the MBeans tab under the name defined by the engine.name property. The default value is SymmetricDS.
- The management interfaces under SymmetricDS are organized as follows:
- Node - administrative operations
- Parameters - access to properties set through the parameter service
-
- JMS Publishing
- With the proper configuration, SymmetricDS can publish XML messages of captured data changes to JMS during routing, or transactionally while data loading synchronized data into a target database. The following explains how to publish to JMS during synchronization to the target database.
- The XmlPublisherDatabaseWriterFilter is a database writer filter that may be configured to publish specific tables as an XML message to a JMS provider. See the chapter on extension points for information on how to configure an extension point. If the publish to JMS fails, the batch will be marked in error, the loaded data for the batch will be rolled back, and the batch will be retried during the next synchronization run.
- The following is an example extension point configuration that will publish four tables in XML with a root tag of 'sale'. Each XML message will be grouped by the batch and by the column names identified by the groupByColumnNames property which have the same values. The bean id and the jmsTemplate reference are placeholders to be matched to your own Spring context:
- <bean id="configuration-publishingFilter" class="org.jumpmind.symmetric.integrate.XmlPublisherDatabaseWriterFilter">
-     <property name="xmlTagNameToUseForGroup" value="sale"/>
-     <property name="tableNamesToPublishAsGroup">
-         <list>
-             <value>SALE_TX</value>
-             <value>SALE_LINE_ITEM</value>
-             <value>SALE_TAX</value>
-             <value>SALE_TOTAL</value>
-         </list>
-     </property>
-     <property name="groupByColumnNames">
-         <list>
-             <value>STORE_ID</value>
-             <value>BUSINESS_DAY</value>
-             <value>WORKSTATION_ID</value>
-             <value>TRANSACTION_ID</value>
-         </list>
-     </property>
-     <property name="publisher">
-         <bean class="org.jumpmind.symmetric.integrate.SimpleJmsPublisher">
-             <property name="jmsTemplate" ref="definedSpringJmsTemplate"/>
-         </bean>
-     </property>
- </bean>
- The publisher property on the XmlPublisherDatabaseWriterFilter takes an interface of type IPublisher. The implementation demonstrated here publishes to JMS using Spring's JMS template. Other implementations of IPublisher could easily publish the XML to other targets like an HTTP server or the file system, or secure copy it to another server.
- The above configuration will publish an XML document with a root tag of sale containing an element for each captured row in the group. For example, a sale at store 001 on business day 2010-01-22 at workstation 003 with transaction id 1234 would produce one group containing the SALE_TX row (cashier 010110), two SALE_LINE_ITEM rows (item 9999999 at 10.00 each), the SALE_TAX row (1.33), and the SALE_TOTAL row (21.33).
- To publish JMS messages during routing, the same pattern is valid, with the exception that the extension point would be the XmlPublisherDataRouter and the router would be configured by setting the router_type of a router to the Spring bean name of the registered extension point. Of course, the router would need to be linked through trigger routers to each table that needs to be published.
-
diff --git a/symmetric-assemble/src/docbook/android.xml b/symmetric-assemble/src/docbook/android.xml deleted file mode 100644 index 2073a3c03e..0000000000 --- a/symmetric-assemble/src/docbook/android.xml +++ /dev/null @@ -1,265 +0,0 @@ - - -
- Embedding in Android - - - SymmetricDS now has its web-enabled, fault-tolerant, database - synchronization software available on the Android mobile computing - platform. The Android client follows all of the same concepts and - brings to Android all of the same core SymmetricDS features as the - full-featured, Java-based SymmetricDS client. The Android client is a - little bit different in that it is not a stand-alone application, but - is designed to be referenced as a library to run in-process with an - Android application requiring synchronization for its SQLite database. - - - By using SymmetricDS, mobile application development is - simplified, in that the mobile application developer can now focus - solely on interacting with their local SQLite database. SymmetricDS - takes care of capturing and moving data changes to and from a - centralized database when the network is available - - - The same core libraries that are used for the SymmetricDS server are - also used for Android. SymmetricDS's overall footprint is - reduced by eliminating a number of external dependencies in order to - fit better on an Android device. The database access layer is - abstracted so that the Android specific database access layer could be - used. This allows SymmetricDS to be efficient in accessing the SQLite - database on the Android device. - - - In order to convey how to use the SymmetricDS Android libraries, - the example below will show how to integrate SymmetricDS into the NotePad - sample application that comes with the Android ADK. - - - The NotePad sample application is a very simple task list - application that persists notes to a SQLite database table called - Notes. Eclipse 3.7.2 and Android ADK 20.0.3 were used for this - example. - - - Create the NotePad project. You do - this by adding a new Android Sample Project. Select the NotePad - project. - - -
- New Sample NotePad Project - - - - - -
-
- SymmetricDS for Android comes as a zip file of Java archives (jar files) that are required by the SymmetricDS client at runtime. This zip file (symmetric-ds-3.4.7-android.zip) can be downloaded from the SymmetricDS.org website. The first step to using SymmetricDS in an Android application is to unzip the jar files into a location where the project will recognize them. The latest Android SDK and the Eclipse ADK require that these jar files be put into a libs directory under the Android application project.
- New Sample NotePad Project - - - - - -
-
- - Unzip the symmetric-ds-x.x.x-android.zip file to the NotePad - project directory. Refresh the NotePad project in Eclipse. You should - end up with a libs directory that is automatically added to the Android - Dependencies. - - -
- Jar Files Added to Libs - - - - - -
-
- The Android version of the SymmetricDS engine is a Java class that can be instantiated directly or wired into an application via a provided Android service. Whether you are using the service or the engine directly, you need to provide a few required startup parameters to the engine:
- SQLiteOpenHelper - It is best (but not required) if the SQLiteOpenHelper is shared with the application that will be sharing the SQLite database. This core Android Java class provides software synchronization around the access to the database and minimizes locking errors.
- registrationUrl - This is the URL of where the centralized SymmetricDS instance is hosted.
- externalId - This is the identifier that can be used by the centralized SymmetricDS server to identify whether this instance should get data changes that happen on the server. It could be the serial number of the device, an account username, or some other business concept like store number.
- nodeGroupId - This is the group id for the mobile device in the synchronization configuration. For example, if the nodeGroupId is 'handheld', then the SymmetricDS configuration might have a group called 'handheld' and a group called 'corp' where 'handheld' is configured to push and pull data from 'corp.'
- properties - Optionally tweak the settings for SymmetricDS.
- In order to integrate SymmetricDS into the NotePad application, the Android-specific SymmetricService will be used, and we need to tell the Android application this by adding the service to the AndroidManifest.xml file. Add a snippet similar to the following to the Manifest as the last entry under the <application> tag:
- <service android:name="org.jumpmind.symmetric.android.SymmetricService" android:enabled="true">
-     <intent-filter>
-         <action android:name="org.jumpmind.symmetric.android.SymmetricService"/>
-     </intent-filter>
- </service>
- The other change required in the Manifest is to give the application permission to use the Internet. Add this as the first entry in the AndroidManifest.xml, right before the <application> tag:
- <uses-permission android:name="android.permission.INTERNET"/>
- The only additional change needed is the call to start the service in the application. The service needs to be started manually because we need to give the application a chance to provide configuration information to the service.
- In NotePadProvider.java add the following code snippet in the onCreate method.
- NotePadProvider.java - - - - - -
-
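- A sketch of that onCreate snippet follows; the intent key names follow the SymmetricService API in the Android library, while the registry key, URL, external id, group id, and property values are illustrative:
- // share this application's SQLiteOpenHelper with the SymmetricDS engine
- SQLiteOpenHelperRegistry.register("notepad", mOpenHelper);
- Intent intent = new Intent(getContext(), SymmetricService.class);
- intent.putExtra(SymmetricService.INTENTKEY_SQLITEOPENHELPER_REGISTRY_KEY, "notepad");
- intent.putExtra(SymmetricService.INTENTKEY_REGISTRATION_URL, "http://10.0.2.2:31415/sync/server");
- intent.putExtra(SymmetricService.INTENTKEY_EXTERNAL_ID, "android-simulator");
- intent.putExtra(SymmetricService.INTENTKEY_NODE_GROUP_ID, "client");
- intent.putExtra(SymmetricService.INTENTKEY_START_IN_BACKGROUND, true);
- // force an initial load of existing Notes from the client to the server
- Properties properties = new Properties();
- properties.setProperty("auto.reload.reverse", "true");
- intent.putExtra(SymmetricService.INTENTKEY_PROPERTIES, properties);
- getContext().startService(intent);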
- - - - - - - - - This code snippet shows how the SQLiteOpenHelper is shared. The - application's SQLiteOpenHelper is registered in a static registry - provided by the SymmetricDS Android library. When the service is - started, the key used to store the helper is passed to the service so - that the service may pull the helper back out of the registry. - - - - The various parameters needed by SymmetricDS are being set in the Intent - which will be used by the SymmetricService to start the engine. - - - - Most of the parameters will be familiar to SymmetricDS users. In this case - a property is being set which will force an initial load of the - existing Notes from the client to the server. This allows the user of - the application to enter Notes for the first time offline or while the - SymmetricDS engine is unregistered and still have them arrive at the - centralized server once the SymmetricDS engine does get registered. - - - - Next, set up an Android Emulator. This can be done by opening the Android Virtual Device Manager. - Click New and follow the steps. The higher the Emulator's API, the better. - - - - Run your NotePad project by pressing Run on NotePadProvider.java in Eclipse. - When prompted, select the emulator you just created. Monitor the Console in Eclipse. - Let the NotePad.apk install on the emulator. - Now watch the LogCat and wait as it attempts to register with your SymmetricDS Master Node. - - -
diff --git a/symmetric-assemble/src/docbook/configuration.xml b/symmetric-assemble/src/docbook/configuration.xml deleted file mode 100644 index 4bc3d4b1f3..0000000000 --- a/symmetric-assemble/src/docbook/configuration.xml +++ /dev/null @@ -1,1753 +0,0 @@ - - - - - Configuration - - - - -
- Table Triggers
- SymmetricDS captures synchronization data using database triggers. SymmetricDS' triggers are defined in the sym_trigger table. Each record is used by SymmetricDS when generating database triggers. Database triggers are only generated when a trigger is associated with a router whose source_node_group_id matches the node group id of the current node.
- The source_table_name may contain the asterisk ('*') wildcard character so that one sym_trigger table entry can define synchronization for many tables. System tables and any tables that start with the SymmetricDS table prefix will be excluded. A list of wildcard tokens can also be supplied. If there are multiple tokens, they should be delimited with a comma. A wildcard token can also start with a bang ('!') to indicate an exclusive match. Tokens are always evaluated from left to right. When a table match is made, the table is either added to or removed from the list of tables. If another trigger already exists for a table, then that table is not included in the wildcard match (the explicitly defined trigger entry takes precedence).
- When determining whether a data change has occurred or not, by default the triggers will record a change even if the data was updated to the same value(s) it had originally. For example, a data change will be captured if an update of one column in a row updated the value to the same value it already was. There is a global property, trigger.update.capture.changed.data.only.enabled (false by default), that allows you to override this behavior. When set to true, SymmetricDS will only capture a change if the data has truly changed (i.e., when the new column data is not equal to the old column data).
- The property trigger.update.capture.changed.data.only.enabled is currently only supported in the MySQL, DB2, SQL Server and Oracle dialects.
- The following SQL statement defines a trigger that will capture data for a table named "item" whenever data is inserted, updated, or deleted. The trigger is assigned to a channel also called 'item'.
- insert into SYM_TRIGGER (trigger_id, source_table_name, channel_id, last_update_time, create_time) values ('item', 'item', 'item', current_timestamp, current_timestamp);
- Note that many databases allow for multiple triggers of the same type to be defined. Each database defines the order in which the triggers fire differently. If you have additional triggers beyond those SymmetricDS installs on your table, please consult your database documentation to determine if there will be issues with the ordering of the triggers.
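- For example, a single wildcard entry could capture every table whose name starts with sale_ except an archive table; the token syntax follows the rules described above, and the names are chosen purely for illustration:
- insert into SYM_TRIGGER (trigger_id, source_table_name, channel_id, last_update_time, create_time) values ('sale_tables', 'sale_*,!sale_archive', 'sale', current_timestamp, current_timestamp);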
- Linking Triggers - - - The - - table is used to define which specific combinations of triggers and - routers are needed for your configuration. The relationship between - triggers and routers is many-to-many, so this table serves as the join - table to define which combinations are valid, as well as to define - settings available at the trigger-router level of granularity. - - - Three important controls can be configured for a specific Trigger / - Router combination: Enabled, Initial Loads and Ping Back. The parameters - for these can be found in the Trigger / Router mapping table, - - . - - -
- Enable / disable trigger router - - - Each individual trigger-router combination can be disabled or enabled if - needed. By default, a trigger router is enabled, but if you have a - reason you wish to define a trigger router combination prior to it being - active, you can set the - enabled - flag to 0. This will cause the trigger-router mapping to be sent to all - nodes, but the trigger-router mapping will not be considered active or - enabled for the purposes of capturing data changes or routing. - -
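- For example, disabling one mapping might look like this sketch (the trigger and router ids are illustrative):
- update SYM_TRIGGER_ROUTER set enabled = 0 where trigger_id = 'item' and router_id = 'corp-2-store';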
-
- Enabling "Ping Back" - - - SymmetricDS, by default, avoids circular data changes. When a trigger - fires as a result of SymmetricDS itself (such as the case when sync on - incoming batch is set), it records the originating source node of the - data change in - source_node_id - . During routing, if routing results in sending the data back to the - originating source node, the data is not routed by default. If instead - you wish to route the data back to the originating node, you can set the - ping_back_enabled - column for the needed particular trigger / router combination. This will - cause the router to "ping" the data back to the originating node when it - usually would not. - -
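- A sketch of enabling ping back for one mapping (trigger and router ids are illustrative):
- update SYM_TRIGGER_ROUTER set ping_back_enabled = 1 where trigger_id = 'item' and router_id = 'store-2-corp';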
-
-
- Large Objects
- Two lob-related settings are also available on sym_trigger:
- use_stream_lobs - Specifies whether to capture lob data as the trigger is firing or to stream lob columns from the source tables using callbacks during extraction. A value of 1 indicates to stream from the source via callback; with a value of 0, lob data is captured by the trigger.
- use_capture_lobs - Provides a hint as to whether this trigger will capture big lob data. If set to 1, every effort will be made during data capture in the trigger and during data selection for the initial load to use lob facilities to extract and store data in the database.
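- For example, to stream lob columns for an existing trigger rather than capturing them in the trigger body (the trigger id is illustrative):
- update SYM_TRIGGER set use_stream_lobs = 1, last_update_time = current_timestamp where trigger_id = 'item';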
- -
- External Select
- Occasionally, you may find that you need to capture and save away a piece of data present in another table when a trigger is firing. This data is typically needed for the purposes of determining where to 'route' the data to once routing takes place. Each trigger definition contains an optional external_select field which can be used to specify the data to be captured. Once captured, this data is available during routing in sym_data's external_data field. For these cases, place a SQL select statement which returns the data item you need for routing in external_select. An example of the use of external select can be found later in this chapter.
-
- Dead Triggers
- Occasionally the decision of what data to load initially results in additional triggers. These triggers, known as Dead Triggers, are configured such that they do not capture any data changes. A "dead" Trigger is one that does not capture data changes. In other words, the sync_on_insert, sync_on_update, and sync_on_delete properties for the Trigger are all set to false. However, since the Trigger is specified, it will be included in the initial load of data for target Nodes.
- Why might you need a Dead Trigger? A dead Trigger might be used to load a read-only lookup table, for example. It could also be used to load a table that needs to be populated with example or default data. Another use is a recovery load of data for tables that have a single direction of synchronization. For example, a retail store records sales transactions that synchronize in one direction by trickling back to the central office. If the retail store needs to recover all the sales transactions from the central office, they can be sent as part of an initial load from the central office by setting up dead Triggers that "sync" in that direction.
- The following SQL statement sets up a non-syncing dead Trigger that sends the sale_transaction table to the "store" Node Group from the "corp" Node Group during an initial load.
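- A sketch of that configuration (the trigger id, channel, and router id are illustrative):
- insert into SYM_TRIGGER (trigger_id, source_table_name, channel_id, sync_on_insert, sync_on_update, sync_on_delete, last_update_time, create_time) values ('sale_transaction_dead', 'sale_transaction', 'transaction', 0, 0, 0, current_timestamp, current_timestamp);
- insert into SYM_TRIGGER_ROUTER (trigger_id, router_id, initial_load_order, create_time, last_update_time) values ('sale_transaction_dead', 'corp-2-store', 100, current_timestamp, current_timestamp);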
-
- Changing Triggers
- A trigger row may be updated using SQL to change a synchronization definition. SymmetricDS will look for changes each night or whenever the Sync Triggers Job is run (see below). For example, a change to place the table price_changes into the price channel would be accomplished with a statement like the following:
- update SYM_TRIGGER set channel_id = 'price', last_update_time = current_timestamp where source_table_name = 'price_changes';
- All configuration changes should be managed centrally at the registration node. If enabled, configuration changes will be synchronized out to client nodes. When trigger changes reach the client nodes the Sync Triggers Job will run automatically.
- Centrally, the trigger changes will not take effect until the Sync Triggers Job runs. Instead of waiting for the Sync Triggers Job to run overnight after making a Trigger change, you can invoke the syncTriggers() method over JMX or simply restart the SymmetricDS server. A complete record of trigger changes is kept in the sym_trigger_hist table, which was discussed earlier.
-
- -
- Routers
- Routers provided in the base implementation currently include:
- Default Router - a router that sends all data to all nodes that belong to the target node group defined in the router.
- Column Match Router - a router that compares old or new column values to a constant value or the value of a node's external_id or node_id.
- Lookup Router - a router which can be configured to determine routing based on an existing or ancillary table specifically for the purpose of routing data.
- Subselect Router - a router that executes a SQL expression against the database to select nodes to route to. This SQL expression can be passed values of old and new column values.
- Scripted Router - a router that executes a Bean Shell script expression in order to select nodes to route to. The script can use the old and new column values.
- Xml Publishing Router - a router that publishes data changes directly to a messaging solution instead of transmitting changes to registered nodes. This router must be configured manually in XML as an extension point.
- Audit Table Router - a router that inserts into an automatically created audit table. It records captured changes to tables that it is linked to.
- The mapping between the set of triggers and set of routers is many-to-many. This means that one trigger can capture changes and route to multiple locations. It also means that one router can be defined and associated with many different triggers.
- Default Router
- The simplest router is a router that sends all the data that is captured by its associated triggers to all the nodes that belong to the target node group defined in the router. A router is defined as a row in the sym_router table. It is then linked to triggers in the sym_trigger_router table.
- The following SQL statement defines a router that will send data from the 'corp' group to the 'store' group.
- insert into SYM_ROUTER (router_id, source_node_group_id, target_node_group_id, create_time, last_update_time) values ('corp-2-store','corp', 'store', current_timestamp, current_timestamp);
- The following SQL statement maps the 'corp-2-store' router to the item trigger.
- insert into SYM_TRIGGER_ROUTER (trigger_id, router_id, initial_load_order, create_time, last_update_time) values ('item', 'corp-2-store', 1, current_timestamp, current_timestamp);
- -
- Column Match Router
- Sometimes requirements may exist that require data to be routed based on the current value or the old value of a column in the table that is being routed. Column routers are configured by setting the router_type column on the sym_router table to column and setting the router_expression column to an equality expression that represents the expected value of the column.
- The first part of the expression is always the column name. The column name should always be defined in upper case. The upper case column name prefixed by OLD_ can be used for a comparison being done with the old column data value.
- The second part of the expression can be a constant value, a token that represents another column, or a token that represents some other SymmetricDS concept. Token values always begin with a colon (:).
- Consider a table that needs to be routed to all nodes in the target group only when a status column is set to 'READY TO SEND.' The following SQL statement will insert a column router to accomplish that.
- insert into SYM_ROUTER (router_id, source_node_group_id, target_node_group_id, router_type, router_expression, create_time, last_update_time) values ('corp-2-store-ok','corp', 'store', 'column', 'STATUS=READY TO SEND', current_timestamp, current_timestamp);
- Consider a table that needs to be routed to all nodes in the target group only when a status column changes values. The following SQL statement will insert a column router to accomplish that. Note the use of OLD_STATUS, where the OLD_ prefix gives access to the old column value.
- insert into SYM_ROUTER (router_id, source_node_group_id, target_node_group_id, router_type, router_expression, create_time, last_update_time) values ('corp-2-store-status','corp', 'store', 'column', 'STATUS!=:OLD_STATUS', current_timestamp, current_timestamp);
- Consider a table that needs to be routed to only nodes in the target group whose STORE_ID column matches the external id of a node. The following SQL statement will insert a column router to accomplish that.
- insert into SYM_ROUTER (router_id, source_node_group_id, target_node_group_id, router_type, router_expression, create_time, last_update_time) values ('corp-2-store-id','corp', 'store', 'column', 'STORE_ID=:EXTERNAL_ID', current_timestamp, current_timestamp);
- Attributes on a node that can be referenced with tokens include:
- :NODE_ID
- :EXTERNAL_ID
- :NODE_GROUP_ID
- Captured EXTERNAL_DATA is also available for routing as a virtual column.
- Consider a table that needs to be routed to a redirect node defined by its external id in the sym_registration_redirect table. The following SQL statement will insert a column router to accomplish that.
- insert into SYM_ROUTER (router_id, source_node_group_id, target_node_group_id, router_type, router_expression, create_time, last_update_time) values ('corp-2-store-redirect','corp', 'store', 'column', 'STORE_ID=:REDIRECT_NODE', current_timestamp, current_timestamp);
- More than one column may be configured in a router_expression. When more than one column is configured, all matches are added to the list of nodes to route to. The following is an example where the STORE_ID column may contain the STORE_ID to route to or the constant of ALL which indicates that all nodes should receive the update.
- insert into SYM_ROUTER (router_id, source_node_group_id, target_node_group_id, router_type, router_expression, create_time, last_update_time) values ('corp-2-store-multiple-matches','corp', 'store', 'column', 'STORE_ID=ALL or STORE_ID=:EXTERNAL_ID', current_timestamp, current_timestamp);
- The NULL keyword may be used to check if a column is null. If the column is null, then data will be routed to all nodes who qualify for the update. The following is an example where the STORE_ID column is used to route to a set of nodes who have a STORE_ID equal to their EXTERNAL_ID, or to all nodes if the STORE_ID is null.
- insert into SYM_ROUTER (router_id, source_node_group_id, target_node_group_id, router_type, router_expression, create_time, last_update_time) values ('corp-2-store-multiple-matches','corp', 'store', 'column', 'STORE_ID=NULL or STORE_ID=:EXTERNAL_ID', current_timestamp, current_timestamp);
- -
- Lookup Table Router
- A lookup table may contain the id of the node where data needs to be routed. This could be an existing table or an ancillary table that is added specifically for the purpose of routing data. Lookup table routers are configured by setting the router_type column on the sym_router table to lookuptable and setting a list of configuration parameters in the router_expression column.
- Each of the following configuration parameters is required.
- LOOKUP_TABLE - This is the name of the lookup table.
- KEY_COLUMN - This is the name of the column on the table that is being routed. It will be used as a key into the lookup table.
- LOOKUP_KEY_COLUMN - This is the name of the column that is the key on the lookup table.
- EXTERNAL_ID_COLUMN - This is the name of the column that contains the external_id of the node to route to on the lookup table.
- Note that the lookup table will be read into memory and cached for the duration of a routing pass for a single channel.
- Consider a table that needs to be routed to a specific store, but the data in the changing table only contains brand information. In this case, the STORE table may be used as a lookup table.
- insert into SYM_ROUTER (router_id, source_node_group_id, target_node_group_id, router_type, router_expression, create_time, last_update_time) values ('corp-2-store-ok','corp', 'store', 'lookuptable', 'LOOKUP_TABLE=STORE KEY_COLUMN=BRAND_ID LOOKUP_KEY_COLUMN=BRAND_ID EXTERNAL_ID_COLUMN=STORE_ID', current_timestamp, current_timestamp);
- -
- Subselect Router
- Sometimes routing decisions need to be made based on data that is not in the current row being synchronized. A 'subselect' router can be used in these cases. A 'subselect' is configured with a router_expression that is a SQL select statement which returns a result set of the node ids to route to. Column tokens can be used in the SQL expression and will be replaced with row column data. The overhead of using this router type is high because the 'subselect' statement runs for each row that is routed. It should not be used for tables that have a lot of rows that are updated. It also has the disadvantage that if the data being relied on to determine the node id has been deleted before routing takes place, then no results would be returned and routing would not happen.
- The router_expression you specify is appended to the following SQL statement in order to select the node ids:
- select c.node_id from sym_node c where c.node_group_id=:NODE_GROUP_ID and c.sync_enabled=1 and ...
- As you can see, you have access to information about the node currently under consideration for routing through the 'c' alias, for example c.external_id. There are two node-related tokens you can use in your expression:
- :NODE_GROUP_ID
- :EXTERNAL_DATA
- Column names representing data for the row in question are prefixed with a colon as well, for example: :EMPLOYEE_ID, or :OLD_EMPLOYEE_ID. Here, the OLD_ prefix indicates the value before the change in cases where the old data has been captured.
- For an example, consider the case where an Order table and an OrderLineItem table need to be routed to a specific store. The Order table has a column named order_id and STORE_ID. A store node has an external_id that is equal to the STORE_ID on the Order table. OrderLineItem, however, only has a foreign key to its Order of order_id. To route OrderLineItems to the same nodes that the Order will be routed to, we need to reference the master Order record.
- There are two possible ways to solve this in SymmetricDS. One is to configure a 'subselect' router_type on the sym_router table, shown below. (The other possible approach is to use an external_select to capture the data via a trigger for use in a column match router, as described in the External Select section.)
- Our solution utilizing subselect compares the external id of the current node with the store id from the Order table where the order id matches the order id of the current row being routed:
- insert into SYM_ROUTER (router_id, source_node_group_id, target_node_group_id, router_type, router_expression, create_time, last_update_time) values ('corp-2-store','corp', 'store', 'subselect', 'c.external_id in (select STORE_ID from order where order_id=:ORDER_ID)', current_timestamp, current_timestamp);
- As a final note, please note in this example that the parent row in Order must still exist at the moment of routing for the child rows (OrderLineItem) to route, since the select statement is run when routing is occurring, not when the change data is first captured.
- -
Scripted Router

When more flexibility is needed in the logic to choose the nodes to route to, then a scripted router may be used. The currently available scripting language is BeanShell, a Java-like scripting language. Documentation for the BeanShell scripting language can be found at http://www.beanshell.org.

The router_type for a BeanShell scripted router is 'bsh'. The router_expression is a valid BeanShell script that:

adds node ids to the targetNodes collection which is bound to the script,

returns a new collection of node ids,

returns a single node id, or

returns true to indicate that all nodes should be routed, or returns false to indicate that no nodes should be routed.

Also bound to the script evaluation is a list of nodes. The list of nodes is a list of eligible org.jumpmind.symmetric.model.Node objects. The current data column values and the old data column values are bound to the script evaluation as Java object representations of the column data. The columns are bound using the uppercase names of the columns. Old values are bound to uppercase representations that are prefixed with 'OLD_'.

If you need access to any of the SymmetricDS services, then the instance of org.jumpmind.symmetric.ISymmetricEngine is accessible via the bound engine variable.

In the following example, the node_id is a combination of STORE_ID and WORKSTATION_NUMBER, both of which are columns on the table that is being routed.

insert into SYM_ROUTER (router_id, source_node_group_id, target_node_group_id, router_type, router_expression, create_time, last_update_time) values ('corp-2-store-bsh','corp', 'store', 'bsh', 'targetNodes.add(STORE_ID + "-" + WORKSTATION_NUMBER);', current_timestamp, current_timestamp);

The same could also be accomplished by simply returning the node id. The last line of a bsh script is always the return value.

insert into SYM_ROUTER (router_id, source_node_group_id, target_node_group_id, router_type, router_expression, create_time, last_update_time) values ('corp-2-store-bsh','corp', 'store', 'bsh', 'STORE_ID + "-" + WORKSTATION_NUMBER', current_timestamp, current_timestamp);

The following example will synchronize to all nodes if the FLAG column has changed; otherwise, no nodes will be synchronized. Note that here we make use of OLD_, which provides access to the old column value.

insert into SYM_ROUTER (router_id, source_node_group_id, target_node_group_id, router_type, router_expression, create_time, last_update_time) values ('corp-2-store-flag-changed','corp', 'store', 'bsh', 'FLAG != null && !FLAG.equals(OLD_FLAG)', current_timestamp, current_timestamp);

The next example shows a script that iterates over each eligible node and checks to see if the trimmed value of the column named STATION equals the external_id.

insert into SYM_ROUTER (router_id, source_node_group_id, target_node_group_id, router_type, router_expression, create_time, last_update_time) values ('corp-2-store-trimmed-station','corp', 'store', 'bsh', 'for (org.jumpmind.symmetric.model.Node node : nodes) { if (STATION != null && node.getExternalId().equals(STATION.trim())) { targetNodes.add(node.getNodeId()); } }', current_timestamp, current_timestamp);
- -
Audit Table Router

This router audits captured data by recording the change in an audit table that the router creates and keeps up to date (as long as auto.config.database is set to true). The router creates a table named the same as the table for which data was captured, with the suffix of _AUDIT. It will contain all of the same columns as the original table with the same data types, except each column is nullable with no default values.

Three extra "AUDIT" columns are added to the table:

AUDIT_ID - the primary key of the table.
AUDIT_TIME - the time at which the change occurred.
AUDIT_EVENT - the DML type that happened to the row.

The following is an example of an audit router:

insert into SYM_ROUTER (router_id, source_node_group_id, target_node_group_id, router_type, create_time, last_update_time) values ('audit_at_corp','corp', 'local', 'audit', current_timestamp, current_timestamp);

The audit router captures data for a group link. For the audit router to work, it must be associated with a node_group_link with an action of type 'R'. The 'R' stands for 'only routes to'. In the above example, we refer to a 'corp to local' group link. Here, local is a new node_group created for the audit router. No nodes belong to the 'local' node_group. If a trigger linked to an audit router fires on the corp node, a new audit table will be created at the corp node with the new data inserted.
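As a hedged sketch of the group link just described (assuming the standard NODE_GROUP_LINK columns), the 'corp to local' link with the 'R' action could be created like this:

insert into SYM_NODE_GROUP_LINK (source_node_group_id, target_node_group_id, data_event_action) values ('corp', 'local', 'R');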
- - - - -
Utilizing External Select when Routing

There may be times when you wish to route based on a piece of data that exists in a table other than the one being routed. The approach, first discussed earlier in this chapter, is to utilize an external_select to save away data in external_data, which can then be referenced during routing.

Reconsider subselect's Order / OrderLineItem example (found in the Subselect Router section above), where routing for the line item is accomplished by linking to the "header" Order row. As an alternate way of solving the problem, we will now use External Select combined with a column match router.

In this version of the solution, the STORE_ID is captured from the Order table in the EXTERNAL_DATA column when the trigger fires. The router is configured to route based on the captured EXTERNAL_DATA to all nodes whose external id matches the captured external data.

insert into SYM_TRIGGER (trigger_id,source_table_name,channel_id,external_select, last_update_time,create_time) values ('orderlineitem', 'orderlineitem', 'orderlineitem','select STORE_ID from order where order_id=$(curTriggerValue).$(curColumnPrefix)order_id', current_timestamp, current_timestamp);

insert into SYM_ROUTER (router_id, source_node_group_id, target_node_group_id, router_type, router_expression, create_time, last_update_time) values ('corp-2-store-ext','corp', 'store', 'column', 'EXTERNAL_DATA=:EXTERNAL_ID', current_timestamp, current_timestamp);

The following variables can be used with the external select:

$(curTriggerValue) - Variable to be replaced with the NEW or OLD column alias provided by the trigger context, which is platform specific. For insert and update triggers, the NEW alias is used; for delete triggers, the OLD alias is used. For example, "$(curTriggerValue).COLUMN" becomes ":new.COLUMN" for an insert trigger on Oracle.

$(curColumnPrefix) - Variable to be replaced with the NEW_ or OLD_ column prefix for platforms that don't support column aliases. This is currently only used by the H2 database. All other platforms will replace the variable with an empty string. For example, "$(curColumnPrefix)COLUMN" becomes "NEW_COLUMN" on H2 and "COLUMN" on Oracle.

The advantage of this approach over the 'subselect' approach is that it guards against the (somewhat unlikely) possibility that the master Order table row might have been deleted before routing has taken place. This external select solution is also a bit more efficient than the 'subselect' approach, although the triggers produced do run the extra external_select SQL inline with application database updates.
- -
-
- Conflicts - -
-
Transforms

New as of SymmetricDS 2.4, SymmetricDS is able to transform synchronized data by way of configuration (previously, in most cases a custom data loader had to be written). This transformation can take place on a source node or on a target node, as the data is being loaded or extracted. With this feature you can, for example:

Copy a column from a source table to two (or more) target table columns,

Merge columns from two or more source tables into a single row in a target table,

Insert constants in columns in target tables based on source data synchronizations,

Insert multiple rows of data into a single target table based on one change in a source table,

Apply a BeanShell script to achieve a custom transform when loading into the target database.

These transformations can take place either on the target or on the source, and as data is either being extracted or loaded. In either case, the transformation is initiated by the existence of a source synchronization trigger. The source trigger creates the synchronization data, while the transformation configuration decides what to do with the synchronization data as it is either being extracted from the source or loaded into the target. You have the flexibility of defining different transformation behavior depending on whether the source change that triggered the synchronization was an Insert, Update, or Delete. In the case of Delete, you even have options on what exactly to do on the target side, be it a delete of a row, setting columns to specific values, or nothing at all.

A few key concepts are important to keep in mind to understand how SymmetricDS performs transformations. The first concept is that of the "source operation" or "source DML type", which is the type of operation that occurred to generate the synchronization data in the first place (i.e., an insert, a delete, or an update). Your transformations can be configured to act differently based on the source DML type, if desired. When transforming, by default the DML action taken on the target matches that of the action taken on the row in the source (although this behavior can be altered through configuration if needed). If the source DML type is an Insert, for example, the resulting transformation DML(s) will be Insert(s).

Another important concept is the way in which transforms are applied. Each source operation may map to one or more transforms and result in one or more operations on the target tables. Each of these target operations is performed as an independent operation in sequence and must be "complete" from a SQL perspective. In other words, you must define columns for the transformation that are sufficient to fill in any primary key or other required data in the target table if the source operation was an Insert, for example.

Please note that the transformation engine relies on a source trigger / router existing to supply the source data for the transformation. The transform configuration will never be used if the source table and target node group do not have a defined trigger / router combination.
Transform Configuration Tables

SymmetricDS stores its transformation configuration in two configuration tables, TRANSFORM_TABLE and TRANSFORM_COLUMN. Defining a transformation involves configuration in both tables, with the first table defining which source and destination tables are involved, and the second defining the columns involved in the transformation and the behavior of the data for those columns. We will explain the various options available in both tables and the various pre-defined transformation types.

To define a transformation, you will first define the source table and target table that apply to a particular transformation. The source and target tables, along with a unique identifier (the transform_id column), are defined in TRANSFORM_TABLE. In addition, you will specify the source_node_group_id and target_node_group_id to which the transform will apply, along with whether the transform should occur on the Extract step or the Load step (transform_point). All of these values are required.

Three additional configuration settings are also defined at the source-target table level: the order of the transformations, the behavior when deleting, and whether an update should always be attempted first. More specifically:

transform_order: For a single source operation that is mapped to a transformation, there could be more than one target operation that takes place. You may control the order in which the target operations are applied through a configuration parameter defined for each source-target table combination. This might be important, for example, if the foreign key relationships on the target tables require you to execute the transformations in a particular order.

column_policy: Indicates whether unspecified columns are passed through or if all columns must be explicitly defined. The options include:

SPECIFIED - Indicates that only the transform columns that are defined will be the ones that end up as part of the transformation.

IMPLIED - Indicates that if not specified, columns from the source are passed through to the target. This is useful if you just want to map a table from one name to another or from one schema to another. It is also useful if you want to transform a table, but also want to pass it through. You would define an implied transform from the source to the target and would not have to configure each column.

update_action: When a source operation of Update takes place, there are several possible ways to handle the transformation at the target. The options include:

NONE - The update results in no target changes.

DEL_ROW - The update results in a delete of the row as specified by the pk columns defined in the transformation configuration.

UPDATE_COL - The update results in an Update operation on the target which updates the specific rows and columns based on the defined transformation.

BeanShell script ('bsh') - Script code that returns one of the above values. COLUMN variables can be used inside the script.

delete_action: When a source operation of Delete takes place, there are three possible ways to handle the transformation at the target. The options include:

NONE - The delete results in no target changes.

DEL_ROW - The delete results in a delete of the row as specified by the pk columns defined in the transformation configuration.
UPDATE_COL - The delete results in an Update operation on the target which updates the specific rows and columns based on the defined transformation.

update_first: This option overrides the default behavior for an Insert operation. Instead of attempting the Insert first, SymmetricDS will always perform an Update first and then fall back to an Insert if that fails. Note that, by default, fall-back logic always applies for Inserts and Updates. Here, all you are specifying is whether to always do an Update first, which can have performance benefits in certain situations.

For each transformation defined in TRANSFORM_TABLE, the columns to be transformed (and how they are transformed) are defined in TRANSFORM_COLUMN. This column-level table typically has several rows for each transformation id, each of which defines the source column name, the target column name, as well as the following details:

include_on: Defines whether this entry applies to source operations of Insert (I), Update (U), or Delete (D), or any source operation.

pk: Indicates that this mapping is used to define the "primary key" for identifying the target row(s) (which may or may not be the true primary key of the target table). This is used to define the "where" clause when an Update or Delete on the target is occurring. At least one row marked as a pk should be present for each transform_id.

transform_type, transform_expression: Specifies how the data is modified, if at all. The available transform types are discussed below, and the default is 'copy', which just copies the data from source to target.

transform_order: In the event there is more than one column to transform, this defines the relative order in which the transformations are applied.
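As a minimal illustrative sketch (the transform id, node group ids, and table names here are hypothetical, and remaining columns are assumed to take their defaults), a load-time transform that passes a source table through to a differently named target table could be configured as follows:

insert into SYM_TRANSFORM_TABLE (transform_id, source_node_group_id, target_node_group_id, transform_point, source_table_name, target_table_name, update_first, delete_action, transform_order, column_policy, create_time, last_update_time) values ('itemToItemCopy', 'corp', 'store', 'LOAD', 'ITEM', 'ITEM_COPY', 0, 'DEL_ROW', 1, 'IMPLIED', current_timestamp, current_timestamp);

Because the column_policy is IMPLIED, no column-level rows are strictly required; source columns are passed through by name.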
- -
Transformation Types

There are several pre-defined transform types available in SymmetricDS. Additional ones can be defined by creating and configuring an extension point which implements the IColumnTransform interface. The pre-defined transform types include the following (the transform_type entry is shown in parentheses):

Copy Column Transform ('copy'): This transformation type copies the source column value to the target column. This is the default behavior.

Remove Column Transform ('remove'): This transformation type removes the source column. This transform type is only valid for a table transformation type of 'IMPLIED' where all the columns from the source are automatically copied to the target.

Constant Transform ('const'): This transformation type allows you to map a constant value to the given target column. The constant itself is placed in transform_expression.

Variable Transform ('variable'): This transformation type allows you to map a built-in dynamic variable to the given target column. The variable name is placed in transform_expression. The following variables are available: system_date is the current system date, system_timestamp is the current system date and time, source_node_id is the node id of the source, target_node_id is the node id of the target, null is a null value, old_column_value is the column's old value prior to the DML operation, source_table_name is the name of the source table as captured in the trigger hist table, source_catalog_name is the name of the source catalog as captured in the trigger hist table, and source_schema_name is the name of the source schema as captured in the trigger hist table.

Additive Transform ('additive'): This transformation type is used for numeric data. It computes the change between the old and new values on the source and then adds the change to the existing value in the target column. That is, target = target + multiplier * (source_new - source_old), where multiplier is a constant found in the transform_expression (default is 1 if not specified). For example, if the source column changed from 2 to 4, the target column is currently 10, and the multiplier is 3, the effect of the transform will be to change the target column to a value of 16 ( 10 + 3 * (4 - 2) => 16 ). Note that, in the case of deletes, the new column value is considered 0 for the purposes of the calculation.

Left Transform ('left'): This transformation keeps the first N characters of a column. The transform_expression is the number of characters to keep.

Binary Left Transform ('bleft'): This transformation keeps characters that are part of the first N bytes of a column. The transform_expression is the number of bytes to keep.

Substring Transform ('substr'): This transformation computes a substring of the source column data and uses the substring as the target column value. The transform_expression can be a single integer (n, the beginning index) or a pair of comma-separated integers (n,m, the beginning and ending index). The transform behaves as the Java substring function would using the specified values in transform_expression.

Multiplier Transform ('multiply'): This transformation allows for the creation of multiple rows in the target table based on the transform_expression. This transform type can only be used on a primary key column.
The transform_expression is a SQL statement that returns the list to be used to create the multiple targets.

Lookup Transform ('lookup'): This transformation determines the target column value by using a query, contained in transform_expression, to look up the value in another table. The query must return a single row, and the first column of the query is used as the value. Your query references source column names by prefixing them with a colon (e.g., :MY_COLUMN).

BeanShell Script Transform ('bsh'): This transformation allows you to provide a BeanShell script in transform_expression and executes the script at the time of transformation. Some variables are provided to the script: COLUMN_NAME is a variable for a source column in the row, where the variable name is the column name in uppercase; currentValue is the value of the current source column; oldValue is the old value of the source column for an updated row; sqlTemplate is a org.jumpmind.db.sql.ISqlTemplate object for querying or updating the database; channelId is a reference to the channel on which the transformation is happening; sourceNode is a org.jumpmind.symmetric.model.Node object that represents the node from where the data came; targetNode is a org.jumpmind.symmetric.model.Node object that represents the node where the data is being loaded.

Java Transform ('java'): Use Java code in the transform expression that is included in the transform method of a class that extends JavaColumnTransform. The class is compiled whenever the transform expression changes and kept in memory for runtime. The code must return a String for the new value of the column being mapped. The following variables are available: platform is the IDatabasePlatform that contains objects for the database platform, such as DatabaseInfo, IDdlReader, IDdlBuilder, and ISqlTemplate; context is the DataContext that contains information about the current row and the data loader session, such as Batch, Table, and CsvData; column is the TransformColumn that contains information from the TRANSFORM_COLUMN configuration; data is the TransformedData that contains information about the source and target values being transformed, including the TransformTable; sourceValues is a Map<String, String> containing all source column values for the row; newValue is a String for the new value of the column; oldValue is a String for the old value of the column if the event is an update or delete.

Identity Transform ('identity'): This transformation allows you to insert into an identity column by computing a new identity, not copying the actual identity value from the source.

Mathematical Transform ('math'): This transformation allows you to perform mathematical equations in the transform expression. Some variables are provided to the script: #{COLUMN_NAME} is a variable for a source column in the row, where the variable name is the column name in uppercase; #{currentValue} is the value of the current source column; #{oldValue} is the old value of the source column for an updated row.

Copy If Changed Transform ('copyIfChanged'): This transformation will copy the value to the target column if the source value has changed. More specifically, the copy will occur if the old value of the source does not equal the new value.
If the old and new values are, in fact, equal, then either the column will be ignored or the row will be ignored, based on the setting of the transform expression. If the transform expression is equal to the string 'IgnoreColumn', the column will be ignored; otherwise, the row will be ignored.

Value Map Transform ('valueMap'): This transformation allows for simple value substitutions through use of the transform expression. The transform expression should consist of a space-separated list of value pairs of the format sourceValue=TargetValue. The column value is used to locate the correct sourceValue, and the transform will change the value into the corresponding targetValue. A sourceValue of * can be used to represent a default target value in the event that the sourceValue is not found. Otherwise, if no default value is found, the result will be null. For example, consider the following transform expression: s1=t1 s2=t2 s3=t3 *=t4. A source value of s1 will be transformed to t1, s2 to t2, s3 to t3, s4 to t4, s5 to t4, null to t4, etc.

Clarion Date Time ('clarionDateTime'): Convert a Clarion date with optional time into a timestamp. Clarion dates are stored as the number of days since December 28, 1800, while Clarion times are stored as hundredths of a second since midnight, plus one. Use a source column of the Clarion date and a target column of the timestamp. Optionally, in the transform expression, enter the name of the Clarion time column.

Columns To Rows ('columnsToRowsKey' and 'columnsToRowsValue'): Convert column values from a single source row into a row per column value at the target. Two column mappings are needed to complete the work: use "columnsToRowsKey" to map which source column is used, and use "columnsToRowsValue" to map the value. The "columnsToRowsKey" mapping requires an expression in the format of "column1=key1,column2=key2" to list the source column names and which key value is stored in the target column. The "columnsToRowsValue" mapping sets the column's value at the target and allows an optional expression: "changesOnly=true" to convert only rows when the old and new values have changed; "ignoreNulls=true" to convert only rows that are not null. For example, column "fieldid" mapped as "columnsToRowsKey" with an expression of "user1=1,user2=2" and column "color" mapped as "columnsToRowsValue" would convert a row with columns named "user1" and "user2" containing values "red" and "blue" into two rows with columns "fieldid" and "color" containing a row of "1" and "red" and a row of "2" and "blue".
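Building on the earlier TRANSFORM_TABLE sketch, here is a hypothetical pair of column mappings (transform id and column names are invented) that marks the primary key with the default 'copy' type and fills a target column with a constant:

insert into SYM_TRANSFORM_COLUMN (transform_id, include_on, target_column_name, source_column_name, pk, transform_type, transform_expression, transform_order, create_time, last_update_time) values ('itemToItemCopy', '*', 'ITEM_ID', 'ITEM_ID', 1, 'copy', null, 1, current_timestamp, current_timestamp);

insert into SYM_TRANSFORM_COLUMN (transform_id, include_on, target_column_name, source_column_name, pk, transform_type, transform_expression, transform_order, create_time, last_update_time) values ('itemToItemCopy', '*', 'SOURCE_SYSTEM', null, 0, 'const', 'corp', 2, current_timestamp, current_timestamp);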
-
-
Load Filters

New as of SymmetricDS 3.1, SymmetricDS is capable of taking actions upon the load of certain data via configurable load filters. This configurable option is in addition to the already existing option of writing a class that implements the data loader filter extension point. A configurable load filter watches for specific data that is being loaded and then takes action based on the load of that data.

You specify which data to act on by specifying a source and target node group (data extracted from this node group, and loaded into that node group), and a target catalog, schema and table name. You can decide to take action on rows that are inserted, updated and/or deleted, and can also further delineate which rows of the target table to act on by specifying additional criteria in the BeanShell script that is executed in response to the loaded data. As an example, old and new values for the row of data being loaded are available in the BeanShell script, so you can act on rows with a certain column value in old or new data.

The action taken is based on a BeanShell script that you can provide as part of the configuration. Actions can be taken at different points in the load process, including before write, after write, at batch complete, at batch commit and/or at batch rollback.
Load Filter Configuration Table

SymmetricDS stores its load filter configuration in a single table called LOAD_FILTER. The load filter table allows you to specify the following:

Load Filter Type ('load_filter_type'): The type of load filter. Today only BeanShell is supported ('BSH'), but SQL scripts may be added in a future release.

Source Node Group ('source_node_group_id'): The source node group for which you would like to watch for changes.

Target Node Group ('target_node_group_id'): The target node group for which you would like to watch for changes. The source and target node groups are used together to identify the node group link for which you would like to watch for changes (i.e., when the Server node group sends data to a Client node group).

Target Catalog ('target_catalog_name'): The name of the target catalog for which you would like to watch for changes.

Target Schema ('target_schema_name'): The name of the target schema for which you would like to watch for changes.

Target Table ('target_table_name'): The name of the target table for which you would like to watch for changes. The target catalog, target schema and target table name are used together to fully qualify the table for which you would like to watch for changes.

Filter on Update ('filter_on_update'): Determines whether the load filter takes action (executes) on a database update statement.

Filter on Insert ('filter_on_insert'): Determines whether the load filter takes action (executes) on a database insert statement.

Filter on Delete ('filter_on_delete'): Determines whether the load filter takes action (executes) on a database delete statement.

Before Write Script ('before_write_script'): The script to execute before the database write occurs.

After Write Script ('after_write_script'): The script to execute after the database write occurs.

Batch Complete Script ('batch_complete_script'): The script to execute after the entire batch completes.

Batch Commit Script ('batch_commit_script'): The script to execute after the entire batch is committed.

Batch Rollback Script ('batch_rollback_script'): The script to execute if the batch rolls back.

Handle Error Script ('handle_error_script'): A script to execute if data cannot be processed.

Load Filter Order ('load_filter_order'): The order in which load filters should execute if there are multiple scripts pertaining to the same source and target data.
- -
Variables available to Data Load Filters

As part of the BeanShell load filters, SymmetricDS provides certain variables for use in the BeanShell script. Those variables include:

Symmetric Engine ('ENGINE'): The SymmetricDS engine object.

Source Values ('<COLUMN_NAME>'): The source values for the row being inserted, updated or deleted.

Old Values ('OLD_<COLUMN_NAME>'): The old values for the row being inserted, updated or deleted.

Data Context ('CONTEXT'): The data context object for the data being inserted, updated or deleted.

Table Data ('TABLE'): The table object for the table being inserted, updated or deleted.
- -
Data Load Filter Example

The following is an example of a load filter that watches a table named TABLE_TO_WATCH being loaded from the Server node group to the Client node group for inserts or updates, and performs an initial load on a table named TABLE_TO_RELOAD, where KEY_FIELD on the reload table equals the KEY_FIELD column on the TABLE_TO_WATCH table.

insert into sym_load_filter (LOAD_FILTER_ID, LOAD_FILTER_TYPE, SOURCE_NODE_GROUP_ID, TARGET_NODE_GROUP_ID, TARGET_CATALOG_NAME, TARGET_SCHEMA_NAME, TARGET_TABLE_NAME, FILTER_ON_UPDATE, FILTER_ON_INSERT, FILTER_ON_DELETE, BEFORE_WRITE_SCRIPT, AFTER_WRITE_SCRIPT, BATCH_COMPLETE_SCRIPT, BATCH_COMMIT_SCRIPT, BATCH_ROLLBACK_SCRIPT, HANDLE_ERROR_SCRIPT, CREATE_TIME, LAST_UPDATE_BY, LAST_UPDATE_TIME, LOAD_FILTER_ORDER, FAIL_ON_ERROR) values ('TABLE_TO_RELOAD','BSH','Client','Server',NULL,NULL, 'TABLE_TO_WATCH',1,1,0,null, 'engine.getDataService().reloadTable(context.getBatch().getSourceNodeId(), table.getCatalog(), table.getSchema(), "TABLE_TO_RELOAD","KEY_FIELD=''" + KEY_FIELD + "''");' ,null,null,null,null,sysdate,'userid',sysdate,1,1);
-
-
Grouplets

As you probably know by now, SymmetricDS stores its single configuration centrally and distributes it to all nodes. By default, a trigger-router is in effect for all nodes in the source node group or target node group. Triggers will be established on each node that is a member of the source node group, and changes will be routed to all relevant nodes that are members of the target node group. If, for example, the router routes to "all" nodes, "all" means every node that is in the target node group. This is the default behavior of SymmetricDS.

Once in production, however, you will likely find you need or want to make configuration changes to triggers and routers as new features are rolled out to your network of SymmetricDS nodes. You may, for example, wish to "pilot" a new configuration, containing new synchronizations, only on specific nodes initially, and then increase the size of the pilot over time. For this purpose, SymmetricDS provides the ability to specify that only particular trigger-router combinations are applicable to particular nodes. It does this by allowing you to define an arbitrary collection of nodes, called a "grouplet", and then choosing which trigger-routers apply to the normal set of nodes (the default behavior) and which apply just to nodes in one or more "grouplets". This allows you, essentially, to filter the list of nodes that would otherwise be included as source nodes and/or target nodes. Through the use of grouplets, you can, for example, specify a subset of nodes on which a given trigger would be created. It also allows you to specify a subset of the normal set of nodes a change would be routed to. This behavior is in addition to, and occurs before, any subsetting or filtering the router might otherwise do.

In its simplest form, a grouplet is just an arbitrary collection of nodes. To define a grouplet, you start by creating a grouplet with a unique id, a description, and a link policy, as defined in GROUPLET. To define which nodes are members of (or are not members of) a grouplet, you provide a list of external ids of the nodes in GROUPLET_LINK. How those external ids are used varies based on the grouplet link policy. The grouplet_link_policy can be either I or E, representing an "inclusive" list of nodes or an "exclusive" list of nodes, respectively. In the case of "inclusive", you'll be listing each external id to be included in the grouplet. In the case of "exclusive", all nodes will be included in the grouplet except ones which have an external id in the list of external ids.

Once you have defined your grouplet and which nodes are members of it, you can tie a grouplet to a given trigger-router through the use of TRIGGER_ROUTER_GROUPLET. If a particular trigger-router does not appear in this table, SymmetricDS behaves as normal. If, however, an entry for a particular trigger-router appears in this table, the default behavior is overridden based on the grouplet_id and applies_when settings. The grouplet id provides the node list, and the applies_when indicates whether the grouplet nodes are to be used to filter the source node list, the target node list, or both (settings are "S", "T", and "B", respectively). Nodes that survive the filtering process as a source will have a trigger defined, and nodes that survive the filtering process as a target are eligible nodes that can be routed to.
Grouplet Example

At this point, an example would probably be useful. Picture the case where you have 100 retail stores (each containing one database, and each a member of the "store" node group) and a central office database (external id of corp, and a member of the "corp" node group). You wish to pilot two new triggers and routers for a new feature on your point-of-sale software (one which moves data from corp to store, and one which moves data from store to corp), but you only want the triggers to be installed on 10 specific stores that represent your "pilot" stores. In this case, the simplest approach would be to define a grouplet with, say, a grouplet id of "pilot". We'd use a grouplet link policy of "inclusive" and list each of the 10 external ids in the GROUPLET_LINK table.

For the trigger-router meant to send data from corp to store, we'd create an entry in TRIGGER_ROUTER_GROUPLET for our grouplet id of "pilot", and we'd specify "T" (target) as the applies_when setting. In this way, the source node list is not filtered, but the target node list used during routing will filter the potential target nodes to just our pilot stores. For the trigger-router meant to send data from a pilot store back to corp, we would have the grouplet apply when the node is in the source node list (i.e., applies_when will be "S"). This will cause the trigger to only be created for stores in the pilot list and not other stores. Sample SQL for this setup is sketched after this section.

An important thing to mention in this example: since your grouplet only included the store nodes, you can't simply specify "both" for the applies_when setting. For the corp-to-store trigger, for example, if you had said "both", no trigger would have been installed in corp, since the grouplet nodes represent all possible source nodes as well as target nodes, and "corp" is not in the list! The same is true for the store-to-corp trigger-router as well. You could, however, use "both" as the applies_when if you had included the "corp" external id in with the list of the 10 pilot store external ids.
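A minimal sketch of that pilot configuration, assuming the standard GROUPLET, GROUPLET_LINK, and TRIGGER_ROUTER_GROUPLET columns and hypothetical trigger and router ids:

insert into SYM_GROUPLET (grouplet_id, grouplet_link_policy, description, create_time, last_update_time) values ('pilot', 'I', 'Pilot stores for the new POS feature', current_timestamp, current_timestamp);

insert into SYM_GROUPLET_LINK (grouplet_id, external_id, create_time, last_update_time) values ('pilot', 'store-0001', current_timestamp, current_timestamp);
-- repeat for the other nine pilot store external ids

insert into SYM_TRIGGER_ROUTER_GROUPLET (grouplet_id, trigger_id, router_id, applies_when, create_time, last_update_time) values ('pilot', 'new_feature_trigger', 'corp_2_store', 'T', current_timestamp, current_timestamp);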
-
-
Parameters

Parameters can be used to help tune and configure your SymmetricDS installation. Parameters can be set for an individual node or for all nodes in your network. See the parameter appendix for a complete list of parameters.
-
- Export -
-
- Import -
-
- Uninstall -
- - -
diff --git a/symmetric-assemble/src/docbook/configuration/channels.xml b/symmetric-assemble/src/docbook/configuration/channels.xml deleted file mode 100644 index be1e410d78..0000000000 --- a/symmetric-assemble/src/docbook/configuration/channels.xml +++ /dev/null @@ -1,269 +0,0 @@ -
Channels

Channels group the data to be synchronized. Channels can be disabled, suspended, or scheduled as needed.

Channel ID - Identifier used throughout the system to identify a given channel.

Enabled - Indicator to determine if a channel is currently in use.

Processing Order - Numeric value to determine the order in which a channel will be used.

Batch Algorithm - Batching is the grouping of data, by channel, to be transferred and committed at the client together. The following channel batching algorithms are available:

Default - All changes that happen in a transaction are guaranteed to be batched together. Multiple transactions will be batched and committed together until there is no more data to be sent or the max_batch_size is reached.

Transactional - Batches will map directly to database transactions. If there are many small database transactions, then there will be many batches. The max_batch_size column has no effect.

Nontransactional - Multiple transactions will be batched and committed together until there is no more data to be sent or the max_batch_size is reached. The batch will be cut off at the max_batch_size regardless of whether it is in the middle of a transaction.
- -
-
Reload Channel - Indicator to determine if a channel is available for initial loads and reverse initial loads.

File Sync Channel - Indicator to determine if a channel is available for file synchronization.

Max Batch Size - Specifies the maximum number of data events to process within a batch for this channel.

Max Batch To Send - Specifies the maximum number of batches to send for a given channel during a 'synchronization' between two nodes. A 'synchronization' is equivalent to a push or a pull. For example, if there are 12 batches ready to be sent for a channel and max_batch_to_send is equal to 10, then only the first 10 batches will be sent even though 12 batches are ready.

Max Data To Route - Specifies the maximum number of data rows to route for a channel at a time.

Use Old Data To Route - Indicates if the old data will be included for routing. Routing can then use this data for processing.

Use Row Data To Route - Indicates if the current data will be included for routing. Routing can then use this data for processing.

Use Primary Key (PK) Data to Route - Indicates if the primary key data will be included for routing. For example, a store ID might be needed to apply routing logic before sending data to the appropriate target nodes.

Tables Contain Big Lobs - Indicator to provide SymmetricDS the hint that the channel contains big lobs. Some databases have shortcuts that SymmetricDS can take advantage of if it knows that the lob columns aren't going to contain large lobs. The definition of how large a 'big' lob is varies from database to database.

Data Loader Type - Determines how data will be loaded into the target tables. These are used during an initial load or a reverse initial load. The available data loader types are:

default - Performs an insert first and if this fails will fall back to an update to load the data.

ftp_localhost - Sends the data in CSV format to a configured ftp location. These locations are set up in the {SYM_HOME}/conf/ftp-extensions.xml

mysql_bulk - MySQL bulk loader. See the MySQL database notes for more info.

mssql_bulk - Microsoft SQL Server bulk loader. See the SQL Server database notes for more info.

postgres_bulk - PostgreSQL bulk loader. See the PostgreSQL database notes for more info.

oracle_bulk - Oracle bulk loader. See the Oracle database notes for more info.
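As an illustrative sketch (the channel id and settings here are hypothetical, and remaining columns are assumed to take their defaults), a channel configured to use one of the bulk loaders above might be created like this:

insert into SYM_CHANNEL (channel_id, processing_order, max_batch_size, enabled, data_loader_type, description) values ('item_bulk', 20, 10000, 1, 'postgres_bulk', 'Item data loaded with the PostgreSQL bulk loader');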
-
-
-
Provided Channels

Config - Used for all SymmetricDS configuration.

Default - Default channel used for all data synchronization (except initial loads; see the reload channel).

Dynamic -

Filesync - Default channel used for all file synchronizations.

Filesync_reload - Used for initial loads relating to file synchronization.

Heartbeat - Sends heartbeat messages between nodes to ensure they are online.

Reload - Handles initial loads and reverse initial loads.
-
-
-
Channel Examples

Channel Tips and Tricks

Transactions will NOT be preserved across channels, so it is important to set up channels to contain all tables that participate in a given transaction.

Increase performance by creating designated channels for tables that use LOB data types. For these channels, be sure to check "Tables Contain Big Lobs" to increase performance.
\ No newline at end of file
diff --git a/symmetric-assemble/src/docbook/configuration/examples/channels.xml b/symmetric-assemble/src/docbook/configuration/examples/channels.xml
deleted file mode 100644
index e88490d1a0..0000000000
--- a/symmetric-assemble/src/docbook/configuration/examples/channels.xml
+++ /dev/null
@@ -1,9 +0,0 @@

insert into SYM_CHANNEL (channel_id, processing_order, max_batch_size, max_batch_to_send, extract_period_millis, batch_algorithm, enabled, description) values ('item', 10, 1000, 10, 0, 'default', 1, 'Item and pricing data');

insert into SYM_CHANNEL (channel_id, processing_order, max_batch_size, max_batch_to_send, extract_period_millis, batch_algorithm, enabled, description) values ('sale_transaction', 1, 1000, 10, 60000, 'transactional', 1, 'retail sale transactions from register');

\ No newline at end of file
diff --git a/symmetric-assemble/src/docbook/configuration/examples/group-links.xml b/symmetric-assemble/src/docbook/configuration/examples/group-links.xml
deleted file mode 100644
index 4acdd49ed9..0000000000
--- a/symmetric-assemble/src/docbook/configuration/examples/group-links.xml
+++ /dev/null
@@ -1,9 +0,0 @@

insert into SYM_NODE_GROUP_LINK (source_node_group_id, target_node_group_id, data_event_action) values ('store', 'corp', 'P');

insert into SYM_NODE_GROUP_LINK (source_node_group_id, target_node_group_id, data_event_action) values ('corp', 'store', 'W');

\ No newline at end of file
diff --git a/symmetric-assemble/src/docbook/configuration/examples/groups.xml b/symmetric-assemble/src/docbook/configuration/examples/groups.xml
deleted file mode 100644
index fd9998e03c..0000000000
--- a/symmetric-assemble/src/docbook/configuration/examples/groups.xml
+++ /dev/null
@@ -1,9 +0,0 @@

insert into SYM_NODE_GROUP (node_group_id, description) values ('store', 'A retail store node');

insert into SYM_NODE_GROUP (node_group_id, description) values ('corp', 'A corporate node');

\ No newline at end of file
diff --git a/symmetric-assemble/src/docbook/configuration/group-links.xml b/symmetric-assemble/src/docbook/configuration/group-links.xml
deleted file mode 100644
index f87a038068..0000000000
--- a/symmetric-assemble/src/docbook/configuration/group-links.xml
+++ /dev/null
@@ -1,97 +0,0 @@

\ No newline at end of file
diff --git a/symmetric-assemble/src/docbook/configuration/groups.xml b/symmetric-assemble/src/docbook/configuration/groups.xml
deleted file mode 100644
index 45a5a84961..0000000000
--- a/symmetric-assemble/src/docbook/configuration/groups.xml
+++ /dev/null
@@ -1,49 +0,0 @@
- - Groups [<xref linkend="table_node_group" xrefstyle="table" />] - - Groups allow you to organize nodes with a similar configuration. They are the building blocks for creating group - links which define how information moves through your network. - - - - - Group ID [NODE_GROUP_ID] - - - Unique identifier for the group. - - - - - Description [DESCRIPTION] - - - Description of the group that is available through the console. - - - - - - Group Examples - - - - - Group Tips and Tricks - - A node can only belong to ONE group - - Create a group for similar nodes that will pass data in the same manner. - - - -
\ No newline at end of file
diff --git a/symmetric-assemble/src/docbook/configuration/tips/group-links.xml b/symmetric-assemble/src/docbook/configuration/tips/group-links.xml
deleted file mode 100644
index 407c896312..0000000000
--- a/symmetric-assemble/src/docbook/configuration/tips/group-links.xml
+++ /dev/null
@@ -1,5 +0,0 @@

The link also defines if configuration data will be synchronized on the link. For example, you might not want remote nodes to be able to change configuration and affect other nodes in the network. In this case, you would set sync_config_enabled to 0 on the appropriate link.

\ No newline at end of file
diff --git a/symmetric-assemble/src/docbook/conflicts.xml b/symmetric-assemble/src/docbook/conflicts.xml
deleted file mode 100644
index 71c526e1b7..0000000000
--- a/symmetric-assemble/src/docbook/conflicts.xml
+++ /dev/null
@@ -1,247 +0,0 @@
Conflict Detection and Resolution

Conflict detection and resolution is new as of SymmetricDS 3.0. Conflict detection is the act of determining if an insert, update or delete is in "conflict" due to the target data row not being consistent with the data at the source prior to the insert/update/delete. Conflict resolution is the act of figuring out what to do when a conflict is detected.

Conflict detection and resolution strategies are configured in the CONFLICT table. They are configured at minimum for a specific node group link. The configuration can also be specific to a channel and/or table.

Conflict detection is configured in the detect_type and detect_expression columns of CONFLICT. The value for detect_expression depends on the detect_type. Conflicts are detected while data is being loaded into a target system.

USE_PK_DATA - Indicates that only the primary key is used to detect a conflict. If a row exists with the same primary key, then no conflict is detected during an update or a delete. Updates and deletes are resolved using only the primary key columns. If a row already exists during an insert, then a conflict has been detected.

USE_OLD_DATA - Indicates that all of the old data values are used to detect a conflict. Old data is the data values of the row on the source system prior to the change. If a row exists with the same old values on the target system as they were on the source system, then no conflict is detected during an update or a delete. If a row already exists during an insert, then a conflict has been detected. Note that some platforms do not support comparisons of binary columns. Conflicts in binary column values will not be detected on the following platforms: DB2, DERBY, ORACLE, and SQLSERVER.

USE_CHANGED_DATA - Indicates that the primary key plus any data that has changed on the source system will be used to detect a conflict. If a row exists with the same old values on the target system as they were on the source system for the columns that have changed on the source system, then no conflict is detected during an update or a delete. If a row already exists during an insert, then a conflict has been detected. Note that some platforms do not support comparisons of binary columns. Conflicts in binary column values will not be detected on the following platforms: DB2, DERBY, ORACLE, and SQLSERVER. The detect_expression can be used to exclude certain column names from being used. In order to exclude column1 and column2, the expression would be: excluded_column_names=column1,column2

USE_TIMESTAMP - Indicates that the primary key plus a timestamp column (as configured in detect_expression) will indicate whether a conflict has occurred. If the target timestamp column is not equal to the old source timestamp column, then a conflict has been detected. If a row already exists during an insert, then a conflict has been detected.

USE_VERSION - Indicates that the primary key plus a version column (as configured in detect_expression) will indicate whether a conflict has occurred. If the target version column is not equal to the old source version column, then a conflict has been detected. If a row already exists during an insert, then a conflict has been detected.

Be aware that conflict detection will not detect changes to binary columns in the case where use_stream_lobs is true in the trigger for the table.
In addition, some databases do not allow comparisons of binary columns whether use_stream_lobs is true or not.

The choice of how to resolve a detected conflict is configured via the resolve_type column. Depending on the setting, two additional boolean settings may also be configured, namely resolve_row_only and resolve_changes_only, as discussed in the resolution settings below.

FALLBACK - Indicates that when a conflict is detected the system should automatically apply the changes anyway. If the source operation was an insert, then an update will be attempted. If the source operation was an update and the row does not exist, then an insert will be attempted. If the source operation was a delete and the row does not exist, then the delete will be ignored. The resolve_changes_only flag controls whether all columns will be updated or only columns that have changed will be updated during a fallback operation.

IGNORE - Indicates that when a conflict is detected the system should automatically ignore the incoming change. The resolve_row_only column controls whether the entire batch should be ignored or just the row in conflict.

MANUAL - Indicates that when a conflict is detected the batch will remain in error until manual intervention occurs. A row in error is inserted into the INCOMING_ERROR table. The conflict detection id that detected the conflict is recorded (i.e., the conflict_id value from CONFLICT), along with the old data, new data, and the "current data" (by current data, we mean the unexpected data at the target which doesn't match the old data as expected) in columns old_data, new_data, and cur_data. In order to resolve, the resolve_data column can be manually filled out, which will be used on the next load attempt instead of the original source data. The resolve_ignore flag can also be used to indicate that the row should be ignored on the next load attempt.

NEWER_WINS - Indicates that when a conflict is detected by USE_TIMESTAMP or USE_VERSION, either the source or the target will win based on which side has the newer timestamp or higher version number. The resolve_row_only column controls whether the entire batch should be ignored or just the row in conflict.

For each configured conflict, you also have the ability to control if and how much "resolved" data is sent back to the node whose data change is in conflict. This "ping back" behavior is specified by the setting of the ping_back column and can be one of the following values:

OFF - No data is sent back to the originating node, even if the resolved data doesn't match the data the node sent.

SINGLE_ROW - The resolved data of the single row in the batch that caused the conflict is sent back to the originating node.

REMAINING_ROWS - The resolved data of the single row in the batch in conflict, along with the entire remainder of the batch, is sent back to the originating node.
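As an illustrative sketch (the conflict_id, node group ids, and timestamp column name are hypothetical, and unlisted columns are assumed to take their defaults), a NEWER_WINS strategy using timestamp detection could be configured as follows:

insert into SYM_CONFLICT (conflict_id, source_node_group_id, target_node_group_id, detect_type, detect_expression, resolve_type, ping_back, create_time, last_update_time) values ('corp-2-store-newer-wins', 'corp', 'store', 'USE_TIMESTAMP', 'last_update_time', 'NEWER_WINS', 'SINGLE_ROW', current_timestamp, current_timestamp);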
\ No newline at end of file
diff --git a/symmetric-assemble/src/docbook/data-format.xml b/symmetric-assemble/src/docbook/data-format.xml
deleted file mode 100644
index 1975fb61e1..0000000000
--- a/symmetric-assemble/src/docbook/data-format.xml
+++ /dev/null
@@ -1,225 +0,0 @@

Data Format

The SymmetricDS Data Format is used to stream data from one node to another. The data format reader and writer are pluggable, with an initial implementation using a format based on Comma Separated Values (CSV). Each line in the stream is a record with fields separated by commas. String fields are surrounded with double quotes. Double quotes and backslashes used in a string field are escaped with a backslash. Binary values are represented as a string with hex values in "\0xab" format. The absence of any value in the field indicates a null value. Extra spacing is ignored and lines starting with a hash are ignored.

The first field of each line gives the directive for the line. The following directives are used:

nodeid, {node_id} - Identifies which node the data is coming from. Occurs once in a CSV file.

binary, {BASE64|NONE|HEX} - Identifies the type of decoding the loader needs to use to decode binary data in the payload. This varies depending on which database is the source of the data.

channel, {channel_id} - Identifies which channel a batch belongs to. The SymmetricDS data loader expects the channel to be specified before the batch.

batch, {batch_id} - Uniquely identifies a batch. Used to track whether a batch has been loaded before. A batch of -9999 is considered a virtual batch and will be loaded, but will not be recorded in incoming_batch.

schema, {schema name} - The name of the schema that is being targeted.

catalog, {catalog name} - The name of the catalog that is being targeted.

table, {table name} - The name of the table that is being targeted.

keys, {column name...} - Lists the column names that are used as the primary key for the table. Only needs to occur after the first occurrence of the table.

columns, {column name...} - Lists all the column names (including key columns) of the table. Only needs to occur after the first occurrence of the table.

insert, {column value...} - Insert into the table with the values that correspond with the columns.

update, {new column value...},{old key value...} - Update the table using the old key values to set the new column values.

old, {old column value...} - Represents all the old values of the data. This data can be used for conflict resolution.

delete, {old key value...} - Delete from the table using the old key values.

sql, {sql statement} - Optional notation that instructs the data loader to run the accompanying SQL statement.

bsh, {bsh script} - Optional notation that instructs the data loader to run the accompanying BeanShell snippet.

create, {xml} - Optional notation that instructs the data loader to run the accompanying DdlUtils XML table definition in order to create a database table.

commit, {batch_id} - An indicator that the batch has been transmitted and the data can be committed to the database.
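To illustrate the directives above (the original "Data Format Stream" listing referenced below is not included in this diff), a minimal hypothetical stream for a single insert batch might look like the following; the node id, table, and values are invented:

nodeid, store-001
binary, NONE
channel, default
batch, 1234
table, item
keys, item_id
columns, item_id, price
insert, "1100", "9.99"
commit, 1234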
Data Format Stream

diff --git a/symmetric-assemble/src/docbook/databases.xml b/symmetric-assemble/src/docbook/databases.xml
deleted file mode 100644
index 594515bdd5..0000000000
--- a/symmetric-assemble/src/docbook/databases.xml
+++ /dev/null
@@ -1,782 +0,0 @@

Database Notes

Each database management system has its own characteristics that result in varying feature coverage in SymmetricDS. The following table shows which features are available by database.

Support by Database

|===
|Database |Versions supported |Transaction Identifier |Data Capture |Conditional Sync |Update Loop Prevention |BLOB Sync |CLOB Sync

|Oracle |10g and above |Y |Y |Y |Y |Y |Y
|MySQL |5.0.2 and above |Y |Y |Y |Y |Y |Y
|MariaDB |5.1 and above |Y |Y |Y |Y |Y |Y
|PostgreSQL |8.2.5 and above |Y (8.3 and above only) |Y |Y |Y |Y |Y
|Greenplum |8.2.15 and above |N |N |N |Y |N |N
|SQL Server |2005 and above |Y |Y |Y |Y |Y |Y
|SQL Server Azure |Tested on 11.00.2065 |Y |Y |Y |Y |Y |N
|HSQLDB |1.8 |Y |Y |Y |Y |Y |Y
|HSQLDB |2.0 |N |Y |Y |Y |Y |Y
|H2 |1.x |Y |Y |Y |Y |Y |Y
|Apache Derby |10.3.2.1 |Y |Y |Y |Y |Y |Y
|IBM DB2 |9.5 |N |Y |Y |Y |Y |Y
|Firebird |2.0 |Y |Y |Y |Y |Y |Y
|Informix |11 |N |Y |Y |Y |N |N
|Interbase |9.0 |N |Y |Y |Y |Y |Y
|SQLite |3.x |N |Y |Y |Y |Y |Y
|Adaptive Server Enterprise |12.5 |Y |Y |Y |Y |Y |Y
|SQL Anywhere |9 |Y |Y |Y |Y |Y |Y
|Redshift |1.0 |N |N |N |Y |N |N
|===
-
-
Oracle

SymmetricDS has bulk loading capability available for Oracle. SymmetricDS specifies data loader types on a channel by channel basis. To utilize Oracle bulk loading versus straight JDBC insert, specify the Oracle Bulk Loader ("oracle_bulk") in the data_loader_type column of sym_channel.

While BLOBs are supported on Oracle, the LONG data type is not. LONG columns cannot be accessed from triggers.

Note that while Oracle supports multiple triggers of the same type to be defined, the order in which the triggers occur appears to be arbitrary.

The SymmetricDS user generally needs privileges for connecting and creating tables (including indexes), triggers, sequences, and procedures (including packages and functions). An example of the needed grant statements is shown after these notes.

Partitioning the sym_data table by channel can help insert, routing, and extraction performance on concurrent, high throughput systems. Channels should be organized to put data that is expected to be inserted concurrently on separate channels. A partitioning sketch is also shown below. Note that both the table and the index should be partitioned. A default partition allows for more channels to be added without having to modify the partitions.

Note also that, for Oracle, you can control the amount of precision used by the Oracle triggers with the parameter oracle.template.precision, which defaults to a precision of 30,10.

If the Oracle error 'ORA-01489: result of string concatenation is too long' is encountered, you might need to set use_capture_lobs to 1 on the trigger in the sym_trigger table and resync the triggers. The error can happen when the captured data in a row exceeds 4k and no LOB columns exist in the table. By enabling use_capture_lobs, the concatenated varchar string is cast to a CLOB, which allows a length of more than 4k.
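A sketch of the grant statements, assuming the SymmetricDS user is named SYMMETRIC; tailor the list to your security policies:

----
GRANT CONNECT TO SYMMETRIC;
GRANT RESOURCE TO SYMMETRIC;
GRANT CREATE ANY TRIGGER TO SYMMETRIC;
GRANT EXECUTE ON UTL_RAW TO SYMMETRIC;
----

And a partitioning sketch with hypothetical channel names; take the exact sym_data column list from your installed version, and partition the primary key index the same way:

----
CREATE TABLE SYM_DATA
(
    DATA_ID NUMBER(38) NOT NULL,
    TABLE_NAME VARCHAR2(50) NOT NULL,
    EVENT_TYPE CHAR(1) NOT NULL,
    ROW_DATA CLOB,
    PK_DATA CLOB,
    OLD_DATA CLOB,
    TRIGGER_HIST_ID NUMBER(38) NOT NULL,
    CHANNEL_ID VARCHAR2(20),
    TRANSACTION_ID VARCHAR2(1000),
    SOURCE_NODE_ID VARCHAR2(50),
    EXTERNAL_DATA VARCHAR2(50),
    CREATE_TIME TIMESTAMP
)
PARTITION BY LIST (CHANNEL_ID)
(
    PARTITION P_CONFIG VALUES ('config'),
    PARTITION P_CHANNEL_ONE VALUES ('channel_one'),
    -- the DEFAULT partition lets new channels be added without altering partitions
    PARTITION P_DEFAULT VALUES (DEFAULT)
);
----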
-
MySQL

MySQL supports several storage engines for different table types. SymmetricDS requires a storage engine that handles transaction-safe tables. The recommended storage engine is InnoDB, which is included by default in MySQL 5.0 distributions. Either select the InnoDB engine during installation or modify your server configuration. To make InnoDB the default storage engine, modify your MySQL server configuration file (my.ini on Windows, my.cnf on Unix):

default-storage-engine = innodb

Alternatively, you can convert tables to the InnoDB storage engine with the following command:

alter table t engine = innodb;

On MySQL 5.0, the SymmetricDS user needs the SUPER privilege in order to create triggers.

grant super on *.* to symmetric;

On MySQL 5.1, the SymmetricDS user needs the TRIGGER and CREATE ROUTINE privileges in order to create triggers and functions.

grant trigger on *.* to symmetric;
grant create routine on *.* to symmetric;

MySQL allows '0000-00-00 00:00:00' to be entered as a value for datetime and timestamp columns. JDBC cannot deal with a date value with a year of 0. To work around this, SymmetricDS can be configured to treat date and time columns as varchar columns for data capture and data load. To enable this feature, set the db.treat.date.time.as.varchar.enabled property to true.

If you are using UTF-8 encoding in the database, you might consider using the characterEncoding parameter in the JDBC URL.
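For example, in the engine properties file (host and database name are placeholders):

----
db.url=jdbc:mysql://localhost/sample?characterEncoding=utf8
----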
-
MariaDB

See MySQL notes. In addition, you will need to use a MySQL driver for this dialect.
-
PostgreSQL

SymmetricDS has bulk loading capability available for PostgreSQL. SymmetricDS specifies data loader types on a channel by channel basis. To utilize PostgreSQL bulk loading versus straight JDBC insert, specify the Postgres Bulk Loader ("postgres_bulk") in the data_loader_type column of sym_channel.

Starting with PostgreSQL 8.3, SymmetricDS supports the transaction identifier. Binary Large Object (BLOB) replication is supported for both byte array (BYTEA) and object ID (OID) data types.

In order to function properly, SymmetricDS needs to use session variables. On PostgreSQL, session variables are enabled using a custom variable class. Add the line sketched below to the postgresql.conf file of the PostgreSQL server. This setting is required, and SymmetricDS will log an error and exit if it is not present.

Before database triggers can be created by SymmetricDS in PostgreSQL, the plpgsql language handler must be installed on the database. The statements to do so should be run by the administrator; a sketch follows.

If you want SymmetricDS to install into a schema other than public, you should alter the database user to set the default schema. In addition, you will likely need additional privileges on that schema as well; an example is included in the sketch below.
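A minimal sketch, assuming a database user named symmetric and a target schema named sym; note that the custom_variable_classes setting applies only to releases that still support it (it was removed in PostgreSQL 9.2), and the handler-function form of installing plpgsql is for older releases:

----
# postgresql.conf
custom_variable_classes = 'symmetric'
----

----
-- install the plpgsql language handler (run as administrator);
-- on newer releases plpgsql is installed by default
CREATE FUNCTION plpgsql_call_handler() RETURNS language_handler AS '$libdir/plpgsql' LANGUAGE C;
CREATE FUNCTION plpgsql_validator(oid) RETURNS void AS '$libdir/plpgsql' LANGUAGE C;
CREATE TRUSTED PROCEDURAL LANGUAGE plpgsql
    HANDLER plpgsql_call_handler VALIDATOR plpgsql_validator;

-- install into a schema other than public and grant the likely needed privileges
ALTER USER symmetric SET search_path TO sym;
GRANT USAGE ON SCHEMA sym TO symmetric;
GRANT CREATE ON SCHEMA sym TO symmetric;
----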
-
Greenplum

Greenplum is a data warehouse based on PostgreSQL. It is supported as a target platform in SymmetricDS.

SymmetricDS has bulk loading capability available for Greenplum. SymmetricDS specifies data loader types on a channel by channel basis. To utilize Greenplum bulk loading versus straight JDBC insert, specify the Postgres Bulk Loader ("postgres_bulk") in the data_loader_type column of sym_channel.
-
MS SQL Server

SQL Server was tested using the jTDS JDBC driver.

SQL Server allows the update of primary key fields via the SQL update statement. If your application allows updating of the primary key field(s) for a table, and you want those updates synchronized, you will need to set the "Handle Key Updates" field on the trigger record for that specific table. The default for Handle Key Updates is false.
-
HSQLDB

HSQLDB was implemented with the intention that the database be run embedded in the same JVM process as SymmetricDS. Instead of dynamically generating static SQL-based triggers like the other databases, HSQLDB triggers are Java classes that re-use existing SymmetricDS services to read the configuration and insert data events accordingly.

The transaction identifier support is based on SQL events that happen in a 'window' of time. The trigger(s) track when the last trigger fired. If a trigger fired within X milliseconds of the previous firing, then the current event gets the same transaction identifier as the last. If the time window has passed, then a new transaction identifier is generated.
-
H2

The H2 database allows only Java-based triggers. Therefore the H2 dialect requires that the SymmetricDS jar file be in the database's classpath.
-
Apache Derby

The Derby database can be run as an embedded database that is accessed by an application or a standalone server that can be accessed from the network. This dialect implementation creates database triggers that make method calls into Java classes. This means that the supporting JAR files need to be in the classpath when running Derby as a standalone database, which includes symmetric-ds.jar and commons-lang.jar.
-
IBM DB2

The DB2 Dialect uses global variables to enable and disable node and trigger synchronization. These variables are created automatically during the first startup. The DB2 JDBC driver should be placed in the "lib" folder.

Currently, the DB2 Dialect for SymmetricDS does not provide support for transactional synchronization. Large objects (LOB) are supported, but are limited to 16,336 bytes in size. The current features in the DB2 Dialect have been tested using DB2 9.5 on Linux and Windows operating systems.

There is currently a bug with the retrieval of auto increment columns with the DB2 9.5 JDBC drivers that causes some of the SymmetricDS configuration tables to be rebuilt when auto.config.database=true. The DB2 9.7 JDBC drivers appear to have fixed the issue; they may be used with the 9.5 database.

A system temporary tablespace with too small a page size may cause trigger build errors. Simply create a system temporary tablespace that has a bigger page size; a page size of 8k will probably suffice.
-
Firebird

The Firebird Dialect requires the installation of a User Defined Function (UDF) library in order to provide functionality needed by the database triggers. SymmetricDS includes the required UDF library, called SYM_UDF, in both source form (as a C program) and as pre-compiled libraries for both Windows and Linux. The SYM_UDF library is copied into the UDF folder within the Firebird installation directory.

For Linux users:

cp databases/firebird/sym_udf.so /opt/firebird/UDF

For Windows users:

copy databases\sym_udf.dll C:\Program Files\Firebird\Firebird_2_0\UDF

The following limitations currently exist for this dialect:

* The outgoing batch does not honor the channel size, and all outstanding data events are included in a batch.

* Syncing of Binary Large Objects (BLOB) is limited to 16K bytes per column.

* Syncing of character data is limited to 32K bytes per column.
-
Informix

The Informix Dialect was tested against Informix Dynamic Server 11.50, but older versions may also work. You need to download the Informix JDBC Driver (from the IBM Download Site) and put the ifxjdbc.jar and ifxlang.jar files in the SymmetricDS lib folder.

Make sure your database has logging enabled, which enables transaction support. Logging can be enabled when creating the database, or turned on for an existing database; a sketch follows these notes.

The following features are not yet implemented:

* Syncing of Binary and Character Large Objects (LOB) is disabled.

* There is no transaction ID recorded on data captured, so it is possible for data to be committed within different transactions on the target database. If transaction synchronization is required, either specify a custom transaction ID or configure the synchronization so data is always sent in a single batch. A custom transaction ID can be specified with the tx_id_expression on sym_trigger. The batch size is controlled with the max_batch_size on sym_channel. The pull and push jobs have runtime properties to control their interval.
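A sketch assuming a database named mydb; the ondblog invocation is an assumption from memory and should be checked against your Informix documentation:

----
-- enable logging when creating the database
CREATE DATABASE mydb WITH LOG;
----

----
# switch an existing database to buffered logging
ondblog buf mydb
----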
-
Interbase

The Interbase Dialect requires the installation of a User Defined Function (UDF) library in order to provide functionality needed by the database triggers. SymmetricDS includes the required UDF library, called SYM_UDF, in both source form (as a C program) and as pre-compiled libraries for both Windows and Linux. The SYM_UDF library is copied into the UDF folder within the Interbase installation directory.

For Linux users:

cp databases/interbase/sym_udf.so /opt/interbase/UDF

For Windows users:

copy databases\sym_udf.dll C:\CodeGear\InterBase\UDF

The Interbase dialect currently has the following limitations:

* Data capture is limited to 4 KB per row, including large objects (LOB).

* There is no transaction ID recorded on data captured. Either specify a tx_id_expression on the sym_trigger table, or set a max_batch_size on the sym_channel table that will accommodate your transactional data.
-
SQLite

For SQLite, the implementation of sync-on-incoming back off and the population of a source node id in the sym_data rows rely on the use of a context table (by default, called sym_context) to hold a boolean and node id in place of the more common methods of using temp tables (which are inaccessible from triggers) or functions (which are not available). The context table assumes there is a single thread updating the database at any one time. If that is not the case in the future, the current implementation of sync on incoming batch will be unreliable.

Nodes using SQLite should have the jobs.synchronized.enable parameter set to true. This parameter causes the jobs and push/pull threads to all run in a synchronized fashion, which is needed in the case of SQLite. A sample property setting follows this section.

The SQLite dialect has the following limitations:

* There is no transaction ID recorded on data captured. Either specify a tx_id_expression on the sym_trigger table, or set a max_batch_size on the sym_channel table that will accommodate your transactional data.

* Due to the single threaded access to SQLite, the jobs.synchronized.enable parameter should be set to true, as noted above.
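For example, in the engine properties file:

----
jobs.synchronized.enable=true
----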
-
Sybase Adaptive Server Enterprise

Adaptive Server Enterprise (ASE) was tested using the jConnect JDBC driver. The jConnect JDBC driver should be placed in the "lib" folder.

Columns of type DATETIME are accurate to 1/300th of a second, which means that the last digit of the milliseconds portion will end with 0, 3, or 6. An incoming DATETIME synced from another database will also have its milliseconds rounded to one of these digits (0 and 1 become 0; 2, 3, and 4 become 3; 5, 6, 7, and 8 become 6; 9 becomes 10). If DATETIME is used as the primary key or as one of the columns to detect a conflict, then conflict resolution could fail unless the milliseconds are rounded in the same fashion on the source system.

On ASE, each new trigger in a table for the same operation (insert, update, or delete) overwrites the previous one. No warning message displays before the overwrite occurs. When SymmetricDS is installed and configured to synchronize a table, it will install triggers that could overwrite already existing triggers on the database. New triggers created after SymmetricDS is installed will overwrite the SymmetricDS triggers. Custom trigger text can be added to the SymmetricDS triggers by modifying CUSTOM_ON_INSERT_TEXT, CUSTOM_ON_UPDATE_TEXT, and CUSTOM_ON_DELETE_TEXT on the sym_trigger table.
-
Sybase SQL Anywhere

SQL Anywhere was tested using the jConnect JDBC driver. The jConnect JDBC driver should be placed in the "lib" folder.
-
Redshift

Redshift is a managed data warehouse in the cloud from Amazon. Version 1.0 of Redshift is based on PostgreSQL 8.0, with some features modified or removed. SymmetricDS supports Redshift as a target platform where data can be loaded, but it does not support data capture. However, the initial load and reload functions are implemented, so it is possible to query rows from Redshift tables and send them to another database.

While Redshift started with PostgreSQL 8.0, there are some important differences from PostgreSQL. Redshift does not support constraints, indexes, functions, triggers, or sequences. Primary keys, foreign keys, and unique indexes can be defined on tables, but they are informational metadata that are not enforced by the system. When using the default data loader with SymmetricDS, it will enforce primary keys, either defined in the database or with the sync keys feature, by checking if a row exists before attempting an insert. However, the bulk loader does not perform this check. The data types supported are smallint, integer, bigint, decimal, real, double precision, boolean, char, varchar, date, and timestamp.

A data loader named "redshift_bulk" is a bulk loader that can be set for a channel to improve loading performance. Instead of sending individual SQL statements to the database, it creates a comma separated value (CSV) file, uploads the object to Amazon S3, and uses the COPY statement to load it. The COPY command appends the new data to any existing rows in the table. If the target table has any IDENTITY columns, the EXPLICIT_IDS option is enabled to override the auto-generated values and load the incoming values. The following parameters can be set for the bulk loader:

* redshift.bulk.load.max.rows.before.flush: When the max rows is reached, the flat file is sent to S3 and loaded into the database. The default is 100,000 rows.

* redshift.bulk.load.max.bytes.before.flush: When the max bytes is reached, the flat file is sent to S3 and loaded into the database. The default is 1,000,000,000 bytes.

* redshift.bulk.load.s3.bucket: The S3 bucket name where files are uploaded. This bucket should be created from the AWS console ahead of time.

* redshift.bulk.load.s3.access.key: The AWS access key ID to use as credentials for uploading to S3 and loading from S3.

* redshift.bulk.load.s3.secret.key: The AWS secret key to use as credentials for uploading to S3 and loading from S3.

To clean and organize tables after bulk changes, it is recommended to run a "vacuum" against individual tables or the entire database so that consistent query performance is maintained. Deletes and updates mark rows for delete that are not automatically reclaimed. New rows are stored in a separate unsorted region, forcing queries to sort on demand. Consider running a "vacuum" periodically during a maintenance window when there is minimal query activity that will be affected. If large batches are continually loaded from SymmetricDS, the "vacuum" command can be run after committing a batch by using a load filter for the "batch commit" event, as sketched below.
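A sketch of such a load filter, assuming a BeanShell ("BSH") filter type, node groups named corp and redshift, and a target table named item; the sym_load_filter column names are assumptions and should be checked against your version:

----
INSERT INTO sym_load_filter
  (load_filter_id, load_filter_type, source_node_group_id, target_node_group_id,
   target_table_name, batch_commit_script, load_filter_order, fail_on_error,
   create_time, last_update_time)
VALUES
  ('vacuum_after_load', 'BSH', 'corp', 'redshift', 'item',
   'engine.getDatabasePlatform().getSqlTemplate().update("vacuum item");',
   1, 0, current_timestamp, current_timestamp);
----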
-
diff --git a/symmetric-assemble/src/docbook/developer.xml b/symmetric-assemble/src/docbook/developer.xml deleted file mode 100644 index 60fcacdbd5..0000000000 --- a/symmetric-assemble/src/docbook/developer.xml +++ /dev/null @@ -1,39 +0,0 @@ - - - - Developer - - This chapter focuses on a variety of ways for developers to build upon and extend some of the existing features found within SymmetricDS. - - - - - - diff --git a/symmetric-assemble/src/docbook/dia/data-model-config-grouplets.dia b/symmetric-assemble/src/docbook/dia/data-model-config-grouplets.dia deleted file mode 100644 index 9e9626f4b0..0000000000 Binary files a/symmetric-assemble/src/docbook/dia/data-model-config-grouplets.dia and /dev/null differ diff --git a/symmetric-assemble/src/docbook/dia/data-model-config.dia b/symmetric-assemble/src/docbook/dia/data-model-config.dia deleted file mode 100644 index 3c9a408f59..0000000000 Binary files a/symmetric-assemble/src/docbook/dia/data-model-config.dia and /dev/null differ diff --git a/symmetric-assemble/src/docbook/dia/data-model-runtime.dia b/symmetric-assemble/src/docbook/dia/data-model-runtime.dia deleted file mode 100644 index e4d8b40ec7..0000000000 Binary files a/symmetric-assemble/src/docbook/dia/data-model-runtime.dia and /dev/null differ diff --git a/symmetric-assemble/src/docbook/dia/multi-home-combined.odg b/symmetric-assemble/src/docbook/dia/multi-home-combined.odg deleted file mode 100644 index 33312d67f8..0000000000 Binary files a/symmetric-assemble/src/docbook/dia/multi-home-combined.odg and /dev/null differ diff --git a/symmetric-assemble/src/docbook/dia/multi-home-separate.odg b/symmetric-assemble/src/docbook/dia/multi-home-separate.odg deleted file mode 100644 index 7a56f80aa3..0000000000 Binary files a/symmetric-assemble/src/docbook/dia/multi-home-separate.odg and /dev/null differ diff --git a/symmetric-assemble/src/docbook/dia/node-communication.dia b/symmetric-assemble/src/docbook/dia/node-communication.dia deleted file mode 100644 index 6ffb0c7383..0000000000 Binary files a/symmetric-assemble/src/docbook/dia/node-communication.dia and /dev/null differ diff --git a/symmetric-assemble/src/docbook/dia/overview-1.odg b/symmetric-assemble/src/docbook/dia/overview-1.odg deleted file mode 100644 index e5e2394d86..0000000000 Binary files a/symmetric-assemble/src/docbook/dia/overview-1.odg and /dev/null differ diff --git a/symmetric-assemble/src/docbook/dia/seq-node-communication.dia b/symmetric-assemble/src/docbook/dia/seq-node-communication.dia deleted file mode 100644 index 4b38e8c109..0000000000 Binary files a/symmetric-assemble/src/docbook/dia/seq-node-communication.dia and /dev/null differ diff --git a/symmetric-assemble/src/docbook/dia/seq-pull-client.dia b/symmetric-assemble/src/docbook/dia/seq-pull-client.dia deleted file mode 100644 index 66b48c4405..0000000000 Binary files a/symmetric-assemble/src/docbook/dia/seq-pull-client.dia and /dev/null differ diff --git a/symmetric-assemble/src/docbook/dia/seq-pull-server.dia b/symmetric-assemble/src/docbook/dia/seq-pull-server.dia deleted file mode 100644 index e56cb8c543..0000000000 Binary files a/symmetric-assemble/src/docbook/dia/seq-pull-server.dia and /dev/null differ diff --git a/symmetric-assemble/src/docbook/dia/seq-startup.dia b/symmetric-assemble/src/docbook/dia/seq-startup.dia deleted file mode 100644 index 0882e116f9..0000000000 Binary files a/symmetric-assemble/src/docbook/dia/seq-startup.dia and /dev/null differ diff --git a/symmetric-assemble/src/docbook/dia/three-tier-arch.dia 
b/symmetric-assemble/src/docbook/dia/three-tier-arch.dia deleted file mode 100644 index 9af66c304d..0000000000 Binary files a/symmetric-assemble/src/docbook/dia/three-tier-arch.dia and /dev/null differ diff --git a/symmetric-assemble/src/docbook/dia/three-tier-regional-arch.dia b/symmetric-assemble/src/docbook/dia/three-tier-regional-arch.dia deleted file mode 100644 index 29ff91073c..0000000000 Binary files a/symmetric-assemble/src/docbook/dia/three-tier-regional-arch.dia and /dev/null differ diff --git a/symmetric-assemble/src/docbook/dia/tutorial-arch.dia b/symmetric-assemble/src/docbook/dia/tutorial-arch.dia deleted file mode 100644 index 1f462c7dd6..0000000000 Binary files a/symmetric-assemble/src/docbook/dia/tutorial-arch.dia and /dev/null differ diff --git a/symmetric-assemble/src/docbook/dia/two-tier-arch.dia b/symmetric-assemble/src/docbook/dia/two-tier-arch.dia deleted file mode 100644 index 3e3c7fa5d3..0000000000 Binary files a/symmetric-assemble/src/docbook/dia/two-tier-arch.dia and /dev/null differ diff --git a/symmetric-assemble/src/docbook/extensions.xml b/symmetric-assemble/src/docbook/extensions.xml deleted file mode 100644 index c609abce18..0000000000 --- a/symmetric-assemble/src/docbook/extensions.xml +++ /dev/null @@ -1,267 +0,0 @@ - - -
Extension Points

SymmetricDS has a pluggable architecture that can be extended. A Java class that implements the appropriate extension point interface can implement custom logic and change the behavior of SymmetricDS to suit special needs. All supported extension points extend the IExtensionPoint interface. The available extension points are documented in the following sections.

When SymmetricDS starts up, the ExtensionPointManager searches a Spring Framework context for classes that implement the IExtensionPoint interface, then creates and registers the class with the appropriate SymmetricDS component.

Extensions should be configured in the conf/symmetric-extensions.xml file as Spring beans. The jar file that contains the extension should be placed in the web/WEB-INF/lib directory.

If an extension point needs access to SymmetricDS services or needs to connect to the database, it may implement the ISymmetricEngineAware interface in order to get a handle to the ISymmetricEngine.

The INodeGroupExtensionPoint interface may optionally be implemented to indicate that a registered extension point should only be registered with specific node groups.
IParameterFilter

Parameter values can be specified in code using a parameter filter. Note that there can be only one parameter filter per engine instance. The IParameterFilter replaces the deprecated IRuntimeConfig from prior releases.
-
IDatabaseWriterFilter

Data can be filtered or manipulated before it is loaded into the target database. A filter can change the data in a column, save it somewhere else, or do something else with the data entirely. It can also indicate, by the return value of the function call, that the data loader should continue on and load the data (by returning true) or ignore it (by returning false). One possible use of the filter, for example, might be to route credit card data to a secure database and blank it out as it loads into a less-restricted reporting database.

A DataContext is passed to each of the callback methods. A new context is created for each synchronization. The context provides a mechanism to share data during the load of a batch between different rows of data that are committed in a single database transaction.

The filter also provides callback methods for the batch lifecycle. The DatabaseWriterFilterAdapter may be used if not all methods are required.

A class implementing the IDatabaseWriterFilter interface is injected onto the DataLoaderService in order to receive callbacks when data is inserted, updated, or deleted.

The filter class should be specified in conf/symmetric-extensions.xml as a Spring bean, as sketched below.
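A minimal sketch of the bean definition and a corresponding filter class; the bean id, the com.mydomain package, and the exact callback signature are assumptions against the 3.x API and should be verified for your release:

----
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xsi:schemaLocation="http://www.springframework.org/schema/beans
           http://www.springframework.org/schema/beans/spring-beans-3.0.xsd">

    <!-- registered automatically by the ExtensionPointManager at startup -->
    <bean id="myDataFilter" class="com.mydomain.MyDataFilter"/>

</beans>
----

----
package com.mydomain;

import org.jumpmind.db.model.Table;
import org.jumpmind.symmetric.io.data.CsvData;
import org.jumpmind.symmetric.io.data.DataContext;
import org.jumpmind.symmetric.io.data.writer.DatabaseWriterFilterAdapter;

// Hypothetical filter: skip loading rows for a table named CREDIT_CARD.
public class MyDataFilter extends DatabaseWriterFilterAdapter {

    @Override
    public boolean beforeWrite(DataContext context, Table table, CsvData data) {
        // returning false tells the data loader to ignore this row
        return !"CREDIT_CARD".equalsIgnoreCase(table.getName());
    }
}
----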
- -
IDatabaseWriterErrorHandler

Implement this extension point to override how errors are handled. You can use this extension point to ignore rows that produce foreign key errors.
- -
IDataLoaderFactory

Implement this extension point to provide a different implementation of the org.jumpmind.symmetric.io.data.IDataWriter that is used by the SymmetricDS data loader. Data loaders are configured for a channel. After this extension point is registered, it can be activated for a channel by indicating the data loader name in the data_loader_type column.

SymmetricDS has two out-of-the-box extensions of IDataLoaderFactory already implemented in its PostgresBulkDataLoaderFactory and OracleBulkDataLoaderFactory classes. These extension points implement bulk data loading capabilities for the Oracle, Postgres, and Greenplum dialects. See Appendix C, Database Notes, for details.

Another possible use of this extension point is to route data to a NoSQL data sink.
-
IAcknowledgeEventListener

Implement this extension point to receive callback events when a batch is acknowledged. The callback for this listener happens at the point of extraction.
-
IReloadListener

Implement this extension point to listen in and take action before or after a reload is requested for a node. The callback for this listener happens at the point of extraction.
-
ISyncUrlExtension

This extension point is used to select an appropriate URL based on the URI provided in the sync_url column of sym_node.

To use this extension point, configure the sync_url for a node with the protocol of ext://beanName. The beanName is the name you give the extension point in the extension xml file.
-
IColumnTransform

This extension point allows custom column transformations to be created. There are a handful of out-of-the-box implementations. If any of these do not meet the column transformation needs of the application, then a custom transform can be created and registered. It can be activated by referencing the column transform's name in the transform_type column of sym_transform_column.
-
INodeIdCreator

This extension point allows SymmetricDS users to implement their own algorithms for how node ids and passwords are generated or selected during the registration process. There may be only one node creator per SymmetricDS instance. (Please note that the node creator extension has replaced the node generator extension.)
-
ITriggerCreationListener

Implement this extension point to get status callbacks during trigger creation.
-
IBatchAlgorithm

Implement this extension point and set the name of the Spring bean in the batch_algorithm column of the Channel table in order to use it. This extension point gives fine-grained control over how a channel is batched.
-
IDataRouter

Implement this extension point and set the name of the Spring bean in the router_type column of the Router table in order to use it. This extension point gives the ability to programmatically decide which nodes data should be routed to.
-
IHeartbeatListener

Implement this extension point to get callbacks during the heartbeat job.
-
IOfflineClientListener

Implement this extension point to get callbacks for offline events on client nodes.
-
IOfflineServerListener

Implement this extension point to get callbacks for offline events detected on a server node during monitoring of client nodes.
-
INodePasswordFilter

Implement this extension point to intercept the saving and rendering of the node password.
-
\ No newline at end of file diff --git a/symmetric-assemble/src/docbook/file-sync.xml b/symmetric-assemble/src/docbook/file-sync.xml deleted file mode 100644 index 9cf48216a2..0000000000 --- a/symmetric-assemble/src/docbook/file-sync.xml +++ /dev/null @@ -1,322 +0,0 @@ - - -
File Triggers / File Synchronization
Overview

SymmetricDS not only supports the synchronization of database tables, but it also supports the synchronization of files and folders from one node to another.

File synchronization features include:

* Monitoring one or more file system directory locations for file and folder changes
* Support for synchronizing to a different target directory than the source directory
* Use of wild card expressions to “include” or “exclude” files
* Choice of whether to recurse into subfolders of monitored directories
* Use of existing SymmetricDS routers to subset target nodes based on file and directory metadata
* Ability to specify whether files will be synchronized on creation, deletion, and/or modification
* Ability to specify the frequency with which file systems are monitored for changes
* Ability to extend file synchronization through scripts that run before or after a file is copied to its source location
* Support for bidirectional file synchronization

Like database synchronization, file synchronization is configured in a series of database tables. The configuration was designed to be similar to database synchronization in order to maintain consistency and to give database synchronization users a sense of familiarity. For database synchronization, SymmetricDS uses sym_trigger to configure which tables will capture data for synchronization and sym_router to designate which nodes will be the source of data changes and which nodes will receive the data changes. sym_trigger_router links triggers to routers. Likewise, for file synchronization, SymmetricDS uses sym_file_trigger to designate which base directories will be monitored. Each entry in sym_file_trigger designates one base directory to monitor for changes on the source system. The columns on sym_file_trigger provide additional settings for choosing specific files in the base directory that will be monitored, whether to recurse into subdirectories, etc. File triggers are linked to routers by sym_file_trigger_router. The file trigger router not only links the source and the target node groups, but it also optionally provides the ability to override the base directory name at the target. sym_file_trigger_router also provides a flag that indicates if the target node should be seeded with the files from the source node during SymmetricDS's initial load process.
-
Operation

Not only is file synchronization configured similarly to database synchronization, but it also operates in a very similar way. The file system is monitored for changes via a background job that tracks the file system changes (this parallels the use of triggers to monitor for changes when synchronizing database changes). When a change is detected, it is written to the sym_file_snapshot table. The file snapshot table represents the most recent known state of the monitored files. The file snapshot table has a SymmetricDS database trigger automatically installed on it so that when it is updated the changes are captured by SymmetricDS on an internal channel named filesync.

The changes to sym_file_snapshot are then routed and batched by a file-synchronization-specific router that delegates to the router configured in sym_file_trigger_router. The file sync router can make routing decisions based on the column data of the snapshot table, columns which contain attributes of the file like the name, path, size, and last modified time. Both old and new file snapshot data are also available. The router can, for example, parse the path or name of the file and use it as the node id to route to.

Batches of file snapshot changes are stored on the filesync channel in sym_outgoing_batch. The existing SymmetricDS pull and push jobs ignore the filesync channel. Instead, batches on it are processed by file-synchronization-specific push and pull jobs.

When transferring data, the file sync push and pull jobs build a zip file dynamically based on the batched snapshot data. The zip file contains a directory per batch. The directory name is the batch_id. A sync.bsh BeanShell script is generated and placed in the root of each batch directory. The BeanShell script contains the commands to copy or delete files at their file destination from an extracted zip in the staging directory on the target node. The zip file is downloaded in the case of a pull, or, in the case of a push, is uploaded as an HTTP multi-part attachment. Outgoing zip files are written and transferred from the outgoing staging directory. Incoming zip files are staged in the filesync_incoming staging directory by source node id. The filesync_incoming/{node_id} staging directory is cleared out before each subsequent delivery of files.

The acknowledgement of a batch happens the same way it is acknowledged in database synchronization. The client responds with an acknowledgement as part of the response during a file push or pull.
- -
File Sync BeanShell Scripts

There are two types of BeanShell scripts that can be leveraged to customize file synchronization behavior: before_copy_script and after_copy_script.

Each of these scripts has access to local variables that can be read or set to affect the behavior of copying files:

* targetBaseDir: The preset base directory as configured in sym_file_trigger or overwritten in sym_file_trigger_router. This variable can be set by the before_copy_script to set a different target directory.

* targetFileName: The name of the file that is being synchronized. This variable can be overwritten by the before_copy_script to rename a file at the target.

* targetRelativeDir: The name of a directory relative to the target base directory to which the target file will be copied. The default value of this variable is the relative directory of the source. For example, if the source base directory is /src and the target base directory is /tgt and the file /src/subfolder/1.txt is changed, then the default targetRelativeDir will be subfolder. This variable can be overwritten by the before_copy_script to change the relative directory at the target. In the above example, if the variable is set to blank using the script targetRelativeDir = "";, then the target file will be copied to /tgt/1.txt.

* processFile: This variable is set to true by default. A custom before_copy_script may process the file itself and set this variable to false to indicate that the file should NOT be copied to its target location.

* sourceFileName: This is the name of the file.

* sourceFilePath: This is the path where the file can be found relative to the batch directory.

* batchDir: This is the staging directory where the batch has been extracted. The batchDir + sourceFilePath + sourceFileName can be used to locate the extracted file.

* engine: This is the bound instance of the ISymmetricEngine that is processing a file. It gives access to all of the APIs available in SymmetricDS.

* sourceNodeId: This is a bound variable that represents the nodeId that is the source of the file.

* log: This is the bound instance of an org.slf4j.Logger that can be used to log to the SymmetricDS log file.
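A sketch of a before_copy_script that ties a few of these variables together; the .tmp extension check is a hypothetical example:

----
// skip temporary files entirely
if (sourceFileName.endsWith(".tmp")) {
    processFile = false;
}

// flatten everything else into the base directory and log the decision
targetRelativeDir = "";
log.info("File sync script processed " + sourceFileName + " from node " + sourceNodeId);
----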
-
File Sync Examples
Sync Text Files From Server To Client

The following example is for a configuration with client and server node groups. Creation, modification, and deletion of files with the extension of txt will be captured recursively in the /filesync/server/all directory. A before copy script will set the targetBaseDir to /filesync/clients/{externalId}.

INSERT INTO sym_file_trigger
  (trigger_id,base_dir,recurse,includes_files,excludes_files,sync_on_create,
   sync_on_modified,sync_on_delete,sync_on_ctl_file,delete_after_sync,before_copy_script,after_copy_script,
   create_time,last_update_by,last_update_time)
VALUES ('sync_directory','/filesync/server/all',1,'*.txt',null,1,1,1,0,0,
  'targetBaseDir = "/filesync/clients/" +
   engine.getParameterService().getExternalId();',null,current_timestamp,'example',
  current_timestamp);

INSERT INTO sym_file_trigger_router
  (trigger_id,router_id,enabled,initial_load_enabled,target_base_dir,
   conflict_strategy,create_time,last_update_by,last_update_time)
VALUES
  ('sync_directory','server_2_client',1,1,'','SOURCE_WINS',current_timestamp,
   'example',current_timestamp);

INSERT INTO sym_router
  (router_id,target_catalog_name,target_schema_name,target_table_name,
   source_node_group_id,target_node_group_id,
   router_type,router_expression,sync_on_update,sync_on_insert,sync_on_delete,
   create_time,last_update_by,last_update_time)
VALUES
  ('server_2_client',null,null,null,'server','client','default',null,1,1,1,
   current_timestamp,'example',current_timestamp);
- -
Route changes to a specific node based on a directory name

The following example is also for a configuration with client and server node groups. This example monitors the /filesync/server/nodes directory. It expects the directory to contain subdirectories that are named by the node_ids in the client group. Any files put directly into a folder with the name of the node will be routed to that node.

Note that the router is a column match router that is matching the client node_id with the value of the RELATIVE_DIR column in sym_file_snapshot. Because the router is looking for an exact match, any files in subdirectories would result in a path of node_id/subdir, which would not match. A sketch of the configuration follows.
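The SQL for this example is reconstructed below as a sketch that mirrors the previous example; the trigger id, router id, and the column match expression should be verified against your release:

----
INSERT INTO sym_file_trigger
  (trigger_id,base_dir,recurse,includes_files,excludes_files,sync_on_create,
   sync_on_modified,sync_on_delete,sync_on_ctl_file,delete_after_sync,
   before_copy_script,after_copy_script,create_time,last_update_by,last_update_time)
VALUES ('node_specific','/filesync/server/nodes',1,null,null,1,1,1,0,0,null,null,
   current_timestamp,'example',current_timestamp);

INSERT INTO sym_file_trigger_router
  (trigger_id,router_id,enabled,initial_load_enabled,target_base_dir,
   conflict_strategy,create_time,last_update_by,last_update_time)
VALUES ('node_specific','server_2_one_client',1,1,'/filesync/clients','SOURCE_WINS',
   current_timestamp,'example',current_timestamp);

INSERT INTO sym_router
  (router_id,target_catalog_name,target_schema_name,target_table_name,
   source_node_group_id,target_node_group_id,router_type,router_expression,
   sync_on_update,sync_on_insert,sync_on_delete,create_time,last_update_by,last_update_time)
VALUES ('server_2_one_client',null,null,null,'server','client','column',
   'RELATIVE_DIR = :NODE_ID',1,1,1,current_timestamp,'example',current_timestamp);
----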
-
- - - -
\ No newline at end of file diff --git a/symmetric-assemble/src/docbook/images/data-model-config.gif b/symmetric-assemble/src/docbook/images/data-model-config.gif deleted file mode 100644 index 0078f0cf96..0000000000 Binary files a/symmetric-assemble/src/docbook/images/data-model-config.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/data-model-runtime.gif b/symmetric-assemble/src/docbook/images/data-model-runtime.gif deleted file mode 100644 index 286f4c4f28..0000000000 Binary files a/symmetric-assemble/src/docbook/images/data-model-runtime.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/multi-home-combined.gif b/symmetric-assemble/src/docbook/images/multi-home-combined.gif deleted file mode 100644 index fda547594d..0000000000 Binary files a/symmetric-assemble/src/docbook/images/multi-home-combined.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/multi-home-separate.gif b/symmetric-assemble/src/docbook/images/multi-home-separate.gif deleted file mode 100644 index 4adea31a71..0000000000 Binary files a/symmetric-assemble/src/docbook/images/multi-home-separate.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/node-communication.gif b/symmetric-assemble/src/docbook/images/node-communication.gif deleted file mode 100644 index ae018502fe..0000000000 Binary files a/symmetric-assemble/src/docbook/images/node-communication.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/overview-1.gif b/symmetric-assemble/src/docbook/images/overview-1.gif deleted file mode 100644 index 88e342e05d..0000000000 Binary files a/symmetric-assemble/src/docbook/images/overview-1.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/seq-node-communication.gif b/symmetric-assemble/src/docbook/images/seq-node-communication.gif deleted file mode 100644 index b92c63a567..0000000000 Binary files a/symmetric-assemble/src/docbook/images/seq-node-communication.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/seq-pull-client.gif b/symmetric-assemble/src/docbook/images/seq-pull-client.gif deleted file mode 100644 index dbac79c18e..0000000000 Binary files a/symmetric-assemble/src/docbook/images/seq-pull-client.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/seq-startup.gif b/symmetric-assemble/src/docbook/images/seq-startup.gif deleted file mode 100644 index e777e6c89e..0000000000 Binary files a/symmetric-assemble/src/docbook/images/seq-startup.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/software-stack.gif b/symmetric-assemble/src/docbook/images/software-stack.gif deleted file mode 100644 index 34bf32d1c6..0000000000 Binary files a/symmetric-assemble/src/docbook/images/software-stack.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/symmetric_war.gif b/symmetric-assemble/src/docbook/images/symmetric_war.gif deleted file mode 100644 index 75b0eaeee7..0000000000 Binary files a/symmetric-assemble/src/docbook/images/symmetric_war.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/sync-android-1.png b/symmetric-assemble/src/docbook/images/sync-android-1.png deleted file mode 100644 index 016b2bd0a7..0000000000 Binary files a/symmetric-assemble/src/docbook/images/sync-android-1.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/sync-android-2.png b/symmetric-assemble/src/docbook/images/sync-android-2.png deleted file mode 
100644 index ab4287ba52..0000000000 Binary files a/symmetric-assemble/src/docbook/images/sync-android-2.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/sync-android-3.png b/symmetric-assemble/src/docbook/images/sync-android-3.png deleted file mode 100644 index de9de99eaa..0000000000 Binary files a/symmetric-assemble/src/docbook/images/sync-android-3.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/sync-android-4.png b/symmetric-assemble/src/docbook/images/sync-android-4.png deleted file mode 100644 index 3cc61ed3b2..0000000000 Binary files a/symmetric-assemble/src/docbook/images/sync-android-4.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/sync-android-5.png b/symmetric-assemble/src/docbook/images/sync-android-5.png deleted file mode 100644 index a018156204..0000000000 Binary files a/symmetric-assemble/src/docbook/images/sync-android-5.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/sync-android-6.png b/symmetric-assemble/src/docbook/images/sync-android-6.png deleted file mode 100644 index 782f630c0a..0000000000 Binary files a/symmetric-assemble/src/docbook/images/sync-android-6.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/sync-android-7.png b/symmetric-assemble/src/docbook/images/sync-android-7.png deleted file mode 100644 index d954e70a44..0000000000 Binary files a/symmetric-assemble/src/docbook/images/sync-android-7.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/sync-android-8.png b/symmetric-assemble/src/docbook/images/sync-android-8.png deleted file mode 100644 index f9636aa47a..0000000000 Binary files a/symmetric-assemble/src/docbook/images/sync-android-8.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/sync-android-9.png b/symmetric-assemble/src/docbook/images/sync-android-9.png deleted file mode 100644 index 7be167511a..0000000000 Binary files a/symmetric-assemble/src/docbook/images/sync-android-9.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/three-tier-arch.gif b/symmetric-assemble/src/docbook/images/three-tier-arch.gif deleted file mode 100644 index 44c081af51..0000000000 Binary files a/symmetric-assemble/src/docbook/images/three-tier-arch.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/three-tier-regional-arch.gif b/symmetric-assemble/src/docbook/images/three-tier-regional-arch.gif deleted file mode 100644 index a33429783c..0000000000 Binary files a/symmetric-assemble/src/docbook/images/three-tier-regional-arch.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/tutorial-arch.gif b/symmetric-assemble/src/docbook/images/tutorial-arch.gif deleted file mode 100644 index d6ebf5d7c3..0000000000 Binary files a/symmetric-assemble/src/docbook/images/tutorial-arch.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/images/two-tier-arch.gif b/symmetric-assemble/src/docbook/images/two-tier-arch.gif deleted file mode 100644 index 8253faa2ae..0000000000 Binary files a/symmetric-assemble/src/docbook/images/two-tier-arch.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/introduction.xml b/symmetric-assemble/src/docbook/introduction.xml deleted file mode 100644 index aa10a4ce59..0000000000 --- a/symmetric-assemble/src/docbook/introduction.xml +++ /dev/null @@ -1,523 +0,0 @@ - - - - Introduction - - This User Guide will introduce both basic and advanced concepts in the - configuration 
of SymmetricDS. By the end of this chapter, you will have a better understanding of SymmetricDS' capabilities and many of its basic concepts.
System Requirements

SymmetricDS is written in Java and requires a Java SE Runtime Environment (JRE) or Java SE Development Kit (JDK) version 6.0 or above.

Any database with trigger technology and a JDBC driver has the potential to run SymmetricDS. The database is abstracted through a Database Dialect in order to support specific features of each database. The following Database Dialects have been included with this release:

* MySQL version 5.0.2 and above
* MariaDB version 5.1 and above
* Oracle version 10g and above
* PostgreSQL version 8.2.5 and above
* SQL Server 2005 and above
* SQL Server Azure
* HSQLDB 2.x
* H2 1.x
* Apache Derby 10.3.2.1 and above
* IBM DB2 9.5
* Firebird 2.0 and above
* Interbase 2009 and above
* Greenplum 8.2.15 and above
* SQLite 3 and above
* Sybase Adaptive Server Enterprise 12.5 and above
* Sybase SQL Anywhere 9 and above

See the Database Notes appendix for compatibility notes and other details for your specific database.
- -
Concepts
Nodes

SymmetricDS is a Java-based application that hosts a synchronization engine which acts as an agent for data synchronization between a single database instance and other synchronization engines in a network.

The SymmetricDS engine is referred to as a node. SymmetricDS is designed to be able to scale out to many thousands of nodes. The database connection is configured by providing a database connection string, database user, and database password in a properties file. SymmetricDS can synchronize any table that is accessible by the database connection, given that the database user has been assigned the appropriate database permissions.
Simple Configuration (figure)
A SymmetricDS node is assigned an external id and a node group id. The external id is a meaningful, user-assigned identifier that is used by SymmetricDS to understand which data is destined for a given node. The node group id is used to identify groupings or tiers of nodes. It defines where the node fits into the overall node network. For example, one node group might be named “corporate” and represent an enterprise or corporate database. Another node group might be named “local_office” and represent databases located in different offices across a country. The external id for a “local_office” could be an office number or some other identifying alphanumeric string. A node is uniquely identified in a network by a node id that is automatically generated from the external id. If local office number 1 had two office databases and two SymmetricDS nodes, they would probably have an external id of “1” and node ids of “1-1” and “1-2.”

SymmetricDS can be deployed in a number of ways. The most common option is to deploy it as a stand alone process running as a service on your chosen server platform. When deployed in this manner SymmetricDS can act as either a client, a multi-tenant server or both depending on where the SymmetricDS database fits into the overall network of databases. Although it can run on the same server as its database, it is not required to do so. SymmetricDS can also be deployed as a web application in an application server such as Apache Tomcat, JBoss Application Server, IBM WebSphere, or others.

SymmetricDS was designed to be a simple, approachable, non-threatening tool for technology personnel. It can be thought of and dealt with as a web application, only instead of a browser as the client, other SymmetricDS engines are the clients. It has all the characteristics of a web application and can be tuned using the same principles that would be used to tune user facing web applications.
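A sketch of the properties file mentioned above, with the node's group id and external id; the engine name, ids, H2 connection values, and port are illustrative only:

----
engine.name=corp-000
group.id=corporate
external.id=000

db.driver=org.h2.Driver
db.url=jdbc:h2:file:corp;AUTO_SERVER=TRUE
db.user=symmetric
db.password=

# where this node registers and where other nodes reach it
registration.url=
sync.url=http://localhost:31415/sync/corp-000
----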
- -
Change Data Capture

Changes are captured at a SymmetricDS enabled database by database triggers that are installed automatically by SymmetricDS based on configuration settings that you specify. The database triggers record data changes in the sym_data table. The triggers are designed to be as noninvasive and as lightweight as possible. After SymmetricDS triggers are installed, changes are captured for any Data Manipulation Language (DML) statements performed by external applications. Note that no additional libraries or changes are needed by the applications that use the database, and SymmetricDS does not have to be online for data to be captured.

Database tables that need to be replicated are configured in a series of SymmetricDS configuration tables. The configuration for the entire network of nodes is typically managed at a central node in the network, known as the registration server node. The registration server node is almost always the same node as the root node in a tree topology. When configuring “leaf” nodes, one of the start-up parameters is the URL of the registration server node. If the “leaf” node has not yet registered, it contacts the registration server and requests to join the network. Upon acceptance, the node downloads its configuration. After a node is registered, SymmetricDS can also provide an initial load of data before synchronization starts.

SymmetricDS will install or update its database triggers at start-up time and on a regular basis when a scheduled sync triggers job runs (by default, each night at midnight). The sync triggers job detects changes to your database structure or trigger configuration when deciding whether a trigger needs to be rebuilt. Optionally, the sync triggers job can be turned off and the database triggers DDL script can be generated and run by a DBA.

After changed data is inserted by the database trigger into the sym_data table, it is batched and assigned to a node by the router job. Routing data refers to choosing the nodes in the SymmetricDS network to which the data should be sent. By default, data is routed to other nodes based on the node group. Optionally, characteristics of the data or of the target nodes can also be used for routing. A batch of data is a group of data changes that are transported and loaded together at the target node in a single database transaction. Batches are recorded in the sym_outgoing_batch table. Batches are node specific. sym_data and sym_outgoing_batch are linked by sym_data_event. The delivery status of a batch is maintained in sym_outgoing_batch. After the data has been delivered to a remote node, the batch status is changed to ‘OK.’
- -
Change Data Delivery

Data is delivered to remote nodes over HTTP or HTTPS. It can be delivered in one of two ways, depending on the type of transport link that is configured between node groups. A node group can be configured to push changes to other nodes in a group or pull changes from other nodes in a group. Pushing is initiated from the push job at the source node. If there are batches that are waiting to be transported, the pushing node will reserve a connection to each target node using an HTTP HEAD request. If the reservation request is accepted, then the source node will fully extract the data for the batch. Data is extracted to a memory buffer in CSV format until a configurable threshold is reached. If the threshold is reached, the data is flushed to a file and the extraction of data continues to that file. After the batch has been extracted, it is transported using an HTTP PUT to the target node. The next batch is then extracted and sent. This is repeated until the maximum number of batches has been sent for each channel or there are no more batches available to send. After all the batches have been sent for one push, the target returns a list of the batch statuses.

Pull requests are initiated by the pull job at the target node. A pull request uses an HTTP GET. The same extraction process that happens for a "push" also happens during a "pull."

After data has been extracted and transported, the data is loaded at the target node. Similar to the extract process, while data is being received the data loader will cache the CSV in a memory buffer until a threshold is reached. If the threshold is reached, the data is flushed to a file and the receiving of data continues. After all of the data in a batch is available locally, a database connection is retrieved from the connection pool and the events that had occurred at the source database are played back against the target database.
- -
Data Channels

Data is always delivered to a remote node in the order it was recorded for a specific channel. A channel is a user defined grouping of tables that are dependent on each other. Data that is captured for tables belonging to a channel is always synchronized together. Each trigger must be assigned a channel id as part of the trigger definition process. The channel id is recorded on SYM_DATA and SYM_OUTGOING_BATCH. If a batch fails to load, then no more data is sent for that channel until the failure has been addressed. Data on other channels will continue to be synchronized, however.

If a remote node is offline, the data remains recorded at the source database until the node comes back online. Optionally, a timeout can be set where a node is removed from the network. Change data is purged from the data capture tables by SymmetricDS after it has been sent and a configurable purge retention period has been reached. Unsent change data for a disabled node is also purged.

The default behavior of SymmetricDS in the case of data integrity errors is to attempt to repair the data. If an insert statement is run and there is already a row that exists, SymmetricDS will fall back and try to update the existing row. Likewise, if an update that was successful on a source node is run and no rows are found to update on the destination, then SymmetricDS will fall back to an insert on the destination. If a delete is run and no rows were deleted, the condition is simply logged. This behavior can be modified by tweaking the settings for conflict detection and resolution.

SymmetricDS was designed to use standard web technologies so it can be scaled to many clients across different types of databases. It can synchronize data to and from as many client nodes as the deployed database and web infrastructure will support. When a two-tier database and web infrastructure is maxed out, a SymmetricDS network can be designed to use N-tiers to allow for even greater scalability. At this point we have covered what SymmetricDS is and how it does its job of replicating data to many databases using standard, well understood technologies.
-
- -
Features

At a high level, SymmetricDS comes with a number of features that you are likely to need or want when doing data synchronization. A majority of these features were created as a direct result of real-world use of SymmetricDS in production settings.
Two-Way Table Synchronization

In practice, much of the data in a typical synchronization requires synchronization in just one direction. For example, a retail store sends its sales transactions to a central office, and the central office sends its stock items and pricing to the store. Other data may synchronize in both directions. For example, the retail store sends the central office an inventory document, and the central office updates the document status, which is then sent back to the store. SymmetricDS supports bi-directional or two-way table synchronization and avoids getting into update loops by only recording data changes outside of synchronization.
- -
Data Channels

SymmetricDS supports the concept of channels of data. Data synchronization is defined at the table (or table subset) level, and each managed table can be assigned to a channel that helps control the flow of data. A channel is a category of data that can be enabled, prioritized, and synchronized independently of other channels. For example, in a retail environment, users may be waiting for inventory documents to update while a promotional sale event updates a large number of items. If processed in order, the item updates would delay the inventory updates even though the data is unrelated. By assigning changes to the item tables to an item channel and inventory tables' changes to an inventory channel, the changes are processed independently so inventory can get through despite the large amount of item data.

Channels are discussed in more detail later in this guide.
- -
- Change Notification - - After a change to the database is recorded, the SymmetricDS nodes - interested in the change are notified. Change notification is configured - to perform either a push (trickle-back) or a - pull (trickle-poll) of data. When several nodes - target their changes to a central node, it is efficient to push the - changes instead of waiting for the central node to pull from each source - node. If the network configuration protects a node with a firewall, a - pull configuration could allow the node to receive data changes that - might otherwise be blocked using push. The frequency of the change - notification is configurable and defaults to once per minute. -
- -
- HTTP(S) Transport

By default, SymmetricDS uses web-based HTTP or HTTPS in a style called Representational State Transfer (REST). It is lightweight and easy to manage. A series of filters are also provided to enforce authentication and to restrict the number of simultaneous synchronization streams. The ITransportManager interface allows other transports to be implemented.
- -
- Data Filtering and Rerouting

Using SymmetricDS, data can be filtered as it is recorded, extracted, and loaded.

Data routing is accomplished by assigning a router type to a configuration. Routers are responsible for identifying which target nodes captured changes should be delivered to. Custom routers are possible by providing a class implementing IDataRouter.

In addition to synchronization, SymmetricDS is also capable of performing fairly complex transformations of data as the synchronization data is loaded into a target database. The transformations can be used to merge source data, make multiple copies of source data across multiple target tables, set defaults in the target tables, etc. The types of transformation can also be extended to create even more custom transformations.

As data changes are loaded in the target database, data can be filtered, either by a simple BeanShell load filter or by a custom load filter extension point class. You can change the data in a column, route it somewhere else, trigger initial loads, or many other possibilities. One possible use might be to route credit card data to a secure database and blank it out as it loads into a centralized sales database. The filter can also prevent data from reaching the database altogether, effectively replacing the default data loading process.
- -
- Transaction Awareness - - Many databases provide a unique transaction identifier associated - with the rows that are committed together as a transaction. SymmetricDS - stores the transaction identifier, along with the data that changed, so - it can play back the transaction exactly as it occurred originally. This - means the target database maintains the same transactional integrity as - its source. Support for transaction identification for supported - databases is documented in the appendix of this guide. -
- -
- Remote Management - - Administration functions are exposed through Java Management - Extensions (JMX) and can be accessed from the Java JConsole or through - an application server. Functions include opening registration, reloading - data, purging old data, and viewing batches. A number of configuration - and runtime properties are available to be viewed as well. - - SymmetricDS also provides functionality to send SQL events through - the same synchronization mechanism that is used to send data. The data - payload can be any SQL statement. The event is processed and - acknowledged just like any other event type. -
- -
- File Synchronization

Quite a few users of SymmetricDS have found that they need to synchronize not only database tables to remote locations, but also a set of files. As of version 3.5, SymmetricDS supports file synchronization. Please see the file synchronization chapter for more information.
-
- - -
- Why Database Triggers?

There are several industry-recognized techniques to capture changing data for replication, synchronization, and integration in a relational database.

Lazy data capture queries changed data from a source system using some SQL condition (like a time stamp column).

Trigger-based data capture installs database triggers to capture changes.

Log-based data capture reads data changes from proprietary database recovery logs.

All three of these techniques have advantages and disadvantages, and all three are on the road map for SymmetricDS. At the present time, SymmetricDS supports trigger-based data capture and partial lazy data capture. These two techniques were implemented first for a variety of reasons, not the least of which is that the majority of use cases that SymmetricDS targets can be solved using trigger-based and conditional replication in a way that allows more database platforms to be supported using industry-standard technologies. This allowed SymmetricDS developers' valuable time and energy to be invested in designing a product that is easy to install, configure, and manage, rather than spent reverse engineering proprietary and poorly documented database log files.

Trigger-based data capture does introduce a measurable amount of overhead on database operations. The amount of overhead can vary greatly depending on the processing power and configuration of the database platform, and the usage of the database by applications. With nonstop advances in hardware and database technology, trigger-based data capture has become feasible for use cases that involve high data throughput or require scaling out.

Trigger-based data capture is easier to implement and support than log-based solutions. It uses well-known database concepts and is very accessible to software and database developers and database administrators. It can usually be installed, configured, and managed by application development teams or database administrators and does not require deployment on the database server itself.
- - -
diff --git a/symmetric-assemble/src/docbook/manage.xml b/symmetric-assemble/src/docbook/manage.xml deleted file mode 100644 index ce04820e25..0000000000 --- a/symmetric-assemble/src/docbook/manage.xml +++ /dev/null @@ -1,1135 +0,0 @@ - - - - - Manage - -
- Identifying Nodes - - A - node - is a single instance of SymmetricDS. It can be thought of as a proxy for a database which manages the - synchronization of data to and/or from its database. For our example retail application, the following would be - SymmetricDS nodes: - - Each point-of-sale workstation. - The central office database server. - - Each node of SymmetricDS can be either embedded in another application, run stand-alone, or even run in the - background as a service. If desired, nodes can be clustered to help disperse load if they send and/or receive - large volumes of data to or from a large number of nodes. - - Individual nodes are easy to identify when planning your implementation. If a database exists in your domain - that needs to send or receive data, there needs to be a corresponding SymmetricDS instance (a node) responsible - for managing the synchronization for that database. - - -
-
- Creating Nodes

Nodes are defined in the SYM_NODE table. Two other tables play a direct role in defining a node as well. The first is SYM_NODE_IDENTITY. The only row in this table is inserted in the database when the node first registers with a parent node. In the case of a root node, the row is entered by the user. The row is used by a node instance to determine its node identity.

The following SQL statements set up a top-level registration server as a node identified as "00000" in the "corp" node group.

insert into SYM_NODE (node_id, node_group_id, external_id, sync_enabled) values ('00000', 'corp', '00000', 1);
insert into SYM_NODE_IDENTITY values ('00000');

The second table, SYM_NODE_SECURITY, has rows created for each child node that registers with the node, assuming auto-registration is enabled. If auto-registration is not enabled, you must create a row in SYM_NODE and SYM_NODE_SECURITY for the node to be able to register. You can also, with this table, manually cause a node to re-register or to perform another initial load by setting the corresponding columns in the table itself.
- Registration - - Node registration is the act of setting up a new - - and - - so that when the new node is brought online it is allowed to join the - system. Nodes are only allowed to register if rows exist for the node - and the - registration_enabled - flag is set to 1. If the - auto.registration - SymmetricDS property is set to true, then when a node attempts to - register, if registration has not already occurred, the node will - automatically be registered. - - - - SymmetricDS allows you to have multiple nodes with the same - external_id - . Out of the box, openRegistration will open a new registration if a - registration already exists for a node with the same external_id. A new - registration means a new node with a new - node_id - and the same - external_id - will be created. If you want to re-register the same node you can use - the - reOpenRegistration() - JMX method which takes a - node_id - as an argument. - -
- -
- Initial Loads

An initial load is the process of seeding tables at a target node with data from its parent node. When a node connects and data is extracted, after it is registered and if an initial load was requested, each table that is configured to synchronize to the target node group will be given a reload event in the order defined by the end user. A SQL statement is run against each table to get the data load that will be streamed to the target node. The selected data is filtered through the configured router for the table being loaded. If the data set is going to be large, then SQL criteria can optionally be provided to pare down the data that is selected out of the database.

An initial load cannot occur until after a node is registered. An initial load is requested by setting the initial_load_enabled column on SYM_NODE_SECURITY to 1 on the row for the target node in the parent node's database. You can configure SymmetricDS to automatically perform an initial load when a node registers by setting the parameter auto.reload to true. Regardless of how the initial load is initiated, the next time the source node routes data, reload batches will be inserted. At the same time reload batches are inserted, all previously pending batches for the node are marked as successfully sent.

Note that if the parent node that a node is registering with is not a registration server node (as can happen with a registration redirect or certain non-tree structure node configurations), the parent node's SYM_NODE_SECURITY entry must exist at the parent node and have a non-null value for column initial_load_time. Nodes can't be registered to non-registration-server nodes without this value being set one way or another (i.e., manually, or as a result of an initial load occurring at the parent node).

SymmetricDS recognizes that an initial load has completed when the initial_load_time column on the target node is set to a non-null value.

An initial load is accomplished by inserting reload batches in a defined order according to the initial_load_order column on SYM_TRIGGER_ROUTER. If the initial_load_order column contains a negative value, the associated table will NOT be loaded. If the initial_load_order column contains the same value for multiple tables, SymmetricDS will attempt to order the tables according to foreign key constraints. If there are cyclical constraints, then foreign keys might need to be turned off or the initial load will need to be manually configured based on knowledge of how the data is structured.

Initial load data is always queried from the source database table. All data is passed through the configured router to filter out data that might not be targeted at a node.
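For example, requesting an initial load for a target node directly in the parent node's database is a one-row update (the node id is illustrative):

update SYM_NODE_SECURITY set initial_load_enabled = 1 where node_id = '001'; -- reload batches are inserted on the next routing run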
- Target table prep for initial load

There are several parameters that can be used to specify what, if anything, should be done to the table on the target database just prior to loading the data. Note that the parameters below specify the desired behavior for all tables in the initial load, not just one.

initial.load.delete.first / initial.load.delete.first.sql

By default, an initial load will not delete existing rows from a target table before loading the data. If a delete is desired, the parameter initial.load.delete.first can be set to true. If true, the command found in initial.load.delete.first.sql will be run on each table prior to loading the data. The default value for initial.load.delete.first.sql is delete from %s, but it could be changed if needed. Note that additional reload batches are created, in the correct order, to achieve the delete.

initial.load.create.first

By default, an initial load will not create the table on the target if it doesn't already exist. If the desired behavior is to create the table on the target if it is not present, set the parameter initial.load.create.first to true. SymmetricDS will attempt to create the table and indexes on the target database before doing the initial load. (Additional batches are created to represent the table schema.)
-
- Loading subsets of data

An efficient way to select a subset of data from a table for an initial load is to provide an initial_load_select clause on SYM_TRIGGER_ROUTER. This clause, if present, is applied as a where clause to the SQL used to select the data to be loaded. The clause may use "t" as an alias for the table being loaded, if needed. The $(externalId) token can be used for subsetting the data in the where clause.

In cases where routing is done using a feature like Subselect Router, an initial_load_select clause matching the subselect's criteria would be a more efficient approach. Some routers will check to see if the initial_load_select clause is provided, and they will not execute, assuming that the more optimal path is using the initial_load_select statement.

One example of the use of an initial load select would be if you wished to only load data created more recently than the start of year 2011. Say, for example, the column created_time contains the creation date. Your initial_load_select would read created_time > {ts '2011-01-01 00:00:00.0000'} (using whatever timestamp format works for your database). This then gets applied as a where clause when selecting data from the table.

When providing an initial_load_select, be sure to test out the criteria against production data in a query browser. Do an explain plan to make sure you are properly using indexes.
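Continuing the created_time example above, the clause could be put in place with an update such as the following sketch (the trigger and router ids are hypothetical):

update SYM_TRIGGER_ROUTER
set initial_load_select = 'created_time > {ts ''2011-01-01 00:00:00.0000''}'
where trigger_id = 'sale_transaction' and router_id = 'store_corp_identity'; -- applied as a where clause during extraction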
- -
- Splitting an Initial Load for a Table Across Multiple Batches

By default, all data for a given table will be initial loaded in a single batch, regardless of the max batch size parameter on the reload channel. That is, for a table with one million rows, all rows for that table will be initial loaded and sent to the destination node in a single batch. For large tables, this can result in a batch that can take a long time to extract and load.

Initial loads for a table can be broken into multiple batches by setting initial.load.use.extract.job.enabled to true. This parameter allows SymmetricDS to pre-extract initial load batches versus having them extracted when the batch is pulled or pushed. When using this parameter, there are two ways to tell SymmetricDS the number of batches to create for a given table. The first is to specify a positive integer in the initial_load_batch_count column on SYM_TRIGGER_ROUTER. This number will dictate the number of batches created for the initial load of the given table. The second way is to specify 0 for initial_load_batch_count on SYM_TRIGGER_ROUTER and specify a max_batch_size on the reload channel in SYM_CHANNEL. When 0 is specified for initial_load_batch_count, SymmetricDS will execute a count(*) query on the table during the extract process and create N batches based on the total number of records found in the table divided by the max_batch_size on the reload channel.
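As an illustration, the initial load of a large table could be split into ten pre-extracted batches (the trigger and router ids are hypothetical, and initial.load.use.extract.job.enabled must be set to true as described above):

update SYM_TRIGGER_ROUTER set initial_load_batch_count = 10
where trigger_id = 'sale_transaction' and router_id = 'store_corp_identity'; -- ten reload batches for this table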
- -
- Reverse Initial Loads

The default behavior for initial loads is to load data from the registration server or parent node to a client node. Occasionally, there may be a need to do a one-time initial load of data in the opposite or "reverse" direction, namely from a client node to the registration node. To achieve this, set the parameter auto.reload.reverse to true at the registration node. This will cause a one-time reverse load of data, for tables configured with non-negative initial load orders, to be batched at the point when registration of the client node is occurring. These batches are then sent to the parent or registration node. This capability might be needed, for example, if there is data already present in the client that doesn't exist in the parent but needs to.

Reverse initial load can be invoked manually by setting reverse_initial_load_enabled=1 in sym_node_security on the node that is supposed to send the load.
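Following directly from the note above, a manual reverse load request is a one-row update on the sending node (the node id is illustrative):

update sym_node_security set reverse_initial_load_enabled = 1 where node_id = '001'; -- node 001 will batch and send its data to the parent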
-
-
- Data Reloads

There may be times where you find you need to re-send or re-synchronize data when the change itself was not captured. This could be needed, for example, if the data changes occurred prior to SymmetricDS placing triggers on the data tables themselves, or if the data at the destination was accidentally deleted, or for some other reason. Two approaches are commonly taken to re-send the data, both of which are discussed below.

Be careful when re-sending data using either of these two techniques. Be sure you are only sending the rows you intend to send and, more importantly, be sure to re-send the data in a way that won't cause foreign key constraint issues at the destination. In other words, if more than one table is involved, be sure to send any tables which are referred to by other tables by foreign keys first. Otherwise, the channel's synchronization will block because SymmetricDS is unable to insert or update the row because the foreign key relationship refers to a non-existent row in the destination!

One possible approach would be to "touch" the rows in individual tables that need to be re-sent. By "touch", we mean to alter the row data in such a way that SymmetricDS detects a data change and therefore includes the data change in the batching and synchronizing steps. Note that you have to change the data in some meaningful way (e.g., update a time stamp); setting a column to its current value is not sufficient (by default, if there's not an actual data value change, SymmetricDS won't treat the change as something which needs to be synchronized).

A second approach would be to take advantage of SymmetricDS built-in functionality by simulating a partial "initial load" of the data. The approach is to manually create "reload" events in SYM_DATA for the necessary tables, thereby resending the desired rows for the given tables. Again, foreign key constraints must be kept in mind when creating these reload events. These reload events are created in the source database itself, and the necessary table, trigger-router combination, and channel are included to indicate the direction of synchronization.

To create a reload event, you create a SYM_DATA row, using:

data_id: null
table_name: name of table to be sent
event_type: 'R', for reload
row_data: a "where" clause (minus the word 'where') which defines the subset of rows from the table to be sent. To send all rows, one can use 1=1 for this value.
pk_data: null
old_data: null
trigger_hist_id: use the id of the most recent entry (i.e., max(trigger_hist_id)) in SYM_TRIGGER_HIST for the trigger-router combination for your table and router.
channel_id: the channel in which the table is routed
transaction_id: pick a value, for example '1'
source_node_id: null
external_data: null
create_time: current_timestamp

Let's say we need to re-send a particular sales transaction from the store to corp over again because we lost the data in corp due to an overzealous delete. For the tutorial, all transaction-related tables start with sale_, use the sale_transaction channel, and are routed using the store_corp_identity router. In addition, the trigger-routers have been set up with an initial load order based on the necessary foreign key relationships (i.e., transaction tables which are "parents" have a lower initial load order than those of their "children").
An insert statement that would create the necessary "reload" events (three in this case, one for each table) would be as follows (where MISSING-ID is changed to the needed transaction id):

insert into sym_data (source_node_id, table_name, event_type, row_data,
trigger_hist_id, channel_id, create_time) (
select '00001', t.source_table_name, 'R', 'tran_id=''MISSING-ID''',
h.trigger_hist_id, t.channel_id, current_timestamp
from sym_trigger t inner join sym_trigger_router tr on
t.trigger_id=tr.trigger_id inner join sym_trigger_hist h on
h.trigger_hist_id=(select max(trigger_hist_id) from sym_trigger_hist
where trigger_id=t.trigger_id)
where channel_id='sale_transaction' and
tr.router_id like 'store_corp_identity' and
(t.source_table_name like 'sale_%')
order by tr.initial_load_order asc);

This insert statement generates three rows, one for each configured sale table. It uses the most recent trigger history id for the corresponding table. It takes advantage of the initial load order for each trigger-router to create the three rows in the correct order (the order corresponding to the order in which the tables would have been initial loaded).
-
-
- Jobs

Work done by SymmetricDS is initiated by jobs. Jobs are tasks that are started and scheduled by a job manager. Jobs are enabled by the start.{name}.job property. Most jobs are enabled by default. The frequency at which a job runs is controlled by one of two properties: job.{name}.period.time.ms or job.{name}.cron. If a valid cron property exists in the configuration, then it will be used to schedule the job. Otherwise, the job manager will attempt to use the period.time.ms property.

The frequency of jobs can be configured in either the engine's properties file or in the SYM_PARAMETER table. When managed in SYM_PARAMETER, the frequency properties can be changed on the registration server, and when the updated settings sync to the nodes in the system the job manager will restart the jobs at the new frequency settings.

SymmetricDS utilizes Spring's CRON support, which includes seconds as the first parameter. This differs from the typical Unix-based implementation, where the first parameter is usually minutes. For example, */15 * * * * * means every 15 seconds, not every 15 minutes. See Spring's documentation for more details.

Some jobs cannot be run in parallel against a single node. When running on a cluster, these jobs use the SYM_LOCK table to get an exclusive semaphore to run the job. In order to use this table, the cluster.lock.enabled property must be set to true.

The three main jobs in SymmetricDS are the route, push and pull jobs. The route job decides what captured data changes should be sent to which nodes. It also decides what captured data changes should be transported and loaded together in a batch. The push and pull jobs are responsible for initiating HTTP communication with linked nodes to push or pull data changes that have been routed.
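For instance, the push frequency could be changed for every node from the registration server with an insert into the parameter table; this is a minimal sketch (the 'ALL'/'ALL' targeting and the 30-second value are illustrative):

insert into sym_parameter (external_id, node_group_id, param_key, param_value)
values ('ALL', 'ALL', 'job.push.period.time.ms', '30000'); -- push every 30 seconds on all nodes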
- Route Job

After data is captured in the SYM_DATA table, it is routed to specific nodes in batches by the Route Job. It is a single background task that inserts into SYM_DATA_EVENT and SYM_OUTGOING_BATCH.

The job processes each enabled channel, one at a time, collecting a list of data ids from SYM_DATA which have not been routed (see the Data Gaps section below for much more detail about this step), up to a limit specified by the channel configuration (max_data_to_route, on SYM_CHANNEL). The data is then batched based on the batch_algorithm defined for the channel. Note that, for the default and transactional algorithms, there may actually be more than max_data_to_route included depending on the transaction boundaries. The mapping of data to specific nodes, organized into batches, is then recorded in SYM_OUTGOING_BATCH with a status of "RT" in each case (representing the fact that the Route Job is still running). Once the routing algorithms and batching are completed, the batches are organized with their corresponding data ids and saved in SYM_DATA_EVENT. Once SYM_DATA_EVENT is updated, the rows in SYM_OUTGOING_BATCH are updated to a status of "NE" (new).

The route job will respect the max_batch_size on SYM_CHANNEL. If the max batch size is reached before the end of a database transaction and the batch algorithm is set to something other than nontransactional, the batch may exceed the specified max size.

The route job delegates to a router defined by the router_type and configured by the router_expression in the SYM_ROUTER table. Each router that has a source_node_group_id that matches the current node's source node group id and is linked to the trigger that captured the data gets an opportunity to choose a list of nodes the data should be sent to. Data can only be routed to nodes that belong to the router's target_node_group_id.
- Data Gaps

On the surface, the first Route Job step of collecting unrouted data ids seems simple: assign sequential data ids for each data row as it's inserted, keep track of which data id was last routed, and start from there. The difficulty arises, however, due to the fact that there can be multiple transactions inserting into SYM_DATA simultaneously. As such, a given section of rows in the SYM_DATA table may actually contain "gaps" in the data ids when the Route Job is executing. Most of these gaps are only temporary and fill in at some point after routing, and they need to be picked up with the next run of the Route Job. Thus, the Route Job needs to remember to route the filled-in gaps. Worse yet, some of these gaps are actually permanent and result from a transaction that is rolled back for some reason. In this case, the Route Job must continue to watch for the gap to fill in and, at some point, eventually gives up and assumes the gap is permanent and can be skipped. All of this must be done in some fashion that guarantees that gaps are routed when they fill in while also keeping routing as efficient as possible.

SymmetricDS handles the issue of data gaps by making use of a table, SYM_DATA_GAP, to record gaps found in the data ids. In fact, this table completely defines the entire range of data that can be routed at any point in time. For a brand new instance of SymmetricDS, this table is empty and SymmetricDS creates a gap starting from a data id of zero and ending with a very large number (defined by routing.largest.gap.size). At the start of a Route Job, the list of valid gaps (gaps with status of 'GP') is collected, and each gap is evaluated in turn. If a gap is sufficiently old (as defined by routing.stale.dataid.gap.time.ms), the gap is marked as skipped (status of 'SK') and will no longer be evaluated in future Route Jobs (note that the 'last' gap (the one with the highest starting data id) is never skipped). If not skipped, then SYM_DATA is searched for data ids present in the gap. If one or more data ids is found in SYM_DATA, then the current gap is marked with a status of 'OK', and new gap(s) are created to represent the data ids still missing in the gap's range. This process is done for all gaps. If the very last gap contained data, a new gap starting from the highest data id and ending at (highest data id + routing.largest.gap.size) is then created. This process results in an updated list of gaps which may contain new data to be routed.
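To observe this bookkeeping on a running instance, the gaps still being watched can be queried directly; a status of 'GP' marks the gaps described above:

select start_id, end_id, status, create_time
from sym_data_gap
where status = 'GP'
order by start_id; -- the ranges of data ids routing is still waiting on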
-
-
- Push and Pull Jobs for Database changes

After database-change data is routed, it awaits transport to the target nodes. Transport can occur when a client node is configured to pull data or when the host node is configured to push data. These events are controlled by the push and the pull jobs. When the start.pull.job SymmetricDS property is set to true, the frequency that data is pulled is controlled by job.pull.period.time.ms. When the start.push.job SymmetricDS property is set to true, the frequency that data is pushed is controlled by job.push.period.time.ms.

Data is extracted by channel from the source database's SYM_DATA table at an interval controlled by the extract_period_millis column on the SYM_CHANNEL table. The last_extract_time is always recorded, by channel, on the SYM_NODE_CHANNEL_CTL table for the host node's id. When the Pull and Push Jobs run, if the extract period has not passed according to the last extract time, then the channel will be skipped for this run. If the extract_period_millis is set to zero, data extraction will happen every time the jobs run.

The maximum number of batches to extract per synchronization is controlled by max_batch_to_send on the SYM_CHANNEL table. There is also a setting that controls the max number of bytes to send in one synchronization. If SymmetricDS has extracted more than the number of bytes configured by the transport.max.bytes.to.sync parameter, then it will finish extracting the current batch and finish synchronization so the client has a chance to process and acknowledge the "big" batch. This may happen before the configured max number of batches has been reached.

Both the push and pull jobs can be configured to push and pull multiple nodes in parallel. In order to take advantage of this, the pull.thread.per.server.count or push.thread.per.server.count should be adjusted (from their default value of 10) to the number of concurrent push/pulls you want to occur per period on each SymmetricDS instance. Push and pull activity is recorded in the SYM_NODE_COMMUNICATION table. This table is also used to lock push and pull activity across multiple servers in a cluster.

SymmetricDS also provides the ability to configure windows of time when synchronization is allowed. This is done using the node group channel window table. A list of allowed time windows can be specified for a node group and a channel. If one or more windows exist, then data will only be extracted and transported if the time of day falls within the window of time specified. The configured times are always for the target node's local time. If the start_time is greater than the end_time, then the window crosses over to the next day.

All data loading may be disabled by setting the dataloader.enable property to false. This has the effect of not allowing incoming synchronizations, while allowing outgoing synchronizations. All data extractions may be disabled by setting the dataextractor.enable property to false. These properties can be controlled by inserting into the root server's SYM_PARAMETER table. These properties affect every channel with the exception of the 'config' channel.

Node communication over HTTP is represented in the following figure.
- Figure: Node Communication
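As a sketch of the synchronization window configuration described above (assuming the window table is named sym_node_group_channel_wnd; the values are illustrative), the following row would restrict the item channel for store nodes to a window between 1:00 and 5:00 AM local time:

insert into sym_node_group_channel_wnd (node_group_id, channel_id, start_time, end_time, enabled)
values ('store', 'item', '01:00:00', '05:00:00', 1); -- extract and transport only inside this window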
-
-
- -
- File Sync Push and Pull Jobs

The File Sync Push and Pull jobs (introduced in version 3.5) are responsible for synchronizing file changes. These jobs work with batches on the filesync channel and create ZIP files of changed files to be sent and applied on other nodes. The parameters job.file.sync.push.period.time.ms and job.file.sync.pull.period.time.ms control how often the jobs run; both default to every 60 seconds. See the file synchronization chapter for more information.
- -
- File System Tracker Job

The File System Tracker job (introduced in version 3.5) is responsible for monitoring and recording the events of files being created, modified, or deleted. It records the current state of files to the file snapshot table. The parameter job.file.sync.tracker.cron controls how often the job runs, which defaults to every 5 minutes. See the file synchronization chapter for more information.
- -
- Sync Triggers Job

SymmetricDS examines the current configuration, corresponding database triggers, and the underlying tables to determine if database triggers need to be created or updated. The change activity is recorded on the trigger history (SYM_TRIGGER_HIST) table with a reason for the change. The following reasons for a change are possible:

N - New trigger that has not been created before
S - Schema changes in the table were detected
C - Configuration changes in Trigger
T - Trigger was missing

A configuration entry in Trigger without any history in Trigger Hist results in a new trigger being created (N). The Trigger Hist stores a hash of the underlying table, so any alteration to the table causes the trigger to be rebuilt (S). When the last_update_time is changed on the Trigger entry, the configuration change causes the trigger to be rebuilt (C). If an entry in Trigger Hist is missing the corresponding database trigger, the trigger is created (T).

The process of examining triggers and rebuilding them is automatically run during startup and each night by the SyncTriggersJob. The user can also manually run the process at any time by invoking the syncTriggers() method over JMX.
-
- Purge Jobs

Purging is the act of cleaning up captured data that is no longer needed in SymmetricDS's runtime tables. Data is purged through delete statements by the Purge Job. Only data that has been successfully synchronized will be purged. Purged tables include the runtime capture and batch tables, such as SYM_DATA, SYM_DATA_EVENT, SYM_OUTGOING_BATCH, SYM_INCOMING_BATCH, and SYM_DATA_GAP.

The purge job is enabled by the start.purge.job SymmetricDS property. The timing of the three purge jobs (incoming, outgoing, and data gaps) is controlled by a cron expression as specified by the following properties: job.purge.outgoing.cron, job.purge.incoming.cron, and job.purge.datagaps.cron. The default is 0 0 0 * * *, or once per day at midnight.

Two retention period properties indicate how much history SymmetricDS will retain before purging. The purge.retention.minutes property indicates the period of history to keep for synchronization tables. The default value is 5 days. The statistic.retention.minutes property indicates the period of history to keep for statistics. The default value is also 5 days.

The purge properties should be adjusted according to how much data is flowing through the system and the amount of storage space the database has. For an initial deployment it is recommended that the purge properties be kept at the defaults, since it is often helpful to be able to look at the captured data in order to triage problems and profile the synchronization patterns. When scaling up to more nodes, it is recommended that the purge parameters be scaled back to 24 hours or less.
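For example, retention could later be trimmed to 24 hours by overriding the property on the root server; a minimal sketch (the value is expressed in minutes):

insert into sym_parameter (external_id, node_group_id, param_key, param_value)
values ('ALL', 'ALL', 'purge.retention.minutes', '1440'); -- keep one day of synchronization history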
-
- -
- Outgoing Batches - - By design, whenever SymmetricDS encounters an issue with a synchronization, the batch containing the error is marked as being in - an error state, and all subsequent batches for that particular channel to that particular node are held and not - synchronized until the error batch is resolved. SymmetricDS will retry the batch in error until the situation creating the - error is resolved (or the data for the batch itself is changed). - - - - Analyzing and resolving issues can take place on the outgoing or incoming side. The techniques for analysis are slightly different in - the two cases, however, due to the fact that the node with outgoing batch data also has the data and data events associated with the batch in - the database. On the incoming node, however, all that is available is the incoming batch header and data present in an incoming error table. - -
- Analyzing the Issue

The first step in analyzing the cause of a failed batch is to locate information about the data in the batch, starting with SYM_OUTGOING_BATCH. To locate batches in error, use:

select * from sym_outgoing_batch where error_flag=1;

Several useful pieces of information are available from this query:

The batch number of the failed batch, available in column BATCH_ID.

The node to which the batch is being sent, available in column NODE_ID.

The channel to which the batch belongs, available in column CHANNEL_ID. All subsequent batches on this channel to this node will be held until the error condition is resolved.

The specific data id in the batch which is causing the failure, available in column FAILED_DATA_ID.

Any SQL message, SQL State, and SQL Codes being returned during the synchronization attempt, available in columns SQL_MESSAGE, SQL_STATE, and SQL_CODE, respectively.

Using the error_flag on the batch table, as shown above, is more reliable than using the status column. The status column can change from 'ER' to a different status temporarily as the batch is retried. The query above will also show you any recent batches that were originally in error and were changed to be manually skipped. See the end of the Resolving the Issue section below for more details.

To get a full picture of the batch, you can query for information representing the complete list of all data changes associated with the failed batch by joining SYM_DATA and SYM_DATA_EVENT, such as:

select * from sym_data where data_id in (select data_id from sym_data_event where batch_id='XXXXXX');

where XXXXXX is the batch id of the failing batch.

This query returns a wealth of information about each data change in a batch, including:

The table involved in each data change, available in column TABLE_NAME,

The event type (Update [U], Insert [I], or Delete [D]), available in column EVENT_TYPE,

A comma separated list of the new data and (optionally) the old data, available in columns ROW_DATA and OLD_DATA, respectively,

The primary key data, available in column PK_DATA,

The channel id, trigger history information, transaction id if available, and other information.

More importantly, if you narrow your query to just the failed data id you can determine the exact data change that is causing the failure:

select * from sym_data where data_id in (select failed_data_id from sym_outgoing_batch where batch_id='XXXXXX' and node_id='YYYYY');

where XXXXXX is the batch id and YYYYY is the node id of the batch that is failing.

The queries above usually yield enough information to be able to determine why a particular batch is failing. Common reasons a batch might be failing include:

The schema at the destination has a column that is not nullable, yet the source has the column defined as nullable and a data change was sent with the column as null.

A foreign key constraint at the destination is preventing an insertion or update, which could be caused by data being deleted at the destination or by a foreign key constraint that is not in place at the source.

The data size of a column on the destination is smaller than the data size in the source, and data that is too large for the destination has been synced.
-
- Resolving the Issue

Once you have decided upon the cause of the issue, you'll have to decide the best course of action to fix it. If, for example, the problem is due to a database schema mismatch, one possible solution would be to alter the destination database in such a way that the SQL error no longer occurs. Whatever approach you take to remedy the issue, once you have made the change, on the next push or pull SymmetricDS will retry the batch and the channel's data will start flowing again.

If you have instead decided that the batch itself is wrong, or does not need to be synchronized, or you wish to remove a particular data change from a batch, you do have the option of changing the data associated with the batch directly.

Be cautious when using the following two approaches to resolve synchronization issues. By far, the best approach to solving a synchronization error is to resolve what is truly causing the error at the destination database. Skipping a batch or removing a data id as discussed below should be your solution of last resort, since doing so results in differences between the source and destination databases.

Now that you've read the warning, if you still want to change the batch data itself, you do have several options, including:

Causing SymmetricDS to skip the batch completely. This is accomplished by setting the batch's status to 'OK', as in:

update sym_outgoing_batch set status='OK' where batch_id='XXXXXX'

where XXXXXX is the failing batch. On the next pull or push, SymmetricDS will skip this batch since it now thinks the batch has already been synchronized. Note that you can still distinguish between successful batches and ones that you've artificially marked as 'OK', since the error_flag column on the failed batch will still be set to '1' (in error).

Removing the failing data id from the batch by deleting the corresponding row in SYM_DATA_EVENT. Eliminating the data id from the list of data ids in the batch will cause future synchronization attempts of the batch to no longer include that particular data change as part of the batch. For example:

delete from sym_data_event where batch_id='XXXXXX' and data_id='YYYYYY'

where XXXXXX is the failing batch and YYYYYY is the data id to no longer be included in the batch.
- -
-
- Incoming Batches -
- Analyzing the Issue

Analyzing an incoming failed batch is different from analyzing outgoing batches. For incoming batches, you will rely on two tables, SYM_INCOMING_BATCH and SYM_INCOMING_ERROR.

The first step in analyzing the cause of an incoming failed batch is to locate information about the batch, starting with SYM_INCOMING_BATCH. To locate batches in error, use:

select * from sym_incoming_batch where error_flag=1;

Several useful pieces of information are available from this query:

The batch number of the failed batch, available in column BATCH_ID. Note that this is the batch number of the outgoing batch on the outgoing node.

The node the batch is being sent from, available in column NODE_ID.

The channel to which the batch belongs, available in column CHANNEL_ID. All subsequent batches on this channel from this node will be held until the error condition is resolved.

The data_id that was being processed when the batch failed, available in column FAILED_DATA_ID.

Any SQL message, SQL State, and SQL Codes being returned during the synchronization attempt, available in columns SQL_MESSAGE, SQL_STATE, and SQL_CODE, respectively.

For incoming batches, we do not have data and data event entries in the database we can query. We do, however, have a table, SYM_INCOMING_ERROR, which provides some information about the batch:

select * from sym_incoming_error where batch_id='XXXXXX' and node_id='YYYYY';

where XXXXXX is the batch id and YYYYY is the node id of the failing batch.

This query returns a wealth of information about each data change in a batch, including:

The table involved in each data change, available in column TARGET_TABLE_NAME,

The event type (Update [U], Insert [I], or Delete [D]), available in column EVENT_TYPE,

A comma separated list of the new data and (optionally) the old data, available in columns ROW_DATA and OLD_DATA, respectively,

The column names of the table, available in column COLUMN_NAMES,

The primary key column names of the table, available in column PK_COLUMN_NAMES.
-
- Resolving the Issue

For batches in error, from the incoming side you'll also have to decide the best course of action to fix the issue. Incoming batch errors that are in conflict can be fixed by taking advantage of two columns in SYM_INCOMING_ERROR which are examined each time batches are processed. The first column, resolve_data, if filled in, will be used in place of row_data. The second column, resolve_ignore, if set, will cause this particular data item to be ignored and batch processing to continue. These are the same two columns used when a manual conflict resolution strategy is chosen, as discussed in the conflict detection and resolution section.
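As a minimal sketch, ignoring a single failed row could look like the following (assuming the row within the batch is identified by a failed_row_number column; the ids are illustrative):

update sym_incoming_error
set resolve_ignore = 1
where batch_id = 100 and node_id = '000' and failed_row_number = 1; -- skip this row on the next retry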
-
-
- Staging Area

SymmetricDS creates temporary extraction and data load files containing the CSV payload of a synchronization when the payload size reaches the value of the stream.to.file.threshold.bytes SymmetricDS property. Below the threshold, the payload is streamed to and from memory. The default threshold value is 32,767 bytes. This feature may be turned off by setting the stream.to.file.enabled property to false.

SymmetricDS creates these temporary files in the directory specified by the java.io.tmpdir Java System property.

The location of the temporary directory may be changed by setting the Java System property passed into the Java program at startup. For example:

-Djava.io.tmpdir=/home/.symmetricds/tmp
-
- Logging

The standalone SymmetricDS installation uses Log4J for logging. The configuration file is conf/log4j.xml. The log4j.xml file has hints as to what logging can be enabled for useful, finer-grained logging.

There is a command line option to turn on preconfigured debugging levels. When the --debug option is used, conf/debug-log4j.xml is used instead of log4j.xml.

SymmetricDS proxies all of its logging through SLF4J. When deploying to an application server or if Log4J is not being leveraged, then the general rules for SLF4J logging apply.
- -
diff --git a/symmetric-assemble/src/docbook/parameters.xml b/symmetric-assemble/src/docbook/parameters.xml deleted file mode 100644 index ba5e92826f..0000000000 --- a/symmetric-assemble/src/docbook/parameters.xml +++ /dev/null @@ -1,134 +0,0 @@

- Parameters

There are two kinds of parameters that can be used to configure the behavior of SymmetricDS: Startup Parameters and Runtime Parameters. Startup Parameters are required to be in a system property or a property file, while Runtime Parameters can also be found in the Parameter table from the database. Parameters are re-queried from their source at a configured interval and can also be refreshed on demand by using the JMX API. The following table shows the source of parameters and the hierarchy of precedence.

Parameter Locations (lowest to highest precedence):

Location | Required | Description
symmetric-default.properties | Y | Packaged inside the symmetric-core jar file. This file has all the default settings along with descriptions.
symmetric-override.properties | N | Changes to this file, provided by the end user in the JVM's classpath, apply to all engines in the JVM.
engines/*.properties | N | Properties for a specific engine or node that is hosted in a standalone install.
Java System Properties | N | Any SymmetricDS property can be passed in as a -D property to the runtime. It will take precedence over any properties file property.
Parameter table | N | A table which contains SymmetricDS parameters. Parameters can be targeted at a specific node group and even at a specific external id. These settings will take precedence over all of the above.
IParameterFilter | N | An extension point which allows parameters to be sourced from another location or customized. These settings will take precedence over all of the above.
-
-
- Startup Parameters - - Startup parameters are read once from properties files and apply only during start up. The following properties - are used: - - -
-
- Runtime Parameters - - Runtime parameters are read periodically from properties files or the database. The following properties are - used: - - -
-
- Server Configuration - - Server configuration is read from conf/symmetric-server.conf for settings needed by the server - before the parameter system has been initialized. - - -
- -
\ No newline at end of file diff --git a/symmetric-assemble/src/docbook/quick-start.xml b/symmetric-assemble/src/docbook/quick-start.xml deleted file mode 100644 index a897ca9e43..0000000000 --- a/symmetric-assemble/src/docbook/quick-start.xml +++ /dev/null @@ -1,602 +0,0 @@

- SymmetricDS Quick Start Guide

Authors: Eric Long, Chris Henson, Mark Hanes, Greg Wilmer

Version 3.6 for SymmetricDS, 2007-2014 JumpMind, Inc, v3.6

Permission to use, copy, modify, and distribute the SymmetricDS User Guide Version 3.6 for any purpose and without fee is hereby granted in perpetuity, provided that the above copyright notice and this paragraph appear in all copies.

- Quick Start Tutorial

Now that an overview of SymmetricDS has been presented, a quick working example of SymmetricDS is in order. This section contains a hands-on tutorial that demonstrates how to synchronize two databases with a similar schema between two nodes of SymmetricDS. This example models a retail business that has a central office database (which we'll call the "root" or "corp" node) and multiple retail store databases (which we'll call the "client" or "store" nodes). For the tutorial, we will have only one "client" or store node, as shown in the figure below, although by the end of the tutorial you could extend the example and configure a second store, if desired.
- Figure: Simplified Two-Tier Retail Store Tutorial Example
-
- For this tutorial, we will install two separate copies of SymmetricDS to represent the two different servers. One will represent the store server and one will represent the corp server. Each installed copy of SymmetricDS will be responsible for one database, and thus each copy acts as a single "node" in SymmetricDS terminology. This is the most common configuration of SymmetricDS - one installed copy of the software is responsible for one single database and represents one node. (Incidentally, there is also an option to configure a single installed copy of SymmetricDS to be responsible for both nodes. This is called "multi-homing" and will be discussed at the very end of the tutorial.) Since you are most likely going to run both SymmetricDS copies on a single machine, we will run the two copies of SymmetricDS on two separate ports. We will use port 8080 for the corp server and 9090 for the store server, as shown in the figure below.
- Figure: Two SymmetricDS applications - one for corp, one for store
-
- - Functionally, the corp SymmetricDS application will be responsible for capturing item data changes for the client, such as - item number, description, and prices by store. The client SymmetricDS application (our store, specifically our first store, store # 001) captures sale transaction data - changes for the root, such as time of sale and items sold. The pricing information is sent - only to the specific store for which the price is relevant, thereby minimizing the amount of pricing data sent to each store. - In other words, item pricing specific to store 001 will only be sent to the database - for store 001 and not to store 002's database, for example. - - - The sample configuration has the client always initiating communication with the root node, which is a fairly common - configuration. In this configuration, the client - will attach to the root on a periodic basis to pull data from the server, and the client will - also push captured changes to the root when changes are available. - Enough overview. Let's get started. We will next walk through: - - Installing and configuring the two SymmetricDS applications, - Creating SymmetricDS configuration and sample tables as needed for the root and client, used to hold corp data and store data, respectively, - Creating sample retail data in the corp database, - Starting the SymmetricDS servers and registering the store with the corp node, - Sending an initial load of data to the store node, - Causing a data push and data pull operation, and - Verifying information about the batches that were sent and received. - - - -
- Installing SymmetricDS

First, we will install two copies of the SymmetricDS software and configure it with your database connection information:

Download the symmetric-ds-3.x.x-server.zip file from http://www.symmetricds.org/

Create two directories to represent your two "machines". One will hold the corp installation of SymmetricDS and one will hold the store installation. For example, you could name the directories sym-corp and sym-store001, and we'll assume you used these names below (but feel free to update the steps below with your directory names as needed). Unzip the above zip file into both directories. This will create a symmetric-ds-3.x.x directory, which corresponds to the version you downloaded.

Properties files are used to store the minimal configuration information needed to start SymmetricDS. Copy the corp sample properties file to the corp engines directory, and the store one to the store engines directory. If you used the suggested directory names above, you would do the following copies:

samples/corp-000.properties to sym-corp/symmetric-ds-3.x.x/engines/

and

samples/store-001.properties to sym-store001/symmetric-ds-3.x.x/engines/

Browse both properties files and explore the various settings. For example, notice that the root node is given a group id of corp, and that the store node is given a group id of store. Notice also that the root node is given an external id of 000, and the store node is given an external id of 001.

Set the database connection properties (db.driver, db.url, db.user, and db.password) in both properties files now present in the engines directories to specify how to connect to your particular database.

Next, set the registration.url property in the store-001.properties file to specify where the root node can be contacted.

Note that the URL for an engine is in the following general format:

http://{hostname}:{port}/sync/{engine.name}

where the engine.name portion of the URL comes from a node's properties file.
-
- Creating and Populating Your Databases

You must first create the databases for your root and client nodes using the administration tools provided by your database vendor. Make sure the names of the databases you create match the settings in the properties files you modified in the previous step.

First, create the sample tables in the root node database, load the sample data, and load the sample configuration, by doing the following:

Open a command prompt and navigate to the samples subdirectory of your corp SymmetricDS installation (for example, navigate to sym-corp/symmetric-ds-3.x.x/samples)

Create the sample tables for items, prices, and sales, in the root database by executing the following command:

../bin/dbimport --engine corp-000 --format XML create_sample.xml

Note that the warning messages from the command are safe to ignore. Another quick comment about properties files: at startup, SymmetricDS looks for one or more properties files in the engines directory. Since we have specified a --engine parameter on the command line, it will look only for the specific file listed, namely corp-000.properties. Technically, the --engine corp-000 part is optional in our particular tutorial example. Since there's only one properties file in the engines directory, SymmetricDS would just default to using that one file, after all. By including it, though, you will reduce errors while running the tutorial, because if you run the command from the wrong SymmetricDS installation, SymmetricDS will complain about the missing engines property file you specified.

Next, create the SymmetricDS-specific tables in the corp node database. These tables will contain the configuration for synchronization. The following command uses the auto-creation feature to create all the necessary SymmetricDS system tables.

../bin/symadmin --engine corp-000 create-sym-tables

Finally, load the sample item and transaction data and SymmetricDS configuration into the root node database by executing:

../bin/dbimport --engine corp-000 insert_sample.sql

Please note that for MySQL, you will need to use the file insert_sample_mysql.sql in the above command. MySQL uses backticks (i.e., ` ) instead of double quotes (") for case-sensitive table and column names. The MySQL version of the file has the necessary change.

We have now created the corp database tables and populated them with our SymmetricDS configuration and sample data. Next, we will create the sample tables in the store node database to prepare it for receiving data.

Open a command prompt and navigate to the samples subdirectory of your store #001 SymmetricDS installation (for example, navigate to sym-store001/symmetric-ds-3.x.x/samples)

Create the empty, sample tables in the client database by executing:

../bin/dbimport --engine store-001 --format XML create_sample.xml

Note that the warning messages from the command are safe to ignore. Also, feel free to review the create_sample.xml file to see what it contains.

Please verify both databases by logging in and listing the tables.

Find the item tables that sync from root to client (that is, from corp to store): item and item_selling_price.

Find the sales tables that sync from store to corp: sale_transaction and sale_return_line_item.
- Find the SymmetricDS system tables, which have a prefix of "sym_", such as sym_channel, sym_trigger, sym_router, and sym_trigger_router.

- Validate that the corp item tables have sample data. (A sample verification query follows this list.)
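To make the verification concrete, the queries below are one way to check; the table names come from the sample schema created above. Run the counts against both databases (the store tables will be empty for now), and the final query against the corp database only.

----
select count(*) from item;
select count(*) from item_selling_price;
select count(*) from sale_transaction;
select count(*) from sale_return_line_item;

-- corp only: the sample load should have inserted item rows
select * from item order by item_id;
----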
-
- Starting SymmetricDS - Database setup and configuration for the tutorial is now complete. Time to put SymmetricDS into action. - We will now start both SymmetricDS nodes and observe the logging output. - - - If they are not already open, open two command prompts and navigate to the samples directory of each installed SymmetricDS application - (for example, navigate to sym-corp/symmetric-ds-3.x.x/samples and sym-store001/symmetric-ds-3.x.x/samples). - - - From the corp samples directory, start the corp SymmetricDS by executing: - - ../bin/sym --engine corp-000 --port 8080 - - - Upon startup for the first time, the corp node creates all the triggers that were configured by the sample - configuration. It listens on port 8080 for synchronization and registration requests for the corp-000 engine. - - - From the store001 samples directory, start the store SymmetricDS by executing: - - ../bin/sym --engine store-001 --port 9090 - - This command starts the store node server for the first time and uses the auto-creation feature to create the SymmetricDS system - tables. It begins polling the corp node to try to register (it knows where to contact the corp node via the registration URL you configured in the previous steps). - Since registration is not yet open, the store - node receives an authorization failure (HTTP response of 403). We discuss registration next. - - -
-
- Registering a Node

When an unregistered node starts up, it will attempt to register with the node specified by the registration URL (which is our root node, in almost every case). The registration node centrally controls nodes on the network by allowing registration and returning configuration to a node once it has registered. In this tutorial, the registration node is the root node or 'corp' node, and it also participates in synchronization with other nodes.

So, we next need to open registration for the store node so that it may receive its initial load of data and so that it may receive and send data from and to the corp node. There are several ways to do this. We will use an administration feature available in SymmetricDS and issue a command on the corp node (since it is the node responsible for registration).

- Leave the corp and store SymmetricDS applications that you started in the previous step running, and open a command prompt and navigate to the samples subdirectory of your corp SymmetricDS installation. Open registration for the store node server by executing:

../bin/symadmin --engine corp-000 open-registration store 001

Registration is now open for a node group called "store" with an external identifier of "001". This information matches the settings in store-001.properties for the store node. In SymmetricDS, each node is assigned to a node group and is given an external ID that makes sense for the application. In this tutorial, we have retail stores that run SymmetricDS, so we named the node group representing stores "store" and used numeric identifiers for external IDs starting with "001" ("000" is used to represent the corp node). More information about node groups will be covered in the next chapter.

- Watch the logging output of the store node to see it successfully register with the corp node. The store is configured to attempt registration at a random time interval up to every minute. Once registered, the corp and store nodes are enabled for synchronization!
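If you are curious what the open-registration command changed, one hedged way to see it (assuming the standard SymmetricDS 3 runtime tables) is to query the corp database; the new store row should show registration enabled until the store successfully registers.

----
select n.node_group_id, n.external_id, s.registration_enabled, s.registration_time
from sym_node n
join sym_node_security s on s.node_id = n.node_id;
----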
-
- Sending an Initial Load - Next, we will send an initial load of data to our store, again using a node administration feature run on the corp node. - - - - Open a command prompt and navigate to the corp - samples - subdirectory of the corp SymmetricDS installation. (Note that, in general, most system commands are issued using the corp server directly. - All configuration, for example, is entered at the corp and synchronized to any clients.) - - - - Send an initial load of data to the store node server by executing: - - ../bin/symadmin --engine corp-000 reload-node 001 - - With this command, the server node queues up an initial load for the store node that will be sent the next - time the store performs its pull. The initial load includes data for each table that is configured for - synchronization (assuming its initial load order is a non-negative number, as discussed in later chapters). - - - Watch the logging output of both nodes to see the data transfer. The store is configured to pull data from - the corp node every minute. - - -
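Besides watching the logs, you can follow the initial load in the database itself. A hedged query against the corp database (assuming the standard sym_outgoing_batch columns) lists the batches queued for the store:

----
select batch_id, channel_id, status
from sym_outgoing_batch
where node_id = '001'
order by batch_id;
----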
-
- Pulling Data - Next, we will make a change to the item data in the central office corp node database (we'll add a new item), and observe the data being pulled down to - the store. - - - - Open an interactive SQL session with the corp database. - - - Add a new item for sale, with different prices at store 001 and store 002: - - insert into "item" ("item_id", "name") values (110000055, 'Soft Drink'); - - - insert into "item_selling_price" ("item_id", "store_id", "price") values (110000055, '001', 0.65); - insert into "item_selling_price" ("item_id", "store_id", "price") values (110000055, '002', 1.00); - - - Please note that for MySQL, you'll need to change the double quotes (") in the above commands to back ticks (i.e., `) - since MySQL uses back ticks instead of double quotes for case-sensitive table and column names. - - Once the statements are committed, the data change is captured by SymmetricDS and queued for the store node to pull. - - - - Watch the logging output of both nodes to see the data transfer. The store is configured to pull data from - the corp every minute. - - - Since item_selling_price is configured with a - column match router in this tutorial, specific pricing data changes will be sent (or "routed", in SymmetricDS terms) only to nodes whose store_id matches the node's external ID. - Verify that the new data arrives in the store database using another interactive SQL session. In this case, - the first pricing row will be routed to store 001 only, and the second row would be routed to store 002 (which doesn't exist currently, - so in this case the data change is recorded but routed nowhere and therefore discarded.) - - - -
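To confirm the routing behavior described above, a quick check in the store database should show the new item and only the store 001 price row. These queries assume the sample schema; quote the identifiers if your database requires it.

----
select * from item where item_id = 110000055;
select * from item_selling_price where item_id = 110000055;
----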
-
- Pushing Data - We will now simulate a sale at the store and observe how SymmetricDS pushes the sale transaction to the central office. - - - Open an interactive SQL session with the store node database. - - - Add a new sale to the store node database: - - insert into "sale_transaction" ("tran_id", "store_id", "workstation", "day", "seq") values (1000, '001', '3', - '2007-11-01', 100); - - - insert into "sale_return_line_item" ("tran_id", "item_id", "price", "quantity") values (1000, 110000055, 0.65, - 1); - - Once the statements are committed, the data change is captured and queued for the store node to push. - - - - Watch the logging output of both nodes to see the data transfer. The store is configured to push data to - the corp node every minute. - - -
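As with the pull example, you can verify the pushed sale on the receiving side. These queries, run against the corp database, assume the sample schema:

----
select * from sale_transaction where tran_id = 1000;
select * from sale_return_line_item where tran_id = 1000;
----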
-
- Verifying Outgoing Batches

Now that we have pushed and pulled data, we will demonstrate how you can obtain information about what data has been batched and sent. A batch is used for tracking and sending one or more data changes to a given node. The sending node creates a batch and the receiving node receives and then acknowledges it.

In addition, in SymmetricDS, tables are grouped into data "channels", which, among other things, allows different types of data to synchronize even when other types of data might be in error. For example, if a batch for a given channel is in error, that batch will be retried with each synchronization for that channel until the batch is no longer in error. Only after the batch is no longer in error will additional batches for that channel be sent. In this way, data changes that occur on a given channel are guaranteed to be sent to the destination in the same order they occurred on the source. Batches on a channel without batch errors, however, will not be blocked by the existence of a batch in error on a different channel. In this way, data changes for one channel are not blocked by errors present in another channel.

Explore the outgoing batches by doing the following (a combined example query follows this list):

- Open an interactive SQL session with either the corp or store database.

- Verify that the data change you made was captured:

select * from sym_data order by data_id desc;

Each row represents a row of data that was changed. Data ids are sequentially increasing, so one of the most recent (highest) data ids should be related to your data insert SQLs. The event_type is "I" for insert, "U" for update, or "D" for delete. For insert and update, the captured data values are listed in row_data. For update and delete, the primary key values are listed in pk_data.

- Verify that the data change was included in a batch, using the data_id from the previous step:

select * from sym_data_event where data_id = ?;

Batches are created based on the needed routing to nodes as part of a background job called the Route Job. As part of the Route Job, the data change is assigned to a batch using a batch_id, which is used to track and synchronize the data. The links between batches and data are managed by this sym_data_event table.

- Verify that the data change was batched, sent to the destination, and acknowledged, using the batch_id from the previous step:

select * from sym_outgoing_batch where batch_id = ?;

Batches initially have a status of "NE" when they are new and not yet sent to a node. Once a receiving node acknowledges the batch, the batch status is changed to "OK" for success or "ER" for error (failure). If the batch failed, the error_flag on the batch is also set to 1, since the status of a batch that failed can change as it is being retried.

Understanding these three tables, along with a fourth table discussed in the next section, is key to diagnosing any synchronization issues you might encounter. As you work with SymmetricDS, either when experimenting or starting to use SymmetricDS on your own data, spend time monitoring these tables to better understand how SymmetricDS works.
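The three lookups above can also be combined into one query. This sketch joins the tables on the columns the steps describe (data_id and batch_id) to show each captured change alongside the batch that carried it and that batch's status:

----
select d.data_id, d.event_type, e.batch_id, b.status, b.error_flag
from sym_data d
join sym_data_event e on e.data_id = d.data_id
join sym_outgoing_batch b on b.batch_id = e.batch_id
order by d.data_id desc;
----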
-
- Verifying Incoming Batches - The receiving node keeps track of the batches it acknowledges and records statistics about loading the data. - Duplicate batches are skipped by default, but this behavior can be changed with the incoming.batches.skip.duplicates - runtime property. - Explore incoming batches by doing the following: - - - Open an interactive SQL session with either the corp or store database. - - - Verify that the batch was received and acknowledged, using a batch_id from the previous section: - - select * from sym_incoming_batch where batch_id = ?; - - A batch represents a collection of changes loaded by the node. The sending node that created the batch is - recorded, and the batch's status is either "OK" for success or "ER" for error. - - -
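For a quick health check on the receiving side, a hedged variant of the query above (assuming the standard sym_incoming_batch columns) lists the most recent incoming batches with their status:

----
select batch_id, node_id, channel_id, status, error_flag
from sym_incoming_batch
order by batch_id desc;
----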
- -
- Multi-Homing

Our Quick Start Tutorial is finished. We have successfully set up and performed synchronization between two databases. However, we want to go back and discuss one of the first steps in the tutorial: the step where you installed two copies of SymmetricDS. Feel free to skip this section until a later time if you wish.

In the example above, we placed one properties file in the engines directory of each installed SymmetricDS application. When SymmetricDS was started in the examples above, the application initialized and then created a "SymmetricDS engine" based on the provided property file (again, each engine serves as a SymmetricDS node and is responsible for one particular database).

In reality, though, the SymmetricDS application is capable of starting more than one engine at a time. When SymmetricDS starts, it looks in the engines directory for any files that end in .properties and starts a SymmetricDS engine for each and every properties file found. The --engine command line option is an override for this and will cause SymmetricDS to start only the one engine specified on the command line. A single SymmetricDS application running multiple engines is known as a "multi-homed" SymmetricDS application, and the feature, in general, is known as "multi-homing". (A sketch of such a layout follows.)
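As a hedged illustration of multi-homing (directory names assumed from the tutorial), the engines directory of a single installation would simply hold both properties files, and SymmetricDS would start one engine per file found:

----
sym/symmetric-ds-3.x.x/
  engines/
    corp-000.properties
    store-001.properties
----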
- Multi-Homed version of Tutorial [figure]
-
- So, for our tutorial above, how could we have "multi-homed" the corp and store such that we only had to install a single copy of SymmetricDS? It's fairly simple. The following changes to the steps above would be needed:

- Install a single copy of the SymmetricDS software instead of two copies. You no longer need a directory to represent each of the two machines.

- Instead of copying a single properties file from samples to each separate engines directory, copy both files to the one engines directory. All commands in the tutorial are then run from the one samples directory.

- When you start SymmetricDS, you no longer specify a specific engine, since you want both engines to start. The command, still run from the samples directory, would now be:

../bin/sym --port 8080

Note that we are no longer using port 9090. SymmetricDS now listens on port 8080 for traffic relevant to both the store and corp engines.

- Other than starting the server, all other commands you executed will still have the --engine specification, since you are addressing the command itself to a specific node (engine) of SymmetricDS to open registration, set up the corp server to issue an initial load to store, and so on.
-
-
\ No newline at end of file diff --git a/symmetric-assemble/src/docbook/resources/css/docbook-style.css b/symmetric-assemble/src/docbook/resources/css/docbook-style.css deleted file mode 100644 index 4ec9d83ccb..0000000000 --- a/symmetric-assemble/src/docbook/resources/css/docbook-style.css +++ /dev/null @@ -1,591 +0,0 @@ -/** - * Licensed to JumpMind Inc under one or more contributor - * license agreements. See the NOTICE file distributed - * with this work for additional information regarding - * copyright ownership. JumpMind Inc licenses this file - * to you under the GNU General Public License, version 3.0 (GPLv3) - * (the "License"); you may not use this file except in compliance - * with the License. - * - * You should have received a copy of the GNU General Public License, - * version 3.0 (GPLv3) along with this library; if not, see - * . - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#banner { - background: transparent url( ../images/banner-bg.gif ) no-repeat scroll left top; - border-bottom: 1px solid #fff; - padding:10px; -} - -body { - margin: 0; - padding: 0; - background: url( ../images/book-bg.gif ) top left repeat-x; - font-family: Verdana, Arial, helvetica, sans-serif; -} - -a, a[accesskey="h"], a[accesskey="n"], a[accesskey="u"], a[accesskey="p"] { - font-size: 13px; - color: #0066ff; - text-decoration: none; -} - -a:active { - color: #0066CC; -} - -a:visited { - color: #990066; -} - -a:hover { - color: #0066ff; - text-decoration: underline; -} - -p, li { - font-size: 13px; - line-height: 133%; -} - -tt { - font-size: 110%; - font-family: "Courier New", Courier, monospace; - color: #000000; -} - -dt { - font-size: 13px; -} - -p, dl, dt, dd, blockquote { - color: #000000; - margin-bottom: 3px; - margin-top: 3px; - padding-top: 0; -} - -ol, ul, p { - margin-top: 6px; - margin-bottom: 10px; -} - -/*p, blockquote { - font-size: 90%; -}*/ - -p.releaseinfo { - font-size: 100%; - font-weight: bold; - padding-top: 10px; -} - -p.pubdate { - font-size: 120%; - font-weight: bold; -} - -blockquote { - margin-right: 0; -} - -pre { - padding: 15px; - border-style: solid; - border-width: 1px; - border-color: #CCCCCC; - background-color: #EEEEEE; -} - -ul, ol, li { - list-style: disc; -} - -ul li, ol li { - list-style-image: url( ../images/bullet.gif ); -} - -hr { - width: 100%; - height: 1px; - background-color: transparent; - border-width: 0; - padding: 0; - border-color: transparent; - color: transparent; -} - -/* titles */ - -h1, h2, h3, h4, h6, H6 { - color: #000000; - font-weight: 500; - margin-top: 0; - margin-bottom: 0; - padding: 8px 0 8px 0; -} - -h1.title { - font-weight: 800; - margin-bottom: 8px; - color: #0066ff; -} - -h2.title { - font-weight: 800; - margin-bottom: 8px; -} - -h2.subtitle { - font-weight: 800; - margin-bottom: 20px; -} - -h2.title, h3.title, h4.title, h5.title, h6.title { - color: #454545; -} - -h1 { - font-size: 175%; -} - -h2 { - font-size: 145%; -} - -h3 { - font-size: 133%; - font-weight: bold; -} - -h4 { - font-size: 120%; - font-weight: bold; -} - -h5 { - font-size: 110%; - font-weight: bold; -} - -h6 { - font-size: 100%; - font-weight: bold; -} - -/* Tables */ - -table { - border-collapse: collapse; - border-spacing: 0; - border: 1px solid black; - empty-cells: 
hide; - margin: 10px 0 30px 50px; - width: 90%; -} - -div.table { - margin: 30px 0 30px 0; - border: 1px dashed gray; - padding: 10px; -} - -div .table-contents table { - border: 1px solid black; -} - -div.table > p.title { - padding-left: 10px; -} - -td, th, span { - color: #000000; -} - -td { - font-size: 80%; - padding: 4pt; -} - -td.even { - color: black; - background-color: white; -} - -td.odd { - color: black; - background-color: #DDEEFF; -} - -td[width="40%"] { - font-size: 13px; - color: #003399; -} - -table[summary="Navigation header"] tbody tr th[colspan="3"] { -} - -th { - border: none; - empty-cells: hide; - background-color: #0066CC; - color: white; - font-weight: bold; - font-size: 90%; -} - -table[summary="Navigation footer"] { - border-collapse: collapse; - border-spacing: 0; - border: 1px black; - empty-cells: hide; - margin: 0; - width: 100%; -} - -table[summary="Note"], table[summary="Warning"], table[summary="Tip"] { - border-collapse: collapse; - border-spacing: 0; - border: 1px black; - empty-cells: hide; - margin: 10px 0 10px -20px; - width: 100%; -} - -div.warning TD { - text-align: justify; -} - -/* misc decorations */ - -.navheader, .navfooter { - border: none; -} - -.navfooter { - width: 960px; - padding: 10px 30px 20px 30px; - margin-left: auto; - margin-right: auto; - margin-bottom: 20px; - background-color: #DDEEFF; - border: 1px solid #99ccee; -} - -div.navfooter table { -} - -div.navfooter table td { - padding: 10px; -} - -.variablelist { - padding-top: 10px; - padding-bottom: 10px; - margin: 0; -} - -.term { - font-weight: bold; -} - -.mediaobject { - padding-top: 30px; - padding-bottom: 30px; -} - -.legalnotice { - font-size: 12px; - font-style: italic; -} - -.legalnotice p { - color: #454545; -} - -.sidebar { - float: right; - margin: 10px 0 10px 30px; - padding: 10px 20px 20px 20px; - width: 33%; - border: 1px solid black; - background-color: #F4F4F4; - font-size: 14px; -} - -.property { - font-family: "Courier New", Courier, monospace; -} - -a code { - font-family: Verdana, Arial, monospace; -} - -td code { - font-size: 110%; -} - -div.note * td, div.tip * td, div.warning * td, div.calloutlist * td { - text-align: justify; - font-size: 100%; -} - -.programlisting .interfacename, .programlisting .literal, .programlisting .classname { - font-size: 95%; -} - -.title .interfacename, .title .literal, .title .classname { - font-size: 130%; -} - -.programlisting * .lineannotation, .programlisting * .lineannotation * { - color: #003399; -} - -/* Modifications by tsoulcie may the 10th 2007 */ - -.programlisting { - overflow: auto; -} - -.book, .chapter, .appendix, .part { - width: 900px; - margin-top: 30px; - margin-left: auto; - margin-bottom: 20px; - margin-right: auto; - padding: 20px 60px 20px 60px; - border-top: 1px solid #666; - border-left: 1px solid #666; - border-right: 1px solid #666; - border-bottom: 1px solid #666; - background-color: #fff; -} - -.book .chapter, .book .appendix, .book .part { - margin: 0; - padding: 0; - border-style: none; -} - -.titlepage { - color: #666; -} - -span.application { - /* add some emphasis here */ -} - -.warning { - background: #FFF8DE url( ../images/warning.gif ) no-repeat scroll 10px 12px; - border: 1px solid #EE8C00; - padding: 10px; - margin: 25px 20px; -} - -.warning .title { - font-size: 100%; - padding-top: 0; - margin-left: 20px; -} - -.important { - background: #FFF8DE url( ../images/important.gif ) no-repeat scroll 10px 12px; - border: 1px solid #EE8C00; - padding: 10px; - margin: 25px 20px; -} - -.important 
.title { - font-size: 100%; - padding-top: 0; - margin-left: 40px; -} - -.note { - background: #EBF5FF url( ../images/info.gif ) no-repeat scroll 10px 12px; - border: 1px solid #0074E7; - padding: 10px; - margin: 25px 20px; -} - -.note .title { - font-size: 100%; - padding-top: 0; - margin-left: 20px; -} - -.tip { - background: #e6ffdb url( ../images/tip.gif ) no-repeat scroll 10px 12px; - border: 1px solid #339900; - padding: 10px; - margin: 25px 20px; -} - -.tip .title { - font-size: 100%; - padding-top: 0; - margin-left: 20px; -} - -.msg { - background: #e6ffdb url(../images/success.png ) no-repeat 20px center; - border: 1px solid #339900; - padding: 10px 10px 10px 90px; - margin: 25px 0px; -} - -.msg .title { - font-size: 100%; - padding-top: 0; - margin-left: 20px; -} - -.toc, .list-of-examples { - padding-bottom: 20px; -} - -.list-of-examples dl { - padding-top: 5px; - padding-bottom: 5px; -} - -.list-of-examples dl dt { - padding-top: 3px; - padding-bottom: 2px; -} - -.toc p b, .list-of-examples p b { - color: #666; - font-size: 18px; -} - -.toc .chapter, .toc .appendix, .toc .part { - padding-top: 5px; - display: block; -} - -.toc .chapter a, .toc .appendix a, .toc .part a { - font-weight: bold; -} - -.toc dl dd dl dt { - padding-top: 3px; -} - -.toc dl dd dl dt dd { - padding-bottom: 5px; -} - -.toc dl dd dl dd dl dt { - padding-top: 0; - padding-bottom: 0; -} - -.toc dl dd dl dd dl dt span.section a { - /*font-size: 11px; */ - padding-bottom: 0; - font-weight: normal; -} - -.toc dd { - padding-bottom: 0; -} - -.inlinemediaobject img, .mediaobject img { - - vertical-align: middle; - /* - border: 1px solid #959595; - padding: 10px; - */ -} - -/* Modifications by lkemen + tsoulcie june the 13th 2007 */ - -/* FAQ */ - -div.qandaset .toc td { - padding-bottom: 20px; -} - -div.qandaset .toc td dl dt { - padding: 5px 10px 5px 25px; -} - -div.qandaset .toc td dl dt a { - font-weight: normal; -} - -div.qandaset dl dt { - padding-top: 5px; - padding-bottom: 5px; - font-weight: bold; -} - -div.qandaset dl dd dl { - padding-bottom: 10px; -} - -div.qandaset dl dd dt { - font-weight: normal; - padding: 5px 10px 5px 25px; - background: url( ../images/comment.gif ) no-repeat scroll 5px 5px; -} - -div.qandaset table { - margin: 0; - border: none; - width: 100%; -} - -div.qandaset table h3 { - padding-bottom: 15px; -} - -div.qandaset table tr td { - padding: 0; -} - -div.qandaset table tr.question p { - background: url( ../images/comment.gif ) no-repeat scroll 5px 5px; - padding: 5px 10px 5px 25px; - background-color: #f1f2f3; -} - -div.qandaset table tr p { - margin: 0 0 10px 0; - padding: 0; -} - -div.qandaset table tr.answer ul p, div.qandaset table tr.answer ol p { - padding-left: 0; -} - -div.qandaset table tr.answer p, div.qandaset table tr.answer div.itemizedlist, - div.qandaset table tr.answer div.orderedlist { - padding-left: 25px; -} - -div.qandaset table tr.answer td { - padding-bottom: 30px; -} - -.sect1 .mediaobject { - padding: 0px; - text-align: left; -} - -.sect1 .mediaobject table { - border: none; - width: auto; - margin: 10px 0px 0px 50px; - float: left; -} \ No newline at end of file diff --git a/symmetric-assemble/src/docbook/resources/images/admons/caution.gif b/symmetric-assemble/src/docbook/resources/images/admons/caution.gif deleted file mode 100644 index d9f5e5b1bc..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/admons/caution.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/admons/important.gif 
b/symmetric-assemble/src/docbook/resources/images/admons/important.gif deleted file mode 100644 index d9f5e5b1bc..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/admons/important.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/admons/info.gif b/symmetric-assemble/src/docbook/resources/images/admons/info.gif deleted file mode 100644 index 62a8bb9a59..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/admons/info.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/admons/note.gif b/symmetric-assemble/src/docbook/resources/images/admons/note.gif deleted file mode 100644 index f329d359e5..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/admons/note.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/admons/tip.gif b/symmetric-assemble/src/docbook/resources/images/admons/tip.gif deleted file mode 100644 index 559453c1c7..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/admons/tip.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/admons/warning.gif b/symmetric-assemble/src/docbook/resources/images/admons/warning.gif deleted file mode 100644 index 91c23c65e8..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/admons/warning.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/banner-bg.gif b/symmetric-assemble/src/docbook/resources/images/banner-bg.gif deleted file mode 100644 index 2d2166284f..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/banner-bg.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/banner-logo.gif b/symmetric-assemble/src/docbook/resources/images/banner-logo.gif deleted file mode 100644 index d8212e69f6..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/banner-logo.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/book-bg.gif b/symmetric-assemble/src/docbook/resources/images/book-bg.gif deleted file mode 100644 index 3e495aa241..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/book-bg.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/bullet.gif b/symmetric-assemble/src/docbook/resources/images/bullet.gif deleted file mode 100644 index 900ce7bc38..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/bullet.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/callouts/1.png b/symmetric-assemble/src/docbook/resources/images/callouts/1.png deleted file mode 100644 index 7d473430b7..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/callouts/1.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/callouts/10.png b/symmetric-assemble/src/docbook/resources/images/callouts/10.png deleted file mode 100644 index 997bbc8246..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/callouts/10.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/callouts/2.png b/symmetric-assemble/src/docbook/resources/images/callouts/2.png deleted file mode 100644 index 5d09341b2f..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/callouts/2.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/callouts/3.png 
b/symmetric-assemble/src/docbook/resources/images/callouts/3.png deleted file mode 100644 index ef7b700471..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/callouts/3.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/callouts/4.png b/symmetric-assemble/src/docbook/resources/images/callouts/4.png deleted file mode 100644 index adb8364eb5..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/callouts/4.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/callouts/5.png b/symmetric-assemble/src/docbook/resources/images/callouts/5.png deleted file mode 100644 index 4d7eb46002..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/callouts/5.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/callouts/6.png b/symmetric-assemble/src/docbook/resources/images/callouts/6.png deleted file mode 100644 index 0ba694af6c..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/callouts/6.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/callouts/7.png b/symmetric-assemble/src/docbook/resources/images/callouts/7.png deleted file mode 100644 index 472e96f8ac..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/callouts/7.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/callouts/8.png b/symmetric-assemble/src/docbook/resources/images/callouts/8.png deleted file mode 100644 index 5e60973c21..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/callouts/8.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/callouts/9.png b/symmetric-assemble/src/docbook/resources/images/callouts/9.png deleted file mode 100644 index a0676d26cc..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/callouts/9.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/comment.gif b/symmetric-assemble/src/docbook/resources/images/comment.gif deleted file mode 100644 index 897cf2e3e4..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/comment.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/important.gif b/symmetric-assemble/src/docbook/resources/images/important.gif deleted file mode 100644 index d9f5e5b1bc..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/important.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/info.gif b/symmetric-assemble/src/docbook/resources/images/info.gif deleted file mode 100644 index 62a8bb9a59..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/info.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/success.png b/symmetric-assemble/src/docbook/resources/images/success.png deleted file mode 100644 index a1db4d6ae8..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/success.png and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/tip.gif b/symmetric-assemble/src/docbook/resources/images/tip.gif deleted file mode 100644 index 559453c1c7..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/tip.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/images/warning.gif b/symmetric-assemble/src/docbook/resources/images/warning.gif deleted file mode 100644 
index 91c23c65e8..0000000000 Binary files a/symmetric-assemble/src/docbook/resources/images/warning.gif and /dev/null differ diff --git a/symmetric-assemble/src/docbook/resources/xsl/docbook-fopdf.xsl b/symmetric-assemble/src/docbook/resources/xsl/docbook-fopdf.xsl deleted file mode 100644 index 0fb6f6b573..0000000000 --- a/symmetric-assemble/src/docbook/resources/xsl/docbook-fopdf.xsl +++ /dev/null @@ -1,507 +0,0 @@ - - - - - - -]> - - - - - - - - - - blue - - - - - - - - - - - - - - - - - - - - - - - - - x - - - - - - - - - - - - - - - - - - - - - - - - - - - - Copyright ©right; 2007 - 2013 - - - , - - - - - - - - - - - - - - - - - - - - - - - no - GIF - - - - - - - - - - - - - - -5em - -5em - - - - - - - - - - - SymmetricDS - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 1 - 1 - 1 - - 1 - - - - - - book toc,title - - - - 3 - - - - - - - - - USletter - 1 - 0 - 1 - - - 5mm - 10mm - 10mm - - 15mm - 10mm - 0mm - - 18mm - 18mm - - - 0pc - - - - - - start - false - - - 12 - 10 - - - 1.3 - - - - - - - 0.7em - - - - - - 18cm - - - 10pt - - - - - 4pt - 4pt - 4pt - 4pt - - - - 0.1pt - 0.1pt - - - - - 1 - - - - - - - - left - bold - - - pt - - - - - - - - - - - - - - - 0.8em - 0.8em - 0.8em - - - pt - - 0.1em - 0.1em - 0.1em - - - 0.6em - 0.6em - 0.6em - - - pt - - 0.1em - 0.1em - 0.1em - - - 0.4em - 0.4em - 0.4em - - - pt - - 0.1em - 0.1em - 0.1em - - - - - bold - - - pt - - false - 0.4em - 0.6em - 0.8em - - - - - - - - - pt - - - - - 1em - 1em - 1em - #444444 - solid - 0.1pt - 0.5em - 0.5em - 0.5em - 0.5em - 0.5em - 0.5em - - - - 1 - - #EEEEEE - #CCCCCC - - - - - - 0 - 1 - - - 90 - - - - - - resources/images/admons/ - .gif - - - - - - - - figure after - example before - equation before - table before - procedure before - - - - 1 - - - - -0.3em - -0.5em - 0em - 0.8em - 0.4em - 1.2em - 1.6em - - - - - - - - - - - - - - - - - - - diff --git a/symmetric-assemble/src/docbook/resources/xsl/docbook-html-chunk.xsl b/symmetric-assemble/src/docbook/resources/xsl/docbook-html-chunk.xsl deleted file mode 100644 index 43431e815c..0000000000 --- a/symmetric-assemble/src/docbook/resources/xsl/docbook-html-chunk.xsl +++ /dev/null @@ -1,268 +0,0 @@ - - - - - - - - - - - - - - - - - - - 0 - 1 - - - - ./ - - - 1 - 1 - 1 - 0 - - - - - - book toc,title - chapter toc,title - book/section toc,title - qandadiv nop - qandaset toc - - - - 3 - - - - - 1 - 1 - 1 - 1 - 3 - - - - - 1 - 90 - - - - - - figure after - example after - equation after - table after - procedure after - - - - , - - - - - - - - -
-

Authors

-

- -

-
- - - - - - - - - - - - - 1 - - - - - - - - - - - - - - - - - -
diff --git a/symmetric-assemble/src/docbook/resources/xsl/docbook-html.xsl b/symmetric-assemble/src/docbook/resources/xsl/docbook-html.xsl deleted file mode 100644 index 337cf37237..0000000000 --- a/symmetric-assemble/src/docbook/resources/xsl/docbook-html.xsl +++ /dev/null @@ -1,150 +0,0 @@ - - - - - - - - - - - - - - - - - - - html.css - - - 1 - 1 - 1 - 0 - - - - - - - book toc,title - chapter toc - article/appendix toc - qandadiv nop - qandaset toc - - - - 3 - - - - - - 1 - 1 - 1 - 1 - 3 - - - - - 1 - - - 90 - - - - - 0 - - - - - figure after - example after - equation after - table after - procedure after - - - - , - - - - - - - - -
-

Authors

-

- -

-
- - - - - - - - - - - - - - -
diff --git a/symmetric-assemble/src/docbook/setup.xml b/symmetric-assemble/src/docbook/setup.xml deleted file mode 100644 index 029feece01..0000000000 --- a/symmetric-assemble/src/docbook/setup.xml +++ /dev/null @@ -1,202 +0,0 @@ - - - - - Setup - -
- Engine Files

Each node requires properties that allow it to connect to a database and register with a parent node. Properties are configured in a file named xxxxx.properties that is placed in the engines directory of the SymmetricDS install. The file is usually named according to the engine.name, but that is not a requirement.

To give a node its identity, the following properties are required. Any other documented parameter can be overridden for a specific engine in an engine's properties file. Note that you can use the variable $(hostName) to represent the host name of the machine when defining these properties (for example, external.id=$(hostName)).

- engine.name: An arbitrary name that is used to access a specific engine using an HTTP URL. Each node configured in the engines directory must have a unique engine name. The engine name is also used for the domain name of registered JMX beans.

- group.id: The node group that this node is a member of. Synchronization is specified between node groups, which means you only need to specify it once for multiple nodes in the same group.

- external.id: The external id for this node has meaning to the user and provides integration into the system where it is deployed. For example, it might be a retail store number or a region number. The external id can be used in expressions for conditional and subset data synchronization. Behind the scenes, each node has a unique sequence number for tracking synchronization events. That makes it possible to assign the same external id to multiple nodes, if desired.

- sync.url: The URL where this node can be contacted for synchronization. At startup and during each heartbeat, the node updates its entry in the database with this URL. The sync url is of the format: http://{hostname}:{port}/{webcontext}/sync/{engine.name}. The {webcontext} is blank for a standalone deployment. It will typically be the name of the war file for an application server deployment. The {engine.name} can be left blank if there is only one engine deployed in a SymmetricDS server.

When a new node is first started, it has no information about synchronizing. It contacts the registration server in order to join the network and receive its configuration. The configuration for all nodes is stored on the registration server, and the URL must be specified in the following property:

- registration.url: The URL where this node can connect for registration to receive its configuration. The registration server is part of SymmetricDS and is enabled as part of the deployment. This is typically equal to the value of the sync.url of the registration server.

Note that a registration server node is defined as one whose registration.url is either (a) blank, or (b) identical to its sync.url.

For a deployment where the database connection pool should be created using a JDBC driver, set the following properties:

- db.driver: The class name of the JDBC driver.

- db.url: The JDBC URL used to connect to the database.

- db.user: The database username, which is used to login, create, and update SymmetricDS tables.

- db.password: The password for the database user.

See the parameter documentation for additional parameters that can be specified in the engine properties file. A sample engine file combining these properties follows.
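Putting the properties above together, a minimal engine file might look like the following sketch. The engine name, group, external id, URL, and H2 connection values are illustrative (taken from the tutorial's corp node and a hypothetical H2 database), not required defaults; the blank registration.url marks this node as the registration server.

----
# engines/corp-000.properties (illustrative values)
engine.name=corp-000
group.id=corp
external.id=000

sync.url=http://localhost:8080/sync/corp-000
# blank registration.url: this node is the registration server
registration.url=

db.driver=org.h2.Driver
db.url=jdbc:h2:file:corp
db.user=symmetric
db.password=
----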
-
diff --git a/symmetric-assemble/src/docbook/upgrade.xml b/symmetric-assemble/src/docbook/upgrade.xml deleted file mode 100644 index 9537d268ad..0000000000 --- a/symmetric-assemble/src/docbook/upgrade.xml +++ /dev/null @@ -1,160 +0,0 @@ - - - - Upgrading from 2.x - - Please test carefully when upgrading SymmetricDS 2 to SymmetricDS 3. Note that - - table's primary key changed. The automatic upgrade backs up and copies the table. This might take some time if the - table is large. - - - The following parameters are no longer supported: - - - - db.spring.bean.name - - The connection pool is no longer wired in via the Spring Framework - - - - - db.tx.timeout.seconds - - Transactions are no longer managed by the Spring Framework - - - - - db.default.schema - - The default schema is always the schema associated with the database user - - - - - db.jndi.name - - JNDI data sources are no longer supported - - - - - auto.upgrade - - Database upgrade is controlled by - auto.config.database - - - - - routing.data.reader.type - - As of this release, there is only one data reader type. - - - - - job.purge.max.num.data.events.to.delete.in.tx - - The name of this property changed to - job.purge.max.num.data.event.batches.to.delete.in.tx - - - - - web.base.servlet.path - - No longer needed - - - - - dataloader.allow.missing.delete - - Controlled by conflict detection and resolution - - - - - dataloader.enable.fallback.insert - - Controlled by conflict detection and resolution - - - - - dataloader.enable.fallback.update - - Controlled by conflict detection and resolution - - - - - dataloader.enable.fallback.savepoint - - No longer needed - - - - - db.force.delimited.identifier.mode.on - - No longer needed - - - - - db.force.delimited.identifier.mode.off - - No longer needed - - - - - - The way extension points work has changed. SymmetricDS services are no longer Spring injectable into extension - points. Please use the - ISymmetricEngineAware - interface to get a handle to the engine which gives access to services. - - - The following extension points are no longer supported: - - - - IDataLoaderFilter - - Replaced by IDatabaseWriterFilter - - - - - IBatchListener - - Replaced by IDatabaseWriterFilter - - - - - IExtractorFilter - - No longer supported. Rarely used. - - - - - IColumnFilter - - No longer needed. Please use the transformation feature. - - - - - \ No newline at end of file diff --git a/symmetric-assemble/src/docbook/user-guide.xml b/symmetric-assemble/src/docbook/user-guide.xml deleted file mode 100644 index bf4b118c4d..0000000000 --- a/symmetric-assemble/src/docbook/user-guide.xml +++ /dev/null @@ -1,96 +0,0 @@ - - - - - SymmetricDS User Guide - - - Eric - Long - - - Chris - Henson - - - Mark - Hanes - - - Greg - Wilmer - - - Version 3.6 for SymmetricDS - - 2007 - 2014 - JumpMind, Inc - - - v3.6 - - - - Permission to use, copy, modify, and distribute the SymmetricDS User Guide Version - 3.6 for any purpose and without fee is hereby granted in perpetuity, provided that - the above copyright notice and this paragraph appear in all copies. - - - - - Preface - - SymmetricDS is an open-source, web-enabled, database independent, data synchronization software application. It uses - web and database technologies to replicate tables between relational databases in near - real time. The software was designed to scale for a large number of databases, work - across low-bandwidth connections, and withstand periods of network outages. 
- - - This User Guide introduces SymmetricDS and its uses for data synchronization. It is - intended for users who want to be quickly familiarized with the software, configure it, - and use its many features. This version of the guide was generated on at . - - - - - - - - - - - - - - - - - - diff --git a/symmetric-assemble/src/docbook/version-numbering.xml b/symmetric-assemble/src/docbook/version-numbering.xml deleted file mode 100644 index ad81f0bbdb..0000000000 --- a/symmetric-assemble/src/docbook/version-numbering.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - Version Numbering - - The software is released with a version number based on the - - Apache Portable Runtime Project - - version guidelines. In summary, the version is denoted as three integers in the - format of MAJOR.MINOR.PATCH. Major versions are incompatible at the API level, and - they can include any kind of change. Minor versions are compatible with older - versions at the API and binary level, and they can introduce new functions or remove - old ones. Patch versions are perfectly compatible, and they are released to fix - defects. - - diff --git a/symmetric-assemble/src/torque/doc/Control.vm b/symmetric-assemble/src/torque/doc/Control.vm deleted file mode 100644 index 761353f486..0000000000 --- a/symmetric-assemble/src/torque/doc/Control.vm +++ /dev/null @@ -1,37 +0,0 @@ -## Licensed to the Apache Software Foundation (ASF) under one -## or more contributor license agreements. See the NOTICE file -## distributed with this work for additional information -## regarding copyright ownership. The ASF licenses this file -## to you under the Apache License, Version 2.0 (the -## "License"); you may not use this file except in compliance -## with the License. You may obtain a copy of the License at -## -## http://www.apache.org/licenses/LICENSE-2.0 -## -## Unless required by applicable law or agreed to in writing, -## software distributed under the License is distributed on an -## "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -## KIND, either express or implied. See the License for the -## specific language governing permissions and limitations -## under the License. -#set ( $basePath = $generator.OutputPath) - -#foreach ($dataModel in $dataModels) - - #if ($outputFormat=="html") - #set ( $outFile = "${dataModel.FileName}.html" ) - #else - #set ( $outFile = "${dataModel.FileName}.xml" ) - #end - - File to be created: $outFile - - $generator.parse("doc/$outputFormat/datamodel.vm",$outFile,"dataModel",$dataModel) -#end - -#if ($outputFormat=="html") - #set ( $outFile = "dataModel.css" ) - #if (!$files.file($basePath,$outFile).exists()) - $generator.parse("doc/$outputFormat/datamodel.css.vm",$outFile,"dataModel.css",$dataModel) - #end -#end diff --git a/symmetric-assemble/src/torque/doc/docbook/datamodel.css.vm b/symmetric-assemble/src/torque/doc/docbook/datamodel.css.vm deleted file mode 100644 index 2b3f1a9290..0000000000 --- a/symmetric-assemble/src/torque/doc/docbook/datamodel.css.vm +++ /dev/null @@ -1,117 +0,0 @@ -## Licensed to the Apache Software Foundation (ASF) under one -## or more contributor license agreements. See the NOTICE file -## distributed with this work for additional information -## regarding copyright ownership. The ASF licenses this file -## to you under the Apache License, Version 2.0 (the -## "License"); you may not use this file except in compliance -## with the License. 
You may obtain a copy of the License at -## -## http://www.apache.org/licenses/LICENSE-2.0 -## -## Unless required by applicable law or agreed to in writing, -## software distributed under the License is distributed on an -## "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -## KIND, either express or implied. See the License for the -## specific language governing permissions and limitations -## under the License. -/* -Copyright 2001-2005 The Apache Software Foundation. - -Licensed under the Apache License, Version 2.0 (the "License") -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -=============================================================== -css stylesheet for the html documentation generated by Torque -will not be overwritten any more once the file exists -=============================================================== -*/ - -/* for the html body */ -body { font-family: Verdana; font-size: 10pt; color:black; } -/* for all links */ -.link { } - -/* for the whole summary table */ -.summarytable { margin:0px; padding:0px; border-collapse:collapse; width:100%; } -/* for all td's of the summary table */ -.summaryborder { border:1px solid black; } - -/* for the headline of the summary table */ -.summaryhead { } -/* for all td's of the headline of the summary table */ -.summaryheadbackground { background-color:#CCCCCC; } -/* for the different cells of the headline of the summary table */ -/* e.g. use display:none for not displaying a certain column */ -.summaryheadtablename { } -.summaryheadjavaname { } -.summaryheaddescription { } - -/* for all regular rows of the summary table */ -.summaryrow { } -/* for all td's in regular rows of the summary table */ -.summarybackground { background-color:white; } -/* for the different cells in the regular rows of the summary table */ -/* e.g. use display:none for not displaying a certain column */ -.summarytablename { } -.summaryjavaname { } -.summarydescription { } - -/* for the hrefs in the summary table */ -.summarytablenamelink { } - -/* for the headline of the detail tables */ -.detailtable { margin:0px; padding:0px; border-collapse:collapse; width:100%; } -/* for all td's of the summary table */ -.detailborder { border:1px solid black; } - -/* for the headline of the detail tables */ -.detailhead { } -/* for all td's in the headline of the detail tables */ -.detailheadbackground { background-color:#CCCCCC; } -/* for the different cells in the headline of the detail tables */ -/* e.g. 
use display:none for not displaying a certain column */ -.detailheadname { } -.detailheadtype { } -.detailheadsize { } -.detailheaddefault { } -.detailheadjavaname { } -.detailheadpk { } -.detailheadfk { } -.detailheadnotnull { } -.detailheaddescription { } - -/* for all regular rows in the detail tables */ -.detailrow { } -/* for all td's in the regular rows in the detail tables */ -.detailbackground { background-color:white; } -/* for the different cells in the regular rows of the detail tables */ -/* e.g use display:none for not displaying a certain column */ -.detailname { } -.detailtype { } -.detailsize { } -.detaildefault { } -.detailjavaname { } -.detailpk { } -.detailfk { } -.detailnotnull { } -.detaildescription { } - -/* for the links to the referenced tables for foreign key column names */ -.detailforeignkeylink { color:#44AA44; } -/* for the name cell of primary keys */ -.primarykey { background-color:#FFCCCC; } -/* for the name cell of foreign keys */ -.foreignkey { } -/* for the name cell of not null columns */ -.notnull { } diff --git a/symmetric-assemble/src/torque/doc/docbook/datamodel.vm b/symmetric-assemble/src/torque/doc/docbook/datamodel.vm deleted file mode 100644 index edf11897c1..0000000000 --- a/symmetric-assemble/src/torque/doc/docbook/datamodel.vm +++ /dev/null @@ -1,65 +0,0 @@ - -## Licensed to the Apache Software Foundation (ASF) under one -## or more contributor license agreements. See the NOTICE file -## distributed with this work for additional information -## regarding copyright ownership. The ASF licenses this file -## to you under the Apache License, Version 2.0 (the -## "License"); you may not use this file except in compliance -## with the License. You may obtain a copy of the License at -## -## http://www.apache.org/licenses/LICENSE-2.0 -## -## Unless required by applicable law or agreed to in writing, -## software distributed under the License is distributed on an -## "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -## KIND, either express or implied. See the License for the -## specific language governing permissions and limitations -## under the License. -#set ( $database = $dataModel ) - - - Data Model - - What follows is the complete SymmetricDS data model. - Note that all tables are prepended with a configurable prefix so that multiple instances of SymmetricDS may coexist in the - same database. The default prefix is - sym_. - - - - SymmetricDS configuration is entered by the user into the data model to control the behavior of what data is synchronized - to which nodes. - - -
- Configuration Data Model - - - - - -
-
- - At runtime, the configuration is used to capture data changes and route them to nodes. The data changes are placed - together in a single unit called a batch that can be loaded by another node. Outgoing batches are delivered to nodes - and acknowledged. Incoming batches are received and loaded. History is recorded for batch status changes and - statistics. - -
- Runtime Data Model - - - - - -
-
- - #foreach ($tbl in $database.tables) - $generator.parse("doc/docbook/table.vm",$outFile,"table",$tbl) - #end - -
diff --git a/symmetric-assemble/src/torque/doc/docbook/table.vm b/symmetric-assemble/src/torque/doc/docbook/table.vm deleted file mode 100644 index b5e110d52e..0000000000 --- a/symmetric-assemble/src/torque/doc/docbook/table.vm +++ /dev/null @@ -1,60 +0,0 @@ -## Licensed to the Apache Software Foundation (ASF) under one -## or more contributor license agreements. See the NOTICE file -## distributed with this work for additional information -## regarding copyright ownership. The ASF licenses this file -## to you under the Apache License, Version 2.0 (the -## "License"); you may not use this file except in compliance -## with the License. You may obtain a copy of the License at -## -## http://www.apache.org/licenses/LICENSE-2.0 -## -## Unless required by applicable law or agreed to in writing, -## software distributed under the License is distributed on an -## "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -## KIND, either express or implied. See the License for the -## specific language governing permissions and limitations -## under the License. - -
- $table.Name.toUpperCase() - $table.Description - - $table.Name.toUpperCase() - - - - - - - - - - Name - Type / Size - Default - PK FK - not null - Description - - - - #foreach ($col in $table.Columns) - - - #if ($col.isForeignKey() == true) - $col.Name - #else$col.Name.toUpperCase()#end - - $col.Type #if ($col.printSize() && $col.printSize().length() > 0) $col.printSize()#end - - #if ($col.DefaultValue) $col.DefaultValue#end - #if ($col.isPrimaryKey()==true)PK#end - #if ($col.isForeignKey()==true)FK#end - #if ($col.isNotNull()==true)X#end - #if ($col.Description) $col.Description#end - - #end - - -
-
diff --git a/symmetric-client/src/main/java/org/jumpmind/symmetric/db/db2/Db2SymmetricDialect.java b/symmetric-client/src/main/java/org/jumpmind/symmetric/db/db2/Db2SymmetricDialect.java index 1a4bbce643..e7f5ef8c0d 100644 --- a/symmetric-client/src/main/java/org/jumpmind/symmetric/db/db2/Db2SymmetricDialect.java +++ b/symmetric-client/src/main/java/org/jumpmind/symmetric/db/db2/Db2SymmetricDialect.java @@ -23,6 +23,7 @@ import org.jumpmind.db.platform.IDatabasePlatform; import org.jumpmind.db.sql.ISqlTransaction; import org.jumpmind.db.util.BinaryEncoding; +import org.jumpmind.symmetric.common.ParameterConstants; import org.jumpmind.symmetric.db.AbstractSymmetricDialect; import org.jumpmind.symmetric.db.ISymmetricDialect; import org.jumpmind.symmetric.model.Channel; @@ -34,13 +35,13 @@ */ public class Db2SymmetricDialect extends AbstractSymmetricDialect implements ISymmetricDialect { - // DB2 Variables - public static final String VAR_SOURCE_NODE_ID = "_source_node_id"; - public static final String VAR_TRIGGER_DISABLED = "_trigger_disabled"; - - public static final String FUNCTION_TRANSACTION_ID = "_transactionid"; - static final String SQL_DROP_FUNCTION = "DROP FUNCTION $(functionName)"; - + // DB2 Variables + public static final String VAR_SOURCE_NODE_ID = "_source_node_id"; + public static final String VAR_TRIGGER_DISABLED = "_trigger_disabled"; + + public static final String FUNCTION_TRANSACTION_ID = "_transactionid"; + static final String SQL_DROP_FUNCTION = "DROP FUNCTION $(functionName)"; + public Db2SymmetricDialect(IParameterService parameterService, IDatabasePlatform platform) { super(parameterService, platform); this.triggerTemplate = new Db2TriggerTemplate(this); @@ -50,76 +51,90 @@ public boolean createOrAlterTablesIfNecessary(String... tables) { boolean tablesCreated = super.createOrAlterTablesIfNecessary(tables); if (tablesCreated) { log.info("Resetting auto increment columns for {}", parameterService.getTablePrefix() + "_data"); - long dataId = platform.getSqlTemplate().queryForLong("select max(data_id) from " + parameterService.getTablePrefix() - + "_data") + 1; - platform.getSqlTemplate().update("alter table " + parameterService.getTablePrefix() - + "_data alter column data_id restart with " + dataId); + long dataId = platform.getSqlTemplate().queryForLong("select max(data_id) from " + parameterService.getTablePrefix() + "_data") + + 1; + platform.getSqlTemplate() + .update("alter table " + parameterService.getTablePrefix() + "_data alter column data_id restart with " + dataId); } return tablesCreated; } @Override - protected boolean doesTriggerExistOnPlatform(String catalog, String schema, String tableName, - String triggerName) { - schema = schema == null ? (platform.getDefaultSchema() == null ? null : platform - .getDefaultSchema()) : schema; + protected boolean doesTriggerExistOnPlatform(String catalog, String schema, String tableName, String triggerName) { + schema = schema == null ? (platform.getDefaultSchema() == null ? null : platform.getDefaultSchema()) : schema; return platform.getSqlTemplate().queryForInt( "SELECT COUNT(*) FROM " + getSystemSchemaName() + ".SYSTRIGGERS WHERE NAME = ? AND SCHEMA = ?", new Object[] { triggerName.toUpperCase(), schema.toUpperCase() }) > 0; } - + @Override public String massageDataExtractionSql(String sql, Channel channel) { - /* Remove tranaction_id from the sql because DB2 doesn't support transactions. In fact, - * DB2 iSeries does return results because the query asks for every column in the table PLUS - * the router_id. 
We max out the size of the table on iSeries so when you try to return the
+        /*
+         * Remove transaction_id from the sql because DB2 doesn't support
+         * transactions. In fact, DB2 iSeries does return results because the
+         * query asks for every column in the table PLUS the router_id. We max
+         * out the size of the table on iSeries so when you try to return the
          * entire table + additional columns we go past the max size for a row
          */
-        sql = sql.replace("d.transaction_id, ", "");
+        if (!this.getParameterService().is(ParameterConstants.DB2_CAPTURE_TRANSACTION_ID, false)) {
+            sql = sql.replace("d.transaction_id, ", "");
+        }
         return super.massageDataExtractionSql(sql, channel);
     }
-    
+
     protected String getSystemSchemaName() {
-        return "SYSIBM";
+        return "SYSIBM";
     }

     @Override
-    public void createRequiredDatabaseObjects() {
+    public void createRequiredDatabaseObjects() {
         String sql = "select " + getSourceNodeExpression() + " from " + parameterService.getTablePrefix() + "_node_identity";
-        try {
-            platform.getSqlTemplate().query(sql);
+        try {
+            platform.getSqlTemplate().query(sql);
+        } catch (Exception e) {
+            log.debug("Failed checking for variable (usually means it doesn't exist yet) '" + sql + "'", e);
+            platform.getSqlTemplate().update("create variable " + getSourceNodeExpression() + " varchar(50)");
         }
-        catch (Exception e) {
-            log.debug("Failed checking for variable (usually means it doesn't exist yet) '" + sql + "'", e);
-            platform.getSqlTemplate().update("create variable " + getSourceNodeExpression() + " varchar(50)");
-        }
-        sql = "select " + parameterService.getTablePrefix() + VAR_TRIGGER_DISABLED + " from " + parameterService.getTablePrefix() + "_node_identity";
-        try {
-            platform.getSqlTemplate().query(sql);
+        sql = "select " + parameterService.getTablePrefix() + VAR_TRIGGER_DISABLED + " from " + parameterService.getTablePrefix()
+                + "_node_identity";
+        try {
+            platform.getSqlTemplate().query(sql);
+        } catch (Exception e) {
+            log.debug("Failed checking for variable (usually means it doesn't exist yet) '" + sql + "'", e);
+            platform.getSqlTemplate().update("create variable " + parameterService.getTablePrefix() + VAR_TRIGGER_DISABLED + " varchar(50)");
         }
-        catch (Exception e) {
-            log.debug("Failed checking for variable (usually means it doesn't exist yet) '" + sql + "'", e);
-            platform.getSqlTemplate().update("create variable " + parameterService.getTablePrefix() + VAR_TRIGGER_DISABLED + " varchar(50)");
-        }
-        
-        String transactionIdFunction = this.parameterService.getTablePrefix() + FUNCTION_TRANSACTION_ID;
-        
+
+        if (this.getParameterService().is(ParameterConstants.DB2_CAPTURE_TRANSACTION_ID, false)) {
+            String transactionIdFunction = this.parameterService.getTablePrefix() + FUNCTION_TRANSACTION_ID;
+
             sql = "CREATE OR REPLACE FUNCTION $(functionName)() "
                     + " RETURNS VARCHAR(100) "
                     + " LANGUAGE SQL "
                     + " READS SQL DATA "
                     + " RETURN "
-                    + " select c.application_id || '_' || u.uow_id "
-                    + " from sysibmadm.mon_connection_summary c ,sysibmadm.mon_current_uow u "
+                    + " select c.application_id || '_' || u.uow_id "
+                    + " from sysibmadm.mon_connection_summary c ,sysibmadm.mon_current_uow u "
                     + " where u.application_handle = c.application_handle and c.application_id = application_id() ";
-        
-        install(sql, transactionIdFunction);
-    }
+            try {
+                install(sql, transactionIdFunction);
+            }
+            catch (Exception e) {
+                log.warn("Unable to install function " + this.parameterService.getTablePrefix() + FUNCTION_TRANSACTION_ID);
+            }
+        }
+    }

     @Override
     public void dropRequiredDatabaseObjects() {
-        String transactionIdFunction = this.parameterService.getTablePrefix() + FUNCTION_TRANSACTION_ID;
-        uninstall(SQL_DROP_FUNCTION, transactionIdFunction);
+        if (this.getParameterService().is(ParameterConstants.DB2_CAPTURE_TRANSACTION_ID, false)) {
+            String transactionIdFunction = this.parameterService.getTablePrefix() + FUNCTION_TRANSACTION_ID;
+            try {
+                uninstall(SQL_DROP_FUNCTION, transactionIdFunction);
+            } catch (Exception e) {
+                log.warn("Unable to uninstall function " + this.parameterService.getTablePrefix() + FUNCTION_TRANSACTION_ID);
+            }
+        }
     }

@@ -143,8 +158,8 @@ public void enableSyncTriggers(ISqlTransaction transaction) {
     }

     public void disableSyncTriggers(ISqlTransaction transaction, String nodeId) {
-        transaction.prepareAndExecute("set " + parameterService.getTablePrefix() + VAR_TRIGGER_DISABLED + " = 1");
-        if (nodeId != null) {
+        transaction.prepareAndExecute("set " + parameterService.getTablePrefix() + VAR_TRIGGER_DISABLED + " = 1");
+        if (nodeId != null) {
             transaction.prepareAndExecute("set " + getSourceNodeExpression() + " = '" + nodeId + "'");
         }
     }

@@ -154,17 +169,19 @@ public String getSyncTriggersExpression() {
     }

     @Override
-    public String getTransactionTriggerExpression(String defaultCatalog, String defaultSchema,
-            Trigger trigger) {
-        return "sym_transactionid()";
+    public String getTransactionTriggerExpression(String defaultCatalog, String defaultSchema, Trigger trigger) {
+        if (this.getParameterService().is(ParameterConstants.DB2_CAPTURE_TRANSACTION_ID, false)) {
+            return "sym_transactionid()";
+        } else {
+            return "null";
+        }
     }

     @Override
     public boolean supportsTransactionId() {
-        return true;
+        return this.getParameterService().is(ParameterConstants.DB2_CAPTURE_TRANSACTION_ID, false);
     }
-
     public void cleanDatabase() {
     }
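Editor's note: the Db2SymmetricDialect hunks above make every use of the transaction-id function conditional on the new parameter. A minimal sketch of the switch being consulted (constant and service are from this changeset; the surrounding wiring is assumed):

[source, java]
----
// Sketch only: the flag defaults to false, so DB2 behaves as before unless enabled.
boolean captureTxId = parameterService.is(ParameterConstants.DB2_CAPTURE_TRANSACTION_ID, false);
// true  -> sym_transactionid() is installed and referenced by the trigger templates
// false -> triggers record null and "d.transaction_id, " is stripped from extraction SQL
----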
diff --git a/symmetric-client/src/main/java/org/jumpmind/symmetric/db/nuodb/NuoDbSymmetricDialect.java b/symmetric-client/src/main/java/org/jumpmind/symmetric/db/nuodb/NuoDbSymmetricDialect.java
index a08f424da2..77c2f52ca0 100644
--- a/symmetric-client/src/main/java/org/jumpmind/symmetric/db/nuodb/NuoDbSymmetricDialect.java
+++ b/symmetric-client/src/main/java/org/jumpmind/symmetric/db/nuodb/NuoDbSymmetricDialect.java
@@ -20,9 +20,12 @@
  */
 package org.jumpmind.symmetric.db.nuodb;

+import java.util.List;
+
 import org.jumpmind.db.platform.IDatabasePlatform;
 import org.jumpmind.db.platform.PermissionType;
 import org.jumpmind.db.sql.ISqlTransaction;
+import org.jumpmind.db.sql.mapper.StringMapper;
 import org.jumpmind.db.util.BinaryEncoding;
 import org.jumpmind.symmetric.common.ParameterConstants;
 import org.jumpmind.symmetric.db.AbstractSymmetricDialect;
@@ -132,6 +135,18 @@ public void removeTrigger(StringBuilder sqlBuffer, String catalogName, String sc
             }
         }
     }
+
+    @Override
+    public void cleanupTriggers() {
+        List<String> names = platform.getSqlTemplate().query("select triggername from system.triggers where triggername like '" + parameterService.getTablePrefix().toUpperCase() + "_%'", new StringMapper());
+        int count = 0;
+        for (String name : names) {
+            count += platform.getSqlTemplate().update("drop trigger " + name + " if exists");
+        }
+        if (count > 0) {
+            log.info("Removed {} triggers", count);
+        }
+    }

     public void disableSyncTriggers(ISqlTransaction transaction, String nodeId) {
         transaction.prepareAndExecute("select " + this.parameterService.getTablePrefix() + "_set_session_variable('"
                 + SYNC_TRIGGERS_DISABLED_USER_VARIABLE + "', '1') from dual");
diff --git a/symmetric-client/src/main/java/org/jumpmind/symmetric/io/MsSqlBulkDatabaseWriter.java
b/symmetric-client/src/main/java/org/jumpmind/symmetric/io/MsSqlBulkDatabaseWriter.java index 22fc26f9c8..b3970bb1a0 100644 --- a/symmetric-client/src/main/java/org/jumpmind/symmetric/io/MsSqlBulkDatabaseWriter.java +++ b/symmetric-client/src/main/java/org/jumpmind/symmetric/io/MsSqlBulkDatabaseWriter.java @@ -36,6 +36,7 @@ import org.jumpmind.db.platform.IDatabasePlatform; import org.jumpmind.db.sql.JdbcSqlTransaction; import org.jumpmind.db.util.BinaryEncoding; +import org.jumpmind.symmetric.SymmetricException; import org.jumpmind.symmetric.io.data.CsvData; import org.jumpmind.symmetric.io.data.DataEventType; import org.jumpmind.symmetric.io.data.writer.DataWriterStatisticConstants; @@ -78,7 +79,19 @@ public MsSqlBulkDatabaseWriter(IDatabasePlatform platform, public boolean start(Table table) { this.table = table; - if (super.start(table)) { + if (super.start(table)) { + if (sourceTable != null && targetTable == null) { + String qualifiedName = sourceTable.getFullyQualifiedTableName(); + if (writerSettings.isIgnoreMissingTables()) { + if (!missingTables.contains(qualifiedName)) { + log.warn("Did not find the {} table in the target database", qualifiedName); + missingTables.add(qualifiedName); + } + } else { + throw new SymmetricException("Could not load the %s table. It is not in the target database", qualifiedName); + } + } + needsBinaryConversion = false; if (! batch.getBinaryEncoding().equals(BinaryEncoding.HEX)) { for (Column column : targetTable.getColumns()) { diff --git a/symmetric-client/src/main/java/org/jumpmind/symmetric/io/PostgresBulkDatabaseWriter.java b/symmetric-client/src/main/java/org/jumpmind/symmetric/io/PostgresBulkDatabaseWriter.java index 240a25a281..d7ded62be8 100644 --- a/symmetric-client/src/main/java/org/jumpmind/symmetric/io/PostgresBulkDatabaseWriter.java +++ b/symmetric-client/src/main/java/org/jumpmind/symmetric/io/PostgresBulkDatabaseWriter.java @@ -83,7 +83,7 @@ protected void bulkWrite(CsvData data) { } } } - + useDefaultDataWriter=false; switch (dataEventType) { case INSERT: startCopy(); @@ -114,6 +114,7 @@ protected void bulkWrite(CsvData data) { case DELETE: default: endCopy(); + useDefaultDataWriter=true; super.write(data); break; } diff --git a/symmetric-core/src/main/java/org/jumpmind/symmetric/AbstractSymmetricEngine.java b/symmetric-core/src/main/java/org/jumpmind/symmetric/AbstractSymmetricEngine.java index b2d3a70c54..328478222b 100644 --- a/symmetric-core/src/main/java/org/jumpmind/symmetric/AbstractSymmetricEngine.java +++ b/symmetric-core/src/main/java/org/jumpmind/symmetric/AbstractSymmetricEngine.java @@ -568,12 +568,13 @@ protected boolean loadFromScriptIfProvided() { } if (fileUrl != null) { + log.info("Executing {} '{}' ({})", ParameterConstants.AUTO_CONFIGURE_REG_SVR_SQL_SCRIPT, sqlScript, fileUrl); new SqlScript(fileUrl, symmetricDialect.getPlatform().getSqlTemplate(), true, SqlScriptReader.QUERY_ENDS, getSymmetricDialect().getPlatform() .getSqlScriptReplacementTokens()).execute(); loaded = true; } else { - log.info("Could not find the sql script: {} to execute. We would have run it if we had found it"); + log.warn("Could not find the {}: '{}' to execute. 
We would have run it if we had found it", ParameterConstants.AUTO_CONFIGURE_REG_SVR_SQL_SCRIPT, sqlScript); } } } diff --git a/symmetric-core/src/main/java/org/jumpmind/symmetric/common/ParameterConstants.java b/symmetric-core/src/main/java/org/jumpmind/symmetric/common/ParameterConstants.java index e91840a6e9..6332b484be 100644 --- a/symmetric-core/src/main/java/org/jumpmind/symmetric/common/ParameterConstants.java +++ b/symmetric-core/src/main/java/org/jumpmind/symmetric/common/ParameterConstants.java @@ -395,6 +395,8 @@ private ParameterConstants() { public final static String STATISTIC_MANAGER_CLASS = "statistic.manager.class"; + public final static String DB2_CAPTURE_TRANSACTION_ID = "db2.capture.transaction.id"; + public static Map getParameterMetaData() { return parameterMetaData; } diff --git a/symmetric-core/src/main/java/org/jumpmind/symmetric/service/IDataService.java b/symmetric-core/src/main/java/org/jumpmind/symmetric/service/IDataService.java index 22d93951e9..b68b498519 100644 --- a/symmetric-core/src/main/java/org/jumpmind/symmetric/service/IDataService.java +++ b/symmetric-core/src/main/java/org/jumpmind/symmetric/service/IDataService.java @@ -70,10 +70,14 @@ public interface IDataService { */ public String sendSQL(String nodeId, String catalogName, String schemaName, String tableName, String sql); + public void insertReloadEvents(Node targetNode, boolean reverse, ProcessInfo processInfo, List activeHistories, List triggerRouters); + public void insertReloadEvents(Node targetNode, boolean reverse, ProcessInfo processInfo); public void insertReloadEvents(Node targetNode, boolean reverse, List reloadRequests, ProcessInfo processInfo); + public void insertReloadEvents(Node targetNode, boolean reverse, List reloadRequests, ProcessInfo processInfo, List activeHistories, List triggerRouters); + public boolean insertReloadEvent(TableReloadRequest request, boolean deleteAtClient); public long insertReloadEvent(ISqlTransaction transaction, Node targetNode, diff --git a/symmetric-core/src/main/java/org/jumpmind/symmetric/service/ITriggerRouterService.java b/symmetric-core/src/main/java/org/jumpmind/symmetric/service/ITriggerRouterService.java index e4d94cbe03..682eb227b4 100644 --- a/symmetric-core/src/main/java/org/jumpmind/symmetric/service/ITriggerRouterService.java +++ b/symmetric-core/src/main/java/org/jumpmind/symmetric/service/ITriggerRouterService.java @@ -48,7 +48,7 @@ public interface ITriggerRouterService { public List getActiveTriggerHistories(String tableName); - public List getTriggerRouters(boolean refreshCache); + public List getTriggerRouters(boolean refreshCache); /** * Return a list of triggers used when extraction configuration data during @@ -188,6 +188,9 @@ public TriggerHistory getNewestTriggerHistoryForTrigger(String triggerId, String public Map getFailedTriggers(); + public Map> fillTriggerRoutersByHistIdAndSortHist( + String sourceNodeGroupId, String targetNodeGroupId, List triggerHistories, List triggerRouters); + public Map> fillTriggerRoutersByHistIdAndSortHist( String sourceNodeGroupId, String targetNodeGroupId, List triggerHistories); diff --git a/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/DataExtractorService.java b/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/DataExtractorService.java index ee54fa0803..bb2aabb6ef 100644 --- a/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/DataExtractorService.java +++ 
b/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/DataExtractorService.java
@@ -861,6 +861,10 @@ protected OutgoingBatch extractOutgoingBatch(ProcessInfo processInfo, Node targe
         long extractTimeInMs = 0l;
         long byteCount = 0l;
         long transformTimeInMs = 0l;
+
+        if (currentBatch.getStatus() == Status.NE) {
+            triggerReExtraction(currentBatch);
+        }

         if (currentBatch.getStatus() == Status.IG) {
             cleanupIgnoredBatch(sourceNode, targetNode, currentBatch, writer);
@@ -963,6 +967,14 @@
         return currentBatch;
     }

+    protected void triggerReExtraction(OutgoingBatch currentBatch) {
+        // Allow user to reset batch status to NE in the DB to trigger a batch re-extract
+        IStagedResource resource = getStagedResource(currentBatch);
+        if (resource != null) {
+            resource.delete();
+        }
+    }
+
     protected ExtractDataReader buildExtractDataReader(Node sourceNode, Node targetNode, OutgoingBatch currentBatch,
             ProcessInfo processInfo) {
         return new ExtractDataReader(symmetricDialect.getPlatform(), new SelectFromSymDataSource(currentBatch,
                 sourceNode, targetNode, processInfo));
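Editor's note: the triggerReExtraction() hook above lets an operator force a batch to be rebuilt from source data simply by resetting its status. A hypothetical illustration (the sqlTemplate handle, batch id, and node id are made up; the table name follows the SymmetricDS runtime schema):

[source, java]
----
// Hypothetical: once the batch is back in NE status, the next extract deletes the
// staged payload and re-extracts it instead of resending the stale staging file.
sqlTemplate.update(
        "update sym_outgoing_batch set status = 'NE' where batch_id = ? and node_id = ?",
        12345L, "store-001");
----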
diff --git a/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/DataService.java b/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/DataService.java
index 77c871f162..df934e771b 100644
--- a/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/DataService.java
+++ b/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/DataService.java
@@ -65,6 +65,7 @@
 import org.jumpmind.symmetric.io.data.CsvData;
 import org.jumpmind.symmetric.io.data.CsvUtils;
 import org.jumpmind.symmetric.io.data.DataEventType;
+import org.jumpmind.symmetric.io.data.transform.TransformPoint;
 import org.jumpmind.symmetric.job.PushHeartbeatListener;
 import org.jumpmind.symmetric.load.IReloadListener;
 import org.jumpmind.symmetric.model.Channel;
@@ -347,15 +348,34 @@ private String getReloadChannelIdForTrigger(Trigger trigger, Map
     public void insertReloadEvents(Node targetNode, boolean reverse, List<TableReloadRequest> reloadRequests, ProcessInfo processInfo) {
-
+        ITriggerRouterService triggerRouterService = engine.getTriggerRouterService();
+        Node sourceNode = engine.getNodeService().findIdentity();
+        insertReloadEvents(targetNode, reverse, reloadRequests, processInfo, triggerRouterService.getActiveTriggerHistories(), triggerRouterService.getAllTriggerRoutersForReloadForCurrentNode(sourceNode.getNodeGroupId(), targetNode.getNodeGroupId()));
+    }
+
+    @Override
+    public void insertReloadEvents(Node targetNode, boolean reverse, ProcessInfo processInfo, List<TriggerHistory> activeHistories, List<TriggerRouter> triggerRouters) {
+        insertReloadEvents(targetNode, reverse, null, processInfo, activeHistories, triggerRouters);
+    }
+
+    @Override
+    public void insertReloadEvents(Node targetNode, boolean reverse, List<TableReloadRequest> reloadRequests, ProcessInfo processInfo, List<TriggerHistory> activeHistories, List<TriggerRouter> triggerRouters) {
         if (engine.getClusterService().lock(ClusterConstants.SYNCTRIGGERS)) {
             try {
-                synchronized (engine.getTriggerRouterService()) {
+                INodeService nodeService = engine.getNodeService();
+                ITriggerRouterService triggerRouterService = engine.getTriggerRouterService();
+
+                synchronized (triggerRouterService) {
                     engine.getClusterService().lock(ClusterConstants.SYNCTRIGGERS);

                     boolean isFullLoad = reloadRequests == null
@@ -374,10 +394,7 @@
                     List<TriggerHistory> triggerHistories = new ArrayList<TriggerHistory>();

                     if (isFullLoad) {
-                        triggerHistories = triggerRouterService.getActiveTriggerHistories();
+                        triggerHistories.addAll(activeHistories);
                         if (reloadRequests != null && reloadRequests.size() == 1) {
                             String channelId = reloadRequests.get(0).getChannelId();
                             if (channelId != null) {
                                 List<TriggerHistory> channelTriggerHistories = new ArrayList<TriggerHistory>();

                                 for (TriggerHistory history : triggerHistories) {
-                                    if (channelId.equals(engine.getTriggerRouterService().getTriggerById(history.getTriggerId()).getChannelId())) {
+                                    if (channelId.equals(findChannelFor(history, triggerRouters))) {
                                         channelTriggerHistories.add(history);
                                     }
                                 }
                                 triggerHistories = channelTriggerHistories;
                             }
                         }
-                    }
-                    else {
+                    } else {
                         for (TableReloadRequest reloadRequest : reloadRequests) {
                             triggerHistories.addAll(engine.getTriggerRouterService()
                                     .getActiveTriggerHistories(new Trigger(reloadRequest.getTriggerId(), null)));
@@ -430,7 +447,7 @@ public void insertReloadEvents(Node targetNode, boolean reverse, List
                     Map<Integer, List<TriggerRouter>> triggerRoutersByHistoryId = triggerRouterService
                             .fillTriggerRoutersByHistIdAndSortHist(sourceNode.getNodeGroupId(),
-                                    targetNode.getNodeGroupId(), triggerHistories);
+                                    targetNode.getNodeGroupId(), triggerHistories, triggerRouters);

                     if (isFullLoad) {
                         callReloadListeners(true, targetNode, transactional, transaction, loadId);
@@ -541,6 +558,15 @@ public void insertReloadEvents(Node targetNode, boolean reverse, List
+    private String findChannelFor(TriggerHistory history, List<TriggerRouter> triggerRouters) {
+        for (TriggerRouter triggerRouter : triggerRouters) {
+            if (triggerRouter.getTrigger().getTriggerId().equals(history.getTriggerId())) {
+                return triggerRouter.getTrigger().getChannelId();
+            }
+        }
+        return null;
+    }

     @SuppressWarnings("unchecked")
     protected Map<String, TableReloadRequest> convertReloadListToMap(List<TableReloadRequest> reloadRequests) {
@@ -821,7 +847,11 @@ private void insertSQLBatchesForReload(Node targetNode, long loadId, String crea
         if (reloadRequests != null && reloadRequests.size() > 0) {
             int sqlEventsSent = 0;
-            for (TriggerHistory triggerHistory : triggerHistories) {
+
+            List<TriggerHistory> copyTriggerHistories = new ArrayList<TriggerHistory>(triggerHistories);
+            Collections.reverse(copyTriggerHistories);
+
+            for (TriggerHistory triggerHistory : copyTriggerHistories) {
                 List<TriggerRouter> triggerRouters = triggerRoutersByHistoryId.get(triggerHistory
                         .getTriggerHistoryId());
@@ -960,15 +990,18 @@ protected int getDataCountForReload(Table table, Node targetNode, String selectS
     protected int getTransformMultiplier(Table table, TriggerRouter triggerRouter) {
         int transformMultiplier = 0;
-        for (TransformService.TransformTableNodeGroupLink transform : engine.getTransformService().getTransformTables(false)) {
-            if (triggerRouter.getRouter().getNodeGroupLink().equals(transform.getNodeGroupLink()) &&
-                transform.getSourceTableName().equals(table.getName())) {
-                transformMultiplier++;
+        List<TransformService.TransformTableNodeGroupLink> transforms = engine.getTransformService()
+                .findTransformsFor(triggerRouter.getRouter().getNodeGroupLink(), TransformPoint.EXTRACT);
+        if (transforms != null) {
+            for (TransformService.TransformTableNodeGroupLink transform : transforms) {
+                if (transform.getSourceTableName().equals(table.getName())) {
+                    transformMultiplier++;
+                }
             }
         }
         transformMultiplier = Math.max(1, transformMultiplier);
         return transformMultiplier;
-    }
+    }

     private void insertFileSyncBatchForReload(Node targetNode, long loadId, String createBy,
             boolean transactional, ISqlTransaction transaction, ProcessInfo processInfo) {
@@ -1256,7 +1289,7 @@ protected long insertData(ISqlTransaction transaction, final Data data) {
     }

     protected void insertDataEvent(ISqlTransaction transaction, DataEvent dataEvent) {
-        this.insertDataEvent(transaction, dataEvent.getDataId(), dataEvent.getBatchId(),
+        insertDataEvent(transaction, dataEvent.getDataId(), dataEvent.getBatchId(),
                 dataEvent.getRouterId());
     }
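Editor's note: the new insertReloadEvents() overloads exist so callers can fetch trigger metadata once and reuse it across many nodes, instead of re-querying inside the loop. A sketch of the intended call pattern (sourceGroupId, targetGroupId, and nodesToLoad are placeholders):

[source, java]
----
List<TriggerHistory> activeHistories = triggerRouterService.getActiveTriggerHistories();
List<TriggerRouter> triggerRouters = triggerRouterService
        .getAllTriggerRoutersForReloadForCurrentNode(sourceGroupId, targetGroupId);
for (Node targetNode : nodesToLoad) {
    // Reuses the prefetched metadata for every node that needs a reload.
    dataService.insertReloadEvents(targetNode, false, processInfo, activeHistories, triggerRouters);
}
----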
@@ -1365,7 +1398,7 @@ public long insertDataAndDataEventAndOutgoingBatch(ISqlTransaction transaction,
         TriggerHistory history = data.getTriggerHistory();
         if (history != null && channelId == null) {
             Trigger trigger = engine.getTriggerRouterService().getTriggerById(
-                    history.getTriggerId());
+                    history.getTriggerId(), false);
             channelId = getReloadChannelIdForTrigger(trigger, engine.getConfigurationService()
                     .getChannels(false));
         }
@@ -1422,7 +1455,7 @@ private void insertNodeSecurityUpdate(ISqlTransaction transaction, String nodeId
         Data data = createData(transaction, null, null, tablePrefix + "_node_security",
                 " t.node_id = '" + nodeIdRecord + "'");
         if (data != null) {
-            insertDataAndDataEventAndOutgoingBatch(transaction, data, targetNodeId,
+            insertDataAndDataEventAndOutgoingBatch(transaction, data, targetNodeId,
                     Constants.UNKNOWN_ROUTER_ID, isLoad, loadId, createBy, Status.NE, channelId);
         }
     }
diff --git a/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/FileSyncService.java b/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/FileSyncService.java
index 5db59ac481..197bc53faa 100644
--- a/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/FileSyncService.java
+++ b/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/FileSyncService.java
@@ -940,15 +940,17 @@ protected List<IncomingBatch> processZip(InputStream is, String sourceNodeId,
                     if (target != null) {
                         ex = target;
                     }
-                } else if (ex instanceof EvalError) {
-                    log.error("Failed to evalulate the script:\n{}", script);
                 }
+
+                String nodeIdBatchId = sourceNodeId + "-" + batchId;

-                if (ex instanceof FileConflictException) {
+                if (ex instanceof EvalError) {
+                    log.error("Failed to evaluate the script as part of file sync batch " + nodeIdBatchId + "\n" + script + "\n", ex);
+                } else if (ex instanceof FileConflictException) {
                     log.error(ex.getMessage() + ". 
Failed to process file sync batch " - + batchId); + + nodeIdBatchId); } else { - log.error("Failed to process file sync batch " + batchId, ex); + log.error("Failed to process file sync for batch " + nodeIdBatchId, ex); } incomingBatch.setErrorFlag(true); diff --git a/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/OutgoingBatchService.java b/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/OutgoingBatchService.java index 2e02e90d55..0deb7e654c 100644 --- a/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/OutgoingBatchService.java +++ b/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/OutgoingBatchService.java @@ -576,7 +576,7 @@ public List getNodesInError() { } public List getNextOutgoingBatchForEachNode() { - return sqlTemplate.query( + return sqlTemplateDirty.query( getSql("getNextOutgoingBatchForEachNodeSql"), new OutgoingBatchMapper(true, true)); } diff --git a/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/RouterService.java b/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/RouterService.java index 8b4a4b8612..b8f4c11f0c 100644 --- a/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/RouterService.java +++ b/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/RouterService.java @@ -90,9 +90,11 @@ import org.jumpmind.symmetric.route.TransactionalBatchAlgorithm; import org.jumpmind.symmetric.service.ClusterConstants; import org.jumpmind.symmetric.service.IConfigurationService; +import org.jumpmind.symmetric.service.IDataService; import org.jumpmind.symmetric.service.IExtensionService; import org.jumpmind.symmetric.service.INodeService; import org.jumpmind.symmetric.service.IRouterService; +import org.jumpmind.symmetric.service.ITriggerRouterService; import org.jumpmind.symmetric.statistic.StatisticConstants; import org.jumpmind.util.FormatUtils; @@ -233,6 +235,8 @@ protected void insertInitialLoadEvents() { try { INodeService nodeService = engine.getNodeService(); + IDataService dataService = engine.getDataService(); + ITriggerRouterService triggerRouterService = engine.getTriggerRouterService(); Node identity = nodeService.findIdentity(); if (identity != null) { boolean isClusteringEnabled = parameterService.is(ParameterConstants.CLUSTER_LOCKING_ENABLED); @@ -242,13 +246,25 @@ protected void insertInitialLoadEvents() { .getRegistrationTime() != null)) { List nodeSecurities = findNodesThatAreReadyForInitialLoad(); + List activeHistories = triggerRouterService.getActiveTriggerHistories(); + Map> triggerRoutersByTargetNodeGroupId = new HashMap>(); + if (nodeSecurities != null && nodeSecurities.size() > 0) { gapDetector.setFullGapAnalysis(true); boolean reverseLoadFirst = parameterService .is(ParameterConstants.INITIAL_LOAD_REVERSE_FIRST); boolean isInitialLoadQueued = false; + + for (NodeSecurity security : nodeSecurities) { - if (engine.getTriggerRouterService().getActiveTriggerHistories().size() > 0) { + if (activeHistories.size() > 0) { + Node targetNode = engine.getNodeService().findNode(security.getNodeId()); + List triggerRouters = triggerRoutersByTargetNodeGroupId.get(targetNode.getNodeGroupId()); + if (triggerRouters == null) { + triggerRouters = triggerRouterService.getAllTriggerRoutersForReloadForCurrentNode(parameterService.getNodeGroupId(), targetNode.getNodeGroupId()); + triggerRoutersByTargetNodeGroupId.put(targetNode.getNodeGroupId(), triggerRouters); + } + boolean thisMySecurityRecord = security.getNodeId().equals( identity.getNodeId()); boolean 
reverseLoadQueued = security.isRevInitialLoadEnabled(); @@ -260,9 +276,9 @@ protected void insertInitialLoadEvents() { } else if (!thisMySecurityRecord && registered && initialLoadQueued && (!reverseLoadFirst || !reverseLoadQueued)) { long ts = System.currentTimeMillis(); - engine.getDataService().insertReloadEvents( - engine.getNodeService().findNode(security.getNodeId()), - false, processInfo); + dataService.insertReloadEvents( + targetNode, + false, processInfo, activeHistories, triggerRouters); isInitialLoadQueued = true; ts = System.currentTimeMillis() - ts; if (ts > Constants.LONG_OPERATION_THRESHOLD) { @@ -297,7 +313,7 @@ protected void insertInitialLoadEvents() { } } - processTableRequestLoads(identity, processInfo); + processTableRequestLoads(identity, processInfo, activeHistories, triggerRoutersByTargetNodeGroupId); } } @@ -309,7 +325,7 @@ protected void insertInitialLoadEvents() { } - public void processTableRequestLoads(Node source, ProcessInfo processInfo) { + public void processTableRequestLoads(Node source, ProcessInfo processInfo, List activeHistories, Map> triggerRoutersByTargetNodeGroupId) { List loadsToProcess = engine.getDataService().getTableReloadRequestToProcess(source.getNodeId()); if (loadsToProcess.size() > 0) { processInfo.setStatus(ProcessInfo.Status.CREATING); @@ -345,9 +361,17 @@ public void processTableRequestLoads(Node source, ProcessInfo processInfo) { } for (Map.Entry> entry : requestsSplitByLoad.entrySet()) { + Node targetNode = engine.getNodeService().findNode(entry.getKey().split("::")[0]); + ITriggerRouterService triggerRouterService = engine.getTriggerRouterService(); + List triggerRouters = triggerRoutersByTargetNodeGroupId.get(targetNode.getNodeGroupId()); + if (triggerRouters == null) { + triggerRouters = triggerRouterService.getAllTriggerRoutersForReloadForCurrentNode(parameterService.getNodeGroupId(), targetNode.getNodeGroupId()); + triggerRoutersByTargetNodeGroupId.put(targetNode.getNodeGroupId(), triggerRouters); + } + engine.getDataService().insertReloadEvents( - engine.getNodeService().findNode(entry.getKey().split("::")[0]), - false, entry.getValue(), processInfo); + targetNode, + false, entry.getValue(), processInfo, activeHistories, triggerRouters); } diff --git a/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/TriggerRouterService.java b/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/TriggerRouterService.java index ecabe431c0..3dbcabe91e 100644 --- a/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/TriggerRouterService.java +++ b/symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/TriggerRouterService.java @@ -2089,21 +2089,38 @@ public Map getFailedTriggers() { } public TriggerHistory findTriggerHistoryForGenericSync() { - String triggerTableName = TableConstants.getTableName(tablePrefix, - TableConstants.SYM_NODE); - TriggerHistory history = findTriggerHistory(null, null, triggerTableName - .toUpperCase()); + String triggerTableName = TableConstants.getTableName(tablePrefix, TableConstants.SYM_NODE); + try { + Collection histories = historyMap.values(); + for (TriggerHistory triggerHistory : histories) { + if (triggerHistory.getSourceTableName().equalsIgnoreCase(triggerTableName) && triggerHistory.getInactiveTime() == null) { + return triggerHistory; + } + } + } catch (Exception e) { + log.warn("Failed to find trigger history for generic sync", e); + } + TriggerHistory history = findTriggerHistory(null, null, triggerTableName.toUpperCase()); if (history == null) { history = 
findTriggerHistory(null, null, triggerTableName);
         }
         return history;
     }

+    @Override
     public Map<Integer, List<TriggerRouter>> fillTriggerRoutersByHistIdAndSortHist(
             String sourceNodeGroupId, String targetNodeGroupId, List<TriggerHistory> triggerHistories) {
+        return fillTriggerRoutersByHistIdAndSortHist(sourceNodeGroupId, targetNodeGroupId, triggerHistories, getAllTriggerRoutersForReloadForCurrentNode(
+                sourceNodeGroupId, targetNodeGroupId));
+    }
+
+    @Override
+    public Map<Integer, List<TriggerRouter>> fillTriggerRoutersByHistIdAndSortHist(
+            String sourceNodeGroupId, String targetNodeGroupId, List<TriggerHistory> triggerHistories, List<TriggerRouter> triggerRouters) {
+
         final Map<Integer, List<TriggerRouter>> triggerRoutersByHistoryId = fillTriggerRoutersByHistId(
-                sourceNodeGroupId, targetNodeGroupId, triggerHistories);
+                sourceNodeGroupId, targetNodeGroupId, triggerHistories, triggerRouters);
         final List<Table> sortedTables = getSortedTablesFor(triggerHistories);

         Comparator<TriggerHistory> comparator = new Comparator<TriggerHistory>() {
@@ -2147,13 +2164,19 @@ public int compare(TriggerHistory o1, TriggerHistory o2) {
         return triggerRoutersByHistoryId;
     }
-    
+
+
+    @Override
     public Map<Integer, List<TriggerRouter>> fillTriggerRoutersByHistId(
             String sourceNodeGroupId, String targetNodeGroupId, List<TriggerHistory> triggerHistories) {
+        return fillTriggerRoutersByHistId(sourceNodeGroupId, targetNodeGroupId, triggerHistories, getAllTriggerRoutersForReloadForCurrentNode(
+                sourceNodeGroupId, targetNodeGroupId));
+    }
+
+    protected Map<Integer, List<TriggerRouter>> fillTriggerRoutersByHistId(
+            String sourceNodeGroupId, String targetNodeGroupId, List<TriggerHistory> triggerHistories, List<TriggerRouter> triggerRouters) {
-        List<TriggerRouter> triggerRouters = new ArrayList<TriggerRouter>(
-                getAllTriggerRoutersForReloadForCurrentNode(
-                        sourceNodeGroupId, targetNodeGroupId));
+        triggerRouters = new ArrayList<TriggerRouter>(triggerRouters);

         Map<Integer, List<TriggerRouter>> triggerRoutersByHistoryId = new HashMap<Integer, List<TriggerRouter>>(
                 triggerHistories.size());
@@ -2184,7 +2207,8 @@ protected List<Table> getSortedTablesFor(List<TriggerHistory> histories) {
                 tables.add(table);
             }
         }
-        return Database.sortByForeignKeys(tables);
+        return Database.sortByForeignKeys(tables, null);
     }

     protected void awaitTermination(ExecutorService executor, List<Future<?>> futures) {
diff --git a/symmetric-core/src/main/resources/symmetric-default.properties b/symmetric-core/src/main/resources/symmetric-default.properties
index 57ccd7b818..5c1d420bac 100644
--- a/symmetric-core/src/main/resources/symmetric-default.properties
+++ b/symmetric-core/src/main/resources/symmetric-default.properties
@@ -1321,7 +1321,7 @@ cache.table.time.ms=3600000
 #
 # DatabaseOverridable: true
 # Tags: other
-cache.channel.time.ms=60000
+cache.channel.time.ms=600000

 # This is the amount of time monitor entries will be cached before re-reading them from the database.
 #
@@ -1418,6 +1418,11 @@ db2.zseries.version=DSN08015
 # Tags: AS400
 as400.cast.clob.to=DBCLOB

+# Turn on the capture of transaction id for DB2 systems that support it.
+# DatabaseOverridable: false
+# Tags: DB2
+db2.capture.transaction.id=false
+
 # Specify the type of line feed to use in JMX console methods. Possible values are: text or html.
 # Tags: other
 jmx.line.feed=text
diff --git a/symmetric-db/src/main/java/org/jumpmind/db/model/Database.java b/symmetric-db/src/main/java/org/jumpmind/db/model/Database.java
index b27d247681..fd2be8c450 100644
--- a/symmetric-db/src/main/java/org/jumpmind/db/model/Database.java
+++ b/symmetric-db/src/main/java/org/jumpmind/db/model/Database.java
@@ -23,11 +23,13 @@
 import java.sql.Types;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;

 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
@@ -64,18 +66,6 @@ public class Database implements Serializable, Cloneable {

     private Map tableIndexCache = new HashMap();

-    public static Table[] sortByForeignKeys(Table... tables) {
-        if (tables != null) {
-            List<Table> list = new ArrayList<Table>(tables.length);
-            for (Table table : tables) {
-                list.add(table);
-            }
-            list = sortByForeignKeys(list);
-            tables = list.toArray(new Table[list.size()]);
-        }
-        return tables;
-    }
-
     /**
      * Implements modified topological sort of tables (@see topological
@@ -88,48 +78,79 @@ public static Table[] sortByForeignKeys(Table... tables) {
      * foreign key for table B then table B will precede table A in the
      * list.
      */
-    public static List<Table> sortByForeignKeys(List<Table> tables) {
-        List<Table> sorted = new ArrayList<Table>(tables.size());
-        List<Table> visited = new ArrayList<Table>(tables.size());
-        List<Table> stack = new ArrayList<Table>();
-        Map<String, Table> tableMap = new HashMap<String, Table>();
-
-        for (int i = 0; i < tables.size(); i++) {
-            if (tables.get(i) != null) {
-                tableMap.put(tables.get(i).getName(), tables.get(i));
+    public static List<Table> sortByForeignKeys(List<Table> tables, Map<String, Table> allTables) {
+        if (allTables == null) {
+            allTables = new HashMap<String, Table>();
+            for (Table t : tables) {
+                allTables.put(t.getName(), t);
             }
         }
-
-        for (int i = 0; i < tables.size(); i++) {
-            sortByForegnKeysVisit(tables.get(i), tableMap, sorted, visited, stack);
+
+        Set<Table> resolved = new HashSet<Table>();
+        Set<Table> temporary = new HashSet<Table>();
+        List<Table> finalList = new ArrayList<Table>();
+
+        for (Table t : tables) {
+            resolveForeignKeyOrder(t, allTables, resolved, temporary, finalList, null);
         }
-
-        return sorted;
+
+        Collections.reverse(finalList);
+        return finalList;
     }
-
-    private static void sortByForegnKeysVisit(Table table, Map<String, Table> tableMap,
-            List<Table> sorted, List<Table> visited, List<Table> stack) {
-        if (visited.contains(table)) {
-            return;
+
+    public static void resolveForeignKeyOrder(Table t, Map<String, Table> allTables, Set<Table> resolved, Set<Table> temporary, List<Table> finalList, Table parentTable) {
+        if (resolved.contains(t)) {
+            return;
+        }
+        if (temporary.contains(t)) {
+            log.info("Possible circular dependency: " + t.getName());
+            return;
+        }
+        if (!temporary.contains(t) && !resolved.contains(t)) {
+            temporary.add(t);
+            if (t == null) {
+                if (parentTable != null) {
+                    StringBuilder dependentTables = new StringBuilder();
+                    for (ForeignKey fk : parentTable.getForeignKeys()) {
+                        if (allTables.get(fk.getForeignTableName()) == null) {
+                            if (dependentTables.length() > 0) {
+                                dependentTables.append(", ");
+                            }
+                            dependentTables.append(fk.getForeignTableName());
+                        }
+                    }
+                    log.warn("Unable to resolve foreign keys for table " + parentTable.getName() + " because the following dependent tables are not configured for replication [" + dependentTables.toString() + "].");
+                }
+            } else {
+                for (ForeignKey fk : t.getForeignKeys()) {
+                    Table fkTable = allTables.get(fk.getForeignTableName());
+                    if (fkTable == t) {
+                        // self-referencing foreign key; skip it to avoid infinite recursion
+                    } else {
+                        resolveForeignKeyOrder(fkTable, allTables, resolved, temporary, finalList, t);
+                    }
+                }
+            }
+            resolved.add(t);
+            finalList.add(0, t);
         }
-        if (stack.contains(table)) {
-            return;
-        } // cycle detected - ignore this FK
-        visited.add(table);
-        stack.add(table);
-
-        for (ForeignKey fk : table.getForeignKeys()) {
-            Table foreignTable = tableMap.get(fk.getForeignTableName());
-            if (foreignTable != null) { // ignore foreign keys to tables outside
-                                        // of the input set
-                sortByForegnKeysVisit(foreignTable, tableMap, sorted, visited, stack);
-            }
-        }
-
-        sorted.add(table);
-        stack.remove(stack.size() - 1);
     }
+
+    public static String printTables(List<Table> tables) {
+        StringBuffer sb = new StringBuffer();
+        for (Table t : tables) {
+            sb.append(t.getName() + ",");
+        }
+        return sb.toString();
+    }
+
+    public static Table[] sortByForeignKeys(Table... tables) {
+        if (tables != null) {
+            List<Table> list = new ArrayList<Table>(tables.length);
+            for (Table table : tables) {
+                list.add(table);
+            }
+            list = sortByForeignKeys(list, null);
+            tables = list.toArray(new Table[list.size()]);
+        }
+        return tables;
+    }

     /**
      * Adds all tables from the other database to this database. Note that the
      * other database is not changed.
diff --git a/symmetric-db/src/main/java/org/jumpmind/db/model/TypeMap.java b/symmetric-db/src/main/java/org/jumpmind/db/model/TypeMap.java
index 69dc87b6d3..f0176fa20f 100644
--- a/symmetric-db/src/main/java/org/jumpmind/db/model/TypeMap.java
+++ b/symmetric-db/src/main/java/org/jumpmind/db/model/TypeMap.java
@@ -27,6 +27,7 @@
 import java.util.HashSet;
 import java.util.Set;

+import org.apache.commons.lang.StringUtils;
 import org.jumpmind.db.platform.PlatformUtils;

 /**
@@ -188,6 +189,19 @@ public static Integer getJdbcTypeCode(String typeName) {
         return (Integer)_typeNameToTypeCode.get(typeName.toUpperCase());
     }
+
+    public static String getJdbcTypeDescriptions(int[] types) {
+        StringBuilder buff = new StringBuilder(32);
+        for (int type : types) {
+            buff.append(getJdbcTypeName(type)).append(", ");
+        }
+
+        if (buff.length() > 0) {
+            buff.setLength(buff.length() - 2); // lose the trailing ", "
+        }
+
+        return buff.toString();
+    }

     /**
      * Returns the JDBC type name that corresponds to the given type code
@@ -199,7 +213,11 @@
      */
     public static String getJdbcTypeName(int typeCode) {
-        return (String)_typeCodeToTypeName.get(new Integer(typeCode));
+        String description = _typeCodeToTypeName.get(new Integer(typeCode));
+        if (StringUtils.isEmpty(description)) {
+            description = String.valueOf(typeCode);
+        }
+        return description;
     }

     /**
diff --git a/symmetric-db/src/main/java/org/jumpmind/db/sql/LogSqlBuilder.java b/symmetric-db/src/main/java/org/jumpmind/db/sql/LogSqlBuilder.java
index 09d7b6504a..fc3d4734e2 100644
--- a/symmetric-db/src/main/java/org/jumpmind/db/sql/LogSqlBuilder.java
+++ b/symmetric-db/src/main/java/org/jumpmind/db/sql/LogSqlBuilder.java
@@ -129,7 +129,7 @@ public String buildDynamicSqlForLog(String sql, Object[] args, int[] types) {
         return dynamicSql.toString();
     }

-    protected String formatValue(Object object, int type) {
+    public String formatValue(Object object, int type) {
         if (object == null) {
             return "null";
         }
diff --git a/symmetric-db/src/test/java/org/jumpmind/db/model/DatabaseTest.java b/symmetric-db/src/test/java/org/jumpmind/db/model/DatabaseTest.java
index c873304bf0..2660346009 100644
--- a/symmetric-db/src/test/java/org/jumpmind/db/model/DatabaseTest.java
+++ b/symmetric-db/src/test/java/org/jumpmind/db/model/DatabaseTest.java
@@ -47,7 +47,7 @@ public void testOrderingOfFourTables() {
         list.add(t4);
         list.add(t3);

-        list = Database.sortByForeignKeys(list);
+        list = Database.sortByForeignKeys(list, null);

         assertTrue(list.toString(), list.indexOf(t4) < list.indexOf(t1));
         assertTrue(list.toString(), list.indexOf(t2) < list.indexOf(t1));
@@ -87,7 +87,7 @@ public void testOrderingOfTenTables() {

         list.add(t8);

-        list = Database.sortByForeignKeys(list);
+        list = Database.sortByForeignKeys(list, null);

         assertTrue(list.toString(), list.indexOf(t4) < list.indexOf(t5));
         assertTrue(list.toString(), list.indexOf(t3) < list.indexOf(t4));
@@ -112,7 +112,7 @@ public void testCyclicalReferences() {
         list.add(t2);
         list.add(t1);

-        list = Database.sortByForeignKeys(list);
+        list = Database.sortByForeignKeys(list, null);

         // for now just make sure it doesn't blow up
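Editor's note: since sortByForeignKeys() gained a second argument, a quick sketch of the two calling modes (selectedTables and everyTableInSchema are placeholder collections):

[source, java]
----
// Sort only among the given tables; foreign keys pointing outside the list are skipped.
List<Table> ordered = Database.sortByForeignKeys(selectedTables, null);

// Sort with knowledge of the whole schema, so a foreign key target that is not
// configured for replication is warned about instead of silently skipped.
Map<String, Table> allTables = new HashMap<String, Table>();
for (Table t : everyTableInSchema) {
    allTables.put(t.getName(), t);
}
ordered = Database.sortByForeignKeys(selectedTables, allTables);
----

diff --git a/symmetric-io/src/main/java/org/jumpmind/symmetric/io/data/DbFill.java b/symmetric-io/src/main/java/org/jumpmind/symmetric/io/data/DbFill.java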
index a0e560deb4..38a842ae4e 100644
--- a/symmetric-io/src/main/java/org/jumpmind/symmetric/io/data/DbFill.java
+++ b/symmetric-io/src/main/java/org/jumpmind/symmetric/io/data/DbFill.java
@@ -135,6 +136,9 @@ public class DbFill {
     // For cascading-select, quick check by table if it shares a common value with other tables
     private Set<Table> commonDependencyTables = new HashSet<Table>();

+    // For cascading-select, to ensure composite keys contain proper values for all columns in the key
+    private Map<Table, Set<ForeignKey>> compositeForeignKeys = new HashMap<Table, Set<ForeignKey>>();
+
     // -1 for no limit
     private static final int RANDOM_SELECT_SIZE = 100;
@@ -219,7 +223,14 @@ public void fillTables(String[] tableNames, Map tableProperties) {
                         missingTableNames);
             }
         }
-        tables = Database.sortByForeignKeys(tables);
+        tables = Database.sortByForeignKeys(tables, getAllDbTables());
+        log.info("Filling {} tables in foreign key order: {}", tables.size(), Database.printTables(tables));
         buildForeignKeyReferences(tables);
         buildDependentColumnValues(tables);
         fillTables(tables, tableProperties);
@@ -242,7 +253,13 @@ protected void buildForeignKeyReferences(List<Table> tables) {
             for (ForeignKey fk : table.getForeignKeys()) {
                 for (Reference ref : fk.getReferences()) {
                     String key = table.getQualifiedTableName() + "." + ref.getLocalColumnName();
-                    foreignKeyReferences.put(key, new ForeignKeyReference(fk, ref));
+                    foreignKeyReferences.put(key, new ForeignKeyReference(fk, ref));
+                }
+                if (fk.getReferences().length > 1) {
+                    if (compositeForeignKeys.get(table) == null) {
+                        compositeForeignKeys.put(table, new HashSet<ForeignKey>());
+                    }
+                    compositeForeignKeys.get(table).add(fk);
                 }
             }
         }
@@ -268,7 +285,7 @@ protected void buildDependentColumnValues(List<Table> tables) {
                 List<Object> commonValue = new ArrayList<Object>();
                 StringBuilder sb = null;
                 for (ForeignKeyReference fkr : references) {
-                    String key = fkr.getForeignKey().getForeignTableName() + "." + fkr.getReference().getForeignColumnName();
+                    String key = table.getQualifiedColumnName(fkr.getReference().getForeignColumn());
                     commonDependencyValues.put(key, commonValue);
                     commonDependencyTables.add(getDbTable(fkr.getForeignKey().getForeignTableName()));
                     if (verbose) {
@@ -365,7 +382,10 @@ private void fillTables(List<Table> tables, Map tableProperties) {
             List<Table> groupTables = new ArrayList<Table>();

             if (cascading && dmlType == INSERT) {
-                groupTables.addAll(foreignTables.get(tableToProcess));
+                List<Table> foreignTablesList = foreignTables.get(tableToProcess);
+                if (foreignTablesList != null) {
+                    groupTables.addAll(foreignTablesList);
+                }
                 if (groupTables.size() > 0) {
                     log.info("Cascade insert " + tableToProcess.getName() + ": " + toStringTables(groupTables));
                 }
@@ -935,15 +955,58 @@ private Row createRandomInsertValues(DmlStatement updStatement, Table table) {
             ForeignKeyReference fkr = foreignKeyReferences.get(table.getQualifiedColumnName(columns[i]));
             if (fkr != null) {
                 Map<String, Object> foreignRowValues = currentRowValues.get(fkr.getForeignKey().getForeignTableName());
                 if (foreignRowValues != null) {
                     value = foreignRowValues.get(fkr.getReference().getForeignColumnName());
                 }
             } else {
                 value = generateRandomValueForColumn(columns[i]);
             }
             row.put(columns[i].getName(), value);
         }
+
+        // TODO: composite foreign keys (tracked in compositeForeignKeys above) still need
+        // handling here, so that every column of a multi-column key is taken from the same
+        // foreign row; the lookup above only copies values column by column.
+
+        currentRowValues.put(table.getName(), row);
         return row;
     }
diff --git a/symmetric-io/src/main/java/org/jumpmind/symmetric/io/data/writer/DefaultDatabaseWriter.java b/symmetric-io/src/main/java/org/jumpmind/symmetric/io/data/writer/DefaultDatabaseWriter.java
index bcda168066..a22298f3be 100644
--- a/symmetric-io/src/main/java/org/jumpmind/symmetric/io/data/writer/DefaultDatabaseWriter.java
+++ b/symmetric-io/src/main/java/org/jumpmind/symmetric/io/data/writer/DefaultDatabaseWriter.java
@@ -37,6 +37,7 @@
 import org.jumpmind.db.model.Column;
 import org.jumpmind.db.model.Database;
 import org.jumpmind.db.model.Table;
+import org.jumpmind.db.model.TypeMap;
 import org.jumpmind.db.platform.DatabaseInfo;
 import org.jumpmind.db.platform.IDatabasePlatform;
 import org.jumpmind.db.sql.DmlStatement;
@@ -44,6 +45,7 @@
 import org.jumpmind.db.sql.ISqlRowMapper;
 import org.jumpmind.db.sql.ISqlTemplate;
 import org.jumpmind.db.sql.ISqlTransaction;
+import org.jumpmind.db.sql.LogSqlBuilder;
 import org.jumpmind.db.sql.Row;
 import org.jumpmind.db.sql.SqlException;
 import org.jumpmind.db.sql.SqlScriptReader;
@@ -651,7 +653,7 @@ protected void logFailureDetails(Throwable e, CsvData data, boolean logLastDmlDe
         failureMessage.append("Failed to process "); 
failureMessage.append(data.getDataEventType().toString().toLowerCase()); failureMessage.append(" event in batch "); - failureMessage.append(batch.getBatchId()); + failureMessage.append(batch.getNodeBatchId()); failureMessage.append(".\n"); if (logLastDmlDetails && this.currentDmlStatement != null) { @@ -662,10 +664,11 @@ protected void logFailureDetails(Throwable e, CsvData data, boolean logLastDmlDe if (logLastDmlDetails && this.currentDmlValues != null) { failureMessage.append("Failed sql parameters: "); - failureMessage.append(StringUtils.abbreviate(Arrays.toString(currentDmlValues), CsvData.MAX_DATA_SIZE_TO_PRINT_TO_LOG)); + failureMessage.append(StringUtils.abbreviate("[" + dmlValuesToString(currentDmlValues, this.currentDmlStatement.getTypes()) + "]", + CsvData.MAX_DATA_SIZE_TO_PRINT_TO_LOG)); failureMessage.append("\n"); failureMessage.append("Failed sql parameters types: "); - failureMessage.append(Arrays.toString(this.currentDmlStatement.getTypes())); + failureMessage.append("[" + TypeMap.getJdbcTypeDescriptions(this.currentDmlStatement.getTypes()) + "]"); failureMessage.append("\n"); } @@ -681,6 +684,24 @@ protected void logFailureDetails(Throwable e, CsvData data, boolean logLastDmlDe log.info(failureMessage.toString(), e); } + protected String dmlValuesToString(Object[] dmlValues, int[] types) { + StringBuilder buff = new StringBuilder(); + if (dmlValues == null || dmlValues.length == 0) { + return ""; + } + + LogSqlBuilder logSqlBuilder = new LogSqlBuilder(); + + for (int i = 0; i < dmlValues.length; i++) { + buff.append(logSqlBuilder.formatValue(dmlValues[i], types[i])); + if (i < dmlValues.length-1) { + buff.append(", "); + } + } + + return buff.toString(); + } + @Override protected void bindVariables(Map variables) { super.bindVariables(variables); @@ -806,8 +827,9 @@ protected int execute(CsvData data, String[] values) { currentDmlValues = platform.getObjectValues(batch.getBinaryEncoding(), values, currentDmlStatement.getMetaData(), false, writerSettings.isFitToColumn()); if (log.isDebugEnabled()) { - log.debug("Submitting data {} with types {}", Arrays.toString(currentDmlValues), - Arrays.toString(this.currentDmlStatement.getTypes())); + log.debug("Submitting data [{}] with types [{}]", + dmlValuesToString(currentDmlValues, this.currentDmlStatement.getTypes()), + TypeMap.getJdbcTypeDescriptions(this.currentDmlStatement.getTypes())); } return transaction.addRow(data, currentDmlValues, this.currentDmlStatement.getTypes()); } diff --git a/symmetric-io/src/main/java/org/jumpmind/symmetric/io/stage/StagedResource.java b/symmetric-io/src/main/java/org/jumpmind/symmetric/io/stage/StagedResource.java index df86a5fdf3..0b93dbc081 100644 --- a/symmetric-io/src/main/java/org/jumpmind/symmetric/io/stage/StagedResource.java +++ b/symmetric-io/src/main/java/org/jumpmind/symmetric/io/stage/StagedResource.java @@ -91,10 +91,14 @@ public StagedResource(File directory, String path, StagingManager stagingManager protected static String toPath(File directory, File file) { String path = file.getAbsolutePath(); path = path.replaceAll("\\\\", "/"); - path = path.substring(directory.getAbsolutePath().length(), file - .getAbsolutePath().length()); - path = path.substring(1, path.lastIndexOf(".")); - return path; + path = path.substring(directory.getAbsolutePath().length(), file.getAbsolutePath().length()); + int extensionIndex = path.lastIndexOf("."); + if (extensionIndex > 0) { + path = path.substring(1, extensionIndex); + return path; + } else { + throw new IllegalStateException("Expected an 
extension of .done or .create at the end of the path and did not find it: " + path);
+        }
     }

     @Override
diff --git a/symmetric-io/src/main/java/org/jumpmind/symmetric/io/stage/StagingManager.java b/symmetric-io/src/main/java/org/jumpmind/symmetric/io/stage/StagingManager.java
index 3ffedae65e..e49268b232 100644
--- a/symmetric-io/src/main/java/org/jumpmind/symmetric/io/stage/StagingManager.java
+++ b/symmetric-io/src/main/java/org/jumpmind/symmetric/io/stage/StagingManager.java
@@ -66,11 +66,11 @@ public Set getResourceReferences() {
     private void refreshResourceList() {
         Collection<File> files = FileUtils.listFiles(this.directory,
-                new String[] { State.CREATE.getExtensionName(), State.DONE.getExtensionName(), State.DONE.getExtensionName() }, true);
+                new String[] { State.CREATE.getExtensionName(), State.DONE.getExtensionName() }, true);
         for (File file : files) {
             try {
                 String path = StagedResource.toPath(directory, file);
-                if (!resourcePaths.contains(path)) {
+                if (path != null && !resourcePaths.contains(path)) {
                     resourcePaths.add(path);
                 }
             } catch (IllegalStateException ex) {
diff --git a/symmetric-jdbc/src/main/java/org/jumpmind/db/util/BasicDataSourceFactory.java b/symmetric-jdbc/src/main/java/org/jumpmind/db/util/BasicDataSourceFactory.java
index 8c2ff67a9e..4001dc8bb2 100644
--- a/symmetric-jdbc/src/main/java/org/jumpmind/db/util/BasicDataSourceFactory.java
+++ b/symmetric-jdbc/src/main/java/org/jumpmind/db/util/BasicDataSourceFactory.java
@@ -81,8 +81,12 @@ public static ResettableBasicDataSource create(TypedProperties properties,
         String password = properties.get(BasicDataSourcePropertyConstants.DB_POOL_PASSWORD, "");

         if (password != null && password.startsWith(SecurityConstants.PREFIX_ENC)) {
-            password = securityService.decrypt(password.substring(SecurityConstants.PREFIX_ENC
+            try {
+                password = securityService.decrypt(password.substring(SecurityConstants.PREFIX_ENC
                     .length()));
+            } catch (Exception ex) {
+                throw new IllegalStateException("Failed to decrypt the database password from your engine properties file stored under the " + BasicDataSourcePropertyConstants.DB_POOL_PASSWORD + " property. 
Please re-encrypt your password", ex); + } } dataSource.setPassword(password); dataSource.setInitialSize(properties.getInt( diff --git a/symmetric-server/src/main/java/org/jumpmind/symmetric/web/SymmetricServlet.java b/symmetric-server/src/main/java/org/jumpmind/symmetric/web/SymmetricServlet.java index 62b301c210..3ed4633886 100644 --- a/symmetric-server/src/main/java/org/jumpmind/symmetric/web/SymmetricServlet.java +++ b/symmetric-server/src/main/java/org/jumpmind/symmetric/web/SymmetricServlet.java @@ -237,7 +237,7 @@ protected String normalizeUri(ISymmetricEngine engine, HttpServletRequest req) { if (engine != null) { String removeString = "/" + engine.getEngineName(); if (uri.startsWith(removeString)) { - uri = uri.substring(removeString.length()); + uri = uri.substring(removeString.length()); } } return uri; @@ -253,13 +253,35 @@ protected void logException(HttpServletRequest req, ServerSymmetricEngine engine Throwable root = ExceptionUtils.getRootCause(ex); int errorCount = engine.getErrorCountFor(nodeId); + + String msg = String.format("Error while processing %s request for node: %s", method, nodeId); + + if (!StringUtils.isEmpty(externalId) && !StringUtils.equals(nodeId, externalId)) { + msg += String.format(" externalId: %s", externalId); + } + + if (!StringUtils.isEmpty(hostName) && !StringUtils.isEmpty(address)) { + if (StringUtils.equals(hostName, address)) { + msg += String.format(" at %s", hostName); + } else { + msg += String.format(" at %s (%s)", address, hostName); + } + } else if (!StringUtils.isEmpty(hostName)) { + msg += String.format(" at %s", hostName); + } else if (!StringUtils.isEmpty(address)) { + msg += String.format(" at %s", address); + } + + msg += String.format(" with path: %s", ServletUtils.normalizeRequestUri(req)); + if (!(ex instanceof IOException || root instanceof IOException) || errorCount >= MAX_NETWORK_ERROR_FOR_LOGGING) { - log.error("Error while processing {} request for externalId: {}, node: {} at {} ({}) with path: {}", - new Object[] { method, externalId, nodeId, address, hostName, ServletUtils.normalizeRequestUri(req) }); - log.error("", ex); + log.error(msg, ex); } else { - log.info("Error while processing {} request for externalId: {}, node: {} at {} ({}) with path: {}. The message is: {}", - new Object[] { method, externalId, nodeId, address, hostName, ServletUtils.normalizeRequestUri(req), ex.getMessage() }); + if (log.isDebugEnabled()) { + log.info(msg, ex); + } else { + log.info(msg + " The message is: " + ex.getMessage()); + } } engine.incrementErrorCountForNode(nodeId); }
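Editor's note: with the SymmetricServlet change above, the request-error log line is assembled once and degrades gracefully when pieces are missing. For illustration only, with made-up values:

[source, java]
----
// externalId differs from nodeId, and both address and host name are known:
String msg = String.format("Error while processing %s request for node: %s", "push", "001")
        + String.format(" externalId: %s", "store-001")
        + String.format(" at %s (%s)", "10.0.0.5", "store1.example.com")
        + String.format(" with path: %s", "/sync/corp-000/push");
// -> "Error while processing push request for node: 001 externalId: store-001
//     at 10.0.0.5 (store1.example.com) with path: /sync/corp-000/push"
----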