Skip to content

Commit

Permalink
Merge remote-tracking branch 'origin/3.9' into Josh
Browse files Browse the repository at this point in the history
Conflicts:
	symmetric-core/src/main/java/org/jumpmind/symmetric/model/IncomingBatch.java
	symmetric-core/src/main/java/org/jumpmind/symmetric/model/OutgoingBatch.java
	symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/DataExtractorService.java
	symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/DataService.java
	symmetric-core/src/main/java/org/jumpmind/symmetric/service/impl/OutgoingBatchService.java
  • Loading branch information
jumpmind-josh committed Nov 16, 2016
2 parents 9a5ed15 + 8c96083 commit c165e8c
Show file tree
Hide file tree
Showing 217 changed files with 4,489 additions and 2,100 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,12 @@ public <T> T getDataSource() {
public ISqlTemplate getSqlTemplate() {
return sqlTemplate;
}


@Override
public ISqlTemplate getSqlTemplateDirty() {
    // Returns the same template as getSqlTemplate() — this platform has no
    // separate dirty-read (uncommitted) connection. NOTE(review): confirm a
    // shared template is acceptable wherever dirty reads are expected.
    return sqlTemplate;
}

@Override
protected Object parseBigDecimal(String value) {
/* sqlite allows blank data in integer fields */
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -191,7 +191,7 @@ public boolean invoke(boolean force) {
}

if (parameterService.is(ParameterConstants.FILE_SYNC_ENABLE)
&& parameterService.is("start.file.sync.tracker.job")
&& parameterService.is(ParameterConstants.START_FILE_SYNC_TRACKER_JOB)
&& parameterService.getLong("job.file.sync.tracker.period.time.ms", 5000) < (System
.currentTimeMillis() - lastFileSyncTrackerTime)) {
try {
Expand All @@ -205,7 +205,7 @@ public boolean invoke(boolean force) {
}

if (parameterService.is(ParameterConstants.FILE_SYNC_ENABLE)
&& parameterService.is("start.file.sync.pull.job")
&& parameterService.is(ParameterConstants.START_FILE_SYNC_PULL_JOB)
&& parameterService.getLong("job.file.sync.pull.period.time.ms", 60000) < (System
.currentTimeMillis() - lastFileSyncPullTime)) {
try {
Expand All @@ -219,7 +219,7 @@ public boolean invoke(boolean force) {
}

if (parameterService.is(ParameterConstants.FILE_SYNC_ENABLE)
&& parameterService.is("start.file.sync.push.job")
&& parameterService.is(ParameterConstants.START_FILE_SYNC_PUSH_JOB)
&& parameterService.getLong("job.file.sync.push.period.time.ms", 60000) < (System
.currentTimeMillis() - lastFileSyncPushTime)) {
try {
Expand Down Expand Up @@ -336,4 +336,9 @@ public void setTimeBetweenRunsInMs(long timeBetweenRunsInMs) {
}
}

@Override
public void startJobsAfterConfigChange() {
    // Intentionally a no-op: on Android this manager does not restart jobs
    // when the configuration changes. NOTE(review): confirm no job restart
    // is required on this platform after a config push.
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@
import org.jumpmind.db.model.Table;
import org.jumpmind.db.sql.ISqlRowMapper;
import org.jumpmind.db.sql.ISqlTransaction;
import org.jumpmind.db.sql.Row;
import org.jumpmind.db.sql.mapper.RowMapper;

import android.database.sqlite.SQLiteDatabase;

Expand Down Expand Up @@ -56,6 +58,16 @@ public boolean isInBatchMode() {
return false;
}

@Override
public Row queryForRow(String sql, Object... args) {
    // Run the query with a generic row mapper and hand back only the first
    // row, or null when the query matched nothing.
    List<Row> result = query(sql, new RowMapper(), args, null);
    return result.isEmpty() ? null : result.get(0);
}

public <T> List<T> query(String sql, ISqlRowMapper<T> mapper, Map<String, Object> namedParams) {
return sqlTemplate.query(sql, mapper, namedParams);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -191,7 +191,7 @@ public boolean execute(NodeCommunication nodeCommunication, RemoteNodeStatuses s
nodeCommunication.setFailCount(0);
}
status.setComplete(true);
save(nodeCommunication);
save(nodeCommunication, false);
}
return !failed;
}
Expand Down
4 changes: 2 additions & 2 deletions symmetric-assemble/src/asciidoc/configuration/channels.ad
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ ifdef::pro[]
endif::pro[]

Channel ID:: Identifier used through the system to identify a given channel.
[[processing-order]]Processing Order:: Numeric value to determine the order in which a channel will be processed.
[[processing-order]]Processing Order:: Numeric value to determine the order in which a channel will be processed. Channels will be processed in ascending order.
[[batch-algorithm]]Batch Algorithm:: Batching is the grouping of data, by channel, to be transferred and committed at the client together.
.Channel Batching Algorithms
|===
Expand Down Expand Up @@ -83,7 +83,7 @@ Tables Contain Big Lobs:: Indicates whether the channel contains big lobs. Some
ifndef::pro[]
[source,sql]
----
insert into SYM_CHANNEL (channel_id, rocessing_order, max_batch_size, max_batch_to_send,
insert into SYM_CHANNEL (channel_id, processing_order, max_batch_size, max_batch_to_send,
extract_period_millis, batch_algorithm, enabled, description)
values ('item', 10, 1000, 10, 0, 'default', 1, 'Item and pricing data');

Expand Down
18 changes: 18 additions & 0 deletions symmetric-assemble/src/asciidoc/configuration/transforms/types.ad
Original file line number Diff line number Diff line change
Expand Up @@ -503,3 +503,21 @@ endif::pro[]
would convert a row with columns named "user1" and "user2" containing values "red" and "blue" into two rows with columns
"fieldid" and "color" containing a row of "1" and "red" and a row of "2" and "blue".
====



===== isEmpty Transform

This transformation checks to see if a string is null or zero length. If it is empty, the replacement
value will be used. If no value is provided, null will be used as the default replacement for empty values.

===== isBlank Transform

This transformation checks to see if a string is null or zero length after trimming white space. If it is blank, the replacement
value will be used. If no value is provided, null will be used as the default replacement for blank values.

===== Null Value Transform

This transformation checks to see if the source value is null and if so replaces it with the provided value.


Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
4 changes: 4 additions & 0 deletions symmetric-assemble/src/asciidoc/manage.ad
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,10 @@ endif::pro[]

include::manage/node-add.ad[]

==== Load Data

include::manage/node-load.ad[]

==== Control

include::manage/node-control.ad[]
Expand Down
48 changes: 17 additions & 31 deletions symmetric-assemble/src/asciidoc/manage/node-initial-load.ad
Original file line number Diff line number Diff line change
@@ -1,32 +1,17 @@
An initial load is the process of seeding tables at a target node with data from a source node. Instead of
capturing data, data is selected from the source table using a SQL statement and then it is streamed to the
client.
Loading data for 3.8 and above has been modified, see <<Load Data>>.

ifdef::pro[]
An initial load is requested by clicking the *Initial Load* button and selecting either *Send Load To* or *Receive Load From*.

_Send Load To_ will queue up a load to be sent to the node that is selected from the node that is currently selected in the
web interface.

_Receive Load From_ will queue up a request for a load from the node that is selected to the current node.

image::manage/manage-nodes-send-load-to.png[]

endif::pro[]
ifndef::pro[]

When a load is requested it will either set the `initial_load_enabled` or the `reverse_initial_load_enabled` flag on the
appropriate <<NODE_SECURITY>> row.


When the <<Route Job>> runs next, it will create batches that represent the initial load. Batches will be created on the reload
channel for each table that is defined by <<Table Triggers>> and linked by <<Table Routing>> in the direction that the load
was requested. The default reload channel is the _"reload"_ channel. At the
same time reload batches are inserted, all previously pending batches
for the node are marked as successfully sent.

ifdef::pro[]
A load's progress can be monitored on the <<Outgoing Loads>> screen or the <<Outgoing Batches>> screen.
endif::pro[]

Each table defined by <<Table Triggers>> and linked by <<Table Routing>> is represented by a reload <<OUTGOING_BATCH>>. The batches
are inserted in the defined order. If the `initial_load_order` is the same then SymmetricDS tries to determine the order the
tables need to be loaded in automatically based on foreign key dependencies. A negative value for `initial_load_order` in <<Table Routing>> will result
Expand Down Expand Up @@ -86,19 +71,13 @@ IMPORTANT: When providing an
`initial_load_select`
be sure to test out the criteria against production data in a query
browser. Do an explain plan to make sure you are properly using indexes.
endif::pro[]

===== Initial Load Extract In Background

By default, all data for a given table will be initial loaded in a single batch, regardless
of the max batch size parameter on the reload channel. That is, for a table with one million
rows, all rows for that table will be initial loaded and sent to the destination node in a
single batch. For large tables, this can result in a batch that can take a long time to
extract and load.

Initial loads for a table can be broken into multiple batches by setting
`initial.load.use.extract.job.enabled` to true. This parameter allows
SymmetricDS to pre-extract initial load batches versus having them extracted when
the batch is pulled or pushed. When using this parameter, there are two ways to tell
By default, initial loads for a table are broken into multiple batches. SymmetricDS will pre-extract
initial load batches versus having them extracted when
the batch is pulled or pushed. There are two ways to tell
SymmetricDS the number of batches to create for a given table. The first is to specify
a positive integer in the initial_load_batch_count column on
<<Table Routing>>. This
Expand All @@ -109,17 +88,24 @@ When 0 is specified for initial_load_batch_count, SymmetricDS will execute a cou
the extract process and pre-create N batches based on the total number of records found
in the table divided by the `max_batch_size` on the reload channel.

By setting the `initial.load.use.extract.job.enabled` to false all data for a given table will be initial loaded
in a single batch, regardless of the max batch size parameter on the reload channel. That is, for a table with one million
rows, all rows for that table will be initial loaded and sent to the destination node in a
single batch. For large tables, this can result in a batch that can take a long time to
extract and load.

ifndef::pro[]
===== Reverse Initial Loads

Normal initial loads load data from the parent node to a client node. Occasionally, there may be need to do a one-time
initial load of data in the "reverse" direction. A reverse initial load is started by setting the `reverse_initial_load_enabled` flag
on <<NODE_SECURITY>>.

ifdef::pro[]
A reverse initial load is requested from the user interface by pressing _Initial Load_ and selecting _Receive Load From_.
===== Other Initial Load Settings

endif::pro[]

===== Other Initial Load Settings
===== Initial Load Parameters

There are several parameters that can be used to modify the behavior of an initial load.

Expand Down
125 changes: 125 additions & 0 deletions symmetric-assemble/src/asciidoc/manage/node-load.ad
Original file line number Diff line number Diff line change
@@ -0,0 +1,125 @@
A load is the process of seeding tables at a target node with data from a source node. Instead of
capturing data, data is selected from the source table using a SQL statement and then it is streamed to the
client.

ifndef::pro[]
Initial loads, reverse initial loads, and table reloads can utilize the <<TABLE_RELOAD_REQUEST>> to request a load with a variety of options.

===== Initial Load (all tables)
Insert a row into <<TABLE_RELOAD_REQUEST>> containing the value 'ALL' for both the trigger_id and router_id.
[source,sql]
----
insert into SYM_TABLE_RELOAD_REQUEST (target_node_id, source_node_id, trigger_id, router_id, create_time, last_update_time)
values ('store-001', 'corp-000', 'ALL', 'ALL', current_timestamp, current_timestamp);
----

===== Partial Load
Insert a row into <<TABLE_RELOAD_REQUEST>> for each trigger router combination to load.
[source,sql]
----
insert into SYM_TABLE_RELOAD_REQUEST (target_node_id, source_node_id, trigger_id, router_id, create_time, last_update_time)
values ('store-001', 'corp-000', 'item_selling_price', 'corp_2_store', current_timestamp, current_timestamp);

insert into SYM_TABLE_RELOAD_REQUEST (target_node_id, source_node_id, trigger_id, router_id, create_time, last_update_time)
values ('store-001', 'corp-000', 'item', 'corp_2_store', current_timestamp, current_timestamp);
----

===== Reverse Initial Load (all tables)
Insert a row into <<TABLE_RELOAD_REQUEST>> with the proper source and target nodes for the direction of the load.
[source,sql]
----
insert into SYM_TABLE_RELOAD_REQUEST (target_node_id, source_node_id, trigger_id, router_id, create_time, last_update_time)
values ('corp-000', 'store-001', 'ALL', 'ALL', current_timestamp, current_timestamp);
----

===== Load data and create target tables
Insert a row into <<TABLE_RELOAD_REQUEST>> and set the create_table to 1 to send a table creation prior to the load running.
[source,sql]
----
insert into SYM_TABLE_RELOAD_REQUEST (target_node_id, source_node_id, trigger_id, router_id, create_time, create_table, last_update_time)
values ('corp-000', 'store-001', 'ALL', 'ALL', current_timestamp, 1, current_timestamp);
----

===== Load data and delete from target tables
Insert a row into <<TABLE_RELOAD_REQUEST>> and set the delete_first to 1 to delete all data in the target table prior to the load running.
[source,sql]
----
insert into SYM_TABLE_RELOAD_REQUEST (target_node_id, source_node_id, trigger_id, router_id, create_time, delete_first, last_update_time)
values ('corp-000', 'store-001', 'ALL', 'ALL', current_timestamp, 1, current_timestamp);
----

===== Load data for a specific table with partial data
Insert a row into <<TABLE_RELOAD_REQUEST>> and set the reload_select to the where clause to run while extracting data. There are 3 variables
available for replacement.

* $(groupId)
* $(nodeId)
* $(externalId)

[source,sql]
----
insert into SYM_TABLE_RELOAD_REQUEST (target_node_id, source_node_id, trigger_id, router_id, create_time, reload_select, last_update_time)
values ('store-001', 'corp-000', 'item_selling_price', 'corp_2_store', current_timestamp, 'store_id=$(externalId)', current_timestamp);

----

===== Load table with custom SQL run before the load executes.
Insert a row into <<TABLE_RELOAD_REQUEST>> and set the before_custom_sql to run before the load runs. The %s variable is available as replacement
for the table name.

[source,sql]
----
insert into SYM_TABLE_RELOAD_REQUEST (target_node_id, source_node_id, trigger_id, router_id, create_time, before_custom_sql, last_update_time)
values ('store-001', 'corp-000', 'ALL', 'ALL', current_timestamp, 'truncate table %s', current_timestamp);

----
endif::pro[]

ifdef::pro[]
As of 3.8 loading data has been consolidated into a single wizard.
Request a load by clicking the *Load Data* button on the manage nodes screen.

image::manage/manage-load-data.png[]

This will open a new window that will walk through the steps screen by screen to build the load needed. If there were no selected rows on the manage nodes screen when this button was clicked the first screen will prompt for a source and target set of nodes. Multiple selections can be made here as long as all source nodes or all target nodes belong to the same group.

image::manage/manage-load-data-source-target.png[]

The next screen allows for a selection of all tables configured (full load) or a subset of tables configured (partial load). Note, this will be the first screen provided if rows were already selected on the manage nodes screen when the load data button was selected.

The second option on this screen is to determine what "before action" should occur before the load is run.

.Before Load Options
|===

|Nothing, just send the data|This assumes the tables are present and do not need to be cleared out before the load can be run. If data already exists it will fall back to an update if the insert fails. This allows the load to be run repeatedly as needed.

|Create tables|This will send create table statements to the target to match the source before loading any data. Use this option when the target database does not already contain the tables used on the source.

|Delete all data|This will delete all data from the target table prior to loading data. This can be useful to accommodate data that is in the target but no longer in the source. By default just sending the data will only insert/update with the rows from the source so any old data will remain in place on the target unless this delete action is taken prior to loading.

|Truncate tables|This will perform a truncate on all target tables prior to loading.

|Run custom SQL|Allows a custom SQL statement to be executed on each table prior to loading. The use of %s will serve as a variable to be replaced at runtime with the table name. Example: truncate table %s

|===

image::manage/manage-load-data-full-partial-before.png[]

Partial loads will then see a table selection screen. Full loads will immediately be taken to the summary screen. The table selection screen allows individual tables to be selected as part of the load. There is also the ability to provide a where clause to be used to extract the data for a load. This allows control over what data should be loaded for a given table.

.Where clause variable replacements
* $(groupId)
* $(nodeId)
* $(externalId)

image::manage/manage-load-data-tables.png[]

Finally a summary screen is presented to review all the settings for the load prior to adding it to the request queue. Loads are checked by the routing process so once the load has been saved it will be picked up and begin processing on the next run of the routing job.

image::manage/manage-load-data-summary.png[]

A load's progress can be monitored on the <<Outgoing Loads>> screen or the <<Outgoing Batches>> screen.

endif::pro[]

8 changes: 7 additions & 1 deletion symmetric-assemble/src/asciidoc/setup/engine-properties.ad
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,13 @@ If the properties are changed in
they will take effect across all engines deployed to the server.

NOTE: You can use the variable `$(hostName)` to represent the host name of the machine when defining these properties
(for example, external.id=`$(hostName)`).
(for example, external.id=`$(hostName)`). You can also access external id, engine name, node group id, sync URL, and registration URL in this manner.
(for example, engine.name=$(nodeGroupId)-$(externalId)).

NOTE: You can also use a BSH script for the external id, engine name, node group id, sync URL, and registration URL. Use back ticks to indicate the
BSH expression, and note that only one BSH expression is supported for a given property line. The script can be prefixed or suffixed with fixed text.
For example, if you wish to base the external id on just part of the hostname (e.g., a substring of hostName):
external.id=store-\`import org.apache.commons.lang.StringUtils; return StringUtils.substring(hostName,2,4);\`


engine.name::
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -277,6 +277,7 @@ protected IDatabasePlatform createDatabasePlatform(TypedProperties properties) {

public static IDatabasePlatform createDatabasePlatform(ApplicationContext springContext, TypedProperties properties,
DataSource dataSource, boolean waitOnAvailableDatabase) {
log.info("Initializing connection to database");
if (dataSource == null) {
String jndiName = properties.getProperty(ParameterConstants.DB_JNDI_NAME);
if (StringUtils.isNotBlank(jndiName)) {
Expand Down
Loading

0 comments on commit c165e8c

Please sign in to comment.