diff --git a/commercedbsync/extensioninfo.xml b/commercedbsync/extensioninfo.xml index 4b1ef33..5204953 100644 --- a/commercedbsync/extensioninfo.xml +++ b/commercedbsync/extensioninfo.xml @@ -7,9 +7,11 @@ --> - + + + - + diff --git a/commercedbsync/external-dependencies.xml b/commercedbsync/external-dependencies.xml index 4b27d5b..4e72fdc 100644 --- a/commercedbsync/external-dependencies.xml +++ b/commercedbsync/external-dependencies.xml @@ -28,11 +28,6 @@ commons-dbcp2 2.7.0 - - com.microsoft.azure - azure-storage - 8.1.0 - com.zaxxer HikariCP diff --git a/commercedbsync/project.properties b/commercedbsync/project.properties index b5b2347..e5e9aa9 100644 --- a/commercedbsync/project.properties +++ b/commercedbsync/project.properties @@ -254,12 +254,20 @@ migration.schema.target.columns.remove.enabled=true ## migration.schema.autotrigger.enabled=false ## -# Activate data export to external DB via cron jobs +# Specifies where to store the internal tables +# If empty and migration.data.synchronization.enabled is true, then it is set to "source", otherwise "target" +# +# @values target or source +# @optional true +## +migration.internal.tables.storage= +## +# Activate data synchronization to external DB via cron jobs # # @values true or false # @optional true ## -migration.data.export.enabled=false +migration.data.synchronization.enabled=false ## # Specifies the number of rows to read per batch. This only affects tables which can be batched. # @@ -370,6 +378,13 @@ migration.data.columns.excluded.attributedescriptors= ## migration.data.columns.nullify.attributedescriptors= ## +# Configure columns to use when reading by offset +# +# @values comma separated list of column names +# @optional true +## +migration.data.columns.batch.TABLE= +## # If set to true, all indices in the target table will be removed before copying over the data. # # @values true or false # @optional true ## @@ -429,13 +444,35 @@ migration.data.tables.excluded=SYSTEMINIT,StoredHttpSessions,itemdeletionmarkers ## migration.data.tables.included= ## +# Tables to be migrated first (use table names with prefix and suffix) +# +# @values comma separated list of table full names. +# @optional true +## +migration.data.tables.order.first= +## +# Tables to be migrated last (use table names with prefix and suffix) +# +# @values comma separated list of table full names. +# @optional true +## +migration.data.tables.order.last= +## +# List of partitioned tables (use table names with prefix and suffix). Separate batches will be created for each table partition. +# This only applies when using HANA as the source database. +# +# @values comma separated list of table full names. +# @optional true +## +migration.data.tables.partitioned= +## # Run migration in the cluster (based on commerce cluster config). The 'HAC' node will be the primary one. # A scheduling algorithm decides which table will run on which node. Nodes are notified using cluster events. # # @values true or false # @optional true ## -migration.cluster.enabled=false +migration.cluster.enabled=true ## # If set to true, the migration will resume from where it stopped (either due to errors or cancellation).
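#
# Illustrative sketch: one possible combination of the synchronization-related settings documented above,
# keeping the internal tables in the source schema, declaring explicit batch columns for one table,
# ordering selected tables, and marking one table as partitioned (HANA sources only).
# The table and column values used here (products, PK, ydeployments, attributedescriptors, aclentries, props, orderentries)
# are assumptions chosen for illustration, not shipped defaults.
#
# migration.data.synchronization.enabled=true
# migration.internal.tables.storage=source
# migration.data.columns.batch.products=PK
# migration.data.tables.order.first=ydeployments,attributedescriptors
# migration.data.tables.order.last=aclentries,props
# migration.data.tables.partitioned=orderentries
#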
# @@ -502,7 +539,7 @@ migration.data.report.connectionstring=${media.globalSettings.cloudAzureBlobStor # @values any property key # @optional true ## -migration.properties.masked=migration.data.report.connectionstring,migration.ds.source.db.password,migration.ds.target.db.password +migration.properties.masked=migration.data.report.connectionstring,migration.ds.source.db.username,migration.ds.source.db.password,migration.ds.target.db.username,migration.ds.target.db.password ## # Specifies the default locale used. # @@ -657,5 +694,3 @@ log4j2.logger.migrationToolkit.name=com.sap.cx.boosters.commercedbsync log4j2.logger.migrationToolkit.level=INFO log4j2.logger.migrationToolkit.appenderRef.migration.ref=MigrationAppender log4j2.logger.migrationToolkit.additivity=false - - diff --git a/commercedbsync/resources/commercedbsync-beans.xml b/commercedbsync/resources/commercedbsync-beans.xml index c146ebf..997312d 100644 --- a/commercedbsync/resources/commercedbsync-beans.xml +++ b/commercedbsync/resources/commercedbsync-beans.xml @@ -153,6 +153,7 @@ + @@ -170,6 +171,7 @@ + diff --git a/commercedbsync/resources/commercedbsync-spring.xml b/commercedbsync/resources/commercedbsync-spring.xml index cb6a148..1522de6 100644 --- a/commercedbsync/resources/commercedbsync-spring.xml +++ b/commercedbsync/resources/commercedbsync-spring.xml @@ -60,11 +60,22 @@ + + + + + + + + + + + @@ -202,6 +213,7 @@ + @@ -261,6 +273,7 @@ + @@ -371,7 +384,15 @@ - + + + + + + + diff --git a/commercedbsync/resources/impex/projectdata-commercemigration-jobs.impex b/commercedbsync/resources/impex/projectdata-commercemigration-jobs.impex index 9c9f630..61f7065 100644 --- a/commercedbsync/resources/impex/projectdata-commercemigration-jobs.impex +++ b/commercedbsync/resources/impex/projectdata-commercemigration-jobs.impex @@ -2,6 +2,8 @@ INSERT_UPDATE ServicelayerJob;code[unique=true];springId[unique=true] ;incrementalMigrationJob;incrementalMigrationJob ;fullMigrationJob;fullMigrationJob +;reverseIncrementalMigrationJob;reverseIncrementalMigrationJob +;reverseFullMigrationJob;reverseFullMigrationJob ;migrationPrepJob;migrationPrepJob # Update details for incremental migration @@ -15,6 +17,12 @@ INSERT_UPDATE FullMigrationCronJob;code[unique=true];job(code)[default=fullMigra 
;fullDatabaseMigrationJob;;true;true;true;true;true;;mediaformatmapping,cat2attrrellp,categories,compositeentries,mediafolders,mediacontextlp,validationconstraintslp,validationconstraints,catalogslp,units,genericitems,pcp2wrtblecvrel,renderertemplate,dynamiccontent,userrightslp,backofficesearchcond,metainformations,unitslp,workflowactions,productprops,scripts,systemsetupaudit,gentestitems,cat2princrel,jalovelocityrenderer,paymentmodeslp,usergroupprops,orderprops,userrights,workflowactionitemsrel,parserproperty,productfeatures,productreferences,commentcompreadrels,languageslp,syncjob2pcplrel,commentitemrelations,jobs,themes,discounts,catalogversionsyncjob,cat2catrel,categorieslp,syncjob2langrel,currencieslp,impexdocumentids,userprofiles,stdpaymmodevals,links,workflowitematts,products,backofficesavedquery,productslp,workflowtemplatelinkrel,previewtickets,backofficecollections,props,retentionrule,syncjob2typerel,commentcompremoverels,genericitemslp,addresses,catalogs,languages,taxeslp,discountslp,distributedbatches,backofficesavedquerylp,searchrestrictions,aclentries,format2medforrel,keywords,paymentmodes,whereparts,commentassignrelations,commentattachments,discountrows,mediacontainerlp,commentdomains,synattcfg,mediacontext,impbatchcontent,classificationattrslp,commenttypes,globaldiscountrows,mediacontainer,searchrestrictionslp,mediaformatlp,catverdiffs,cmptype2covgrprels,workflowtemplprincrel,clattruntlp,jobslp,titles,pendingstepsrelation,themeslp,countries,commentcompwriterels,processedstepsrelation,slactions,productreferenceslp,usergroups,regionslp,userprops,exportslp,numberseries,distributedprocesses,catalogversions,externalimportkey,usergroupslp,cat2attrrel,medias,jobsearchrestriction,triggerscj,addressprops,openidexternalscopes,attr2valuerel,constraintgroup,renderertemplatelp,titleslp,indextestitem,workflowactionlinkrel,workflowactionslp,catalogversionslp,commentwatchrelations,configitems,pcpl2rdblecvrel,abstrcfgproductinfo,users,workflowitemattslp,commentcompcreaterels,derivedmedias,cat2medrel,scriptslp,regions,currencies,steps,deliverymodeslp,classattrvalueslp,mediaformat,zonedeliverymodevalues,configuratorsettings,prod2keywordrel,cat2prodrel,taxes,cat2keywordrel,classattrvalues,ydeployments,cstrgr2abscstrrel,mediaprops,pgrels,zone2country,classificationattrs,taxrows,renderersproperty,cronjobs,commentcomponents,exports,deliverymodes,comments,workflowactioncomments,countrieslp,commentusersettings,format2comtyprel,corsconfigproperty,backofficecollitemrefs,pricerows,agreements,workflowactionsrel,clattrunt,format,changedescriptors,formatlp,zones ;fullTableMigrationJob;;true;true;false;false;true;;products,paymentmodes +INSERT_UPDATE IncrementalMigrationCronJob; code[unique = true] ; migrationItems; active[default=true]; job(code)[default = reverseIncrementalMigrationJob]; sessionLanguage(isoCode)[default = en] + ; reverseIncrementalMigrationJob ; products + +INSERT_UPDATE FullMigrationCronJob; code[unique = true] ; truncateEnabled; migrationItems; job(code)[default = reverseFullMigrationJob]; sessionLanguage(isoCode)[default = en] + ; reverseFullMigrationJob ; false ; products + INSERT_UPDATE CronJob;code[unique=true];job(code);useReadOnlyDatasource ;migrationPrepCronJob;migrationPrepJob;false @@ -22,17 +30,26 @@ INSERT_UPDATE CompositeEntry;code[unique=true];executableCronJob(code) ;prepForTableMigrationEntry;migrationPrepCronJob ;prepForDbMigrationEntry;migrationPrepCronJob ;prepForIncrementalMigrationEntry;migrationPrepCronJob + ;prepForReverseTableMigrationEntry;migrationPrepCronJob + 
;prepForReverseIncrementalMigrationEntry;migrationPrepCronJob ;migrateDatabaseEntry;fullDatabaseMigrationJob ;migrateTableEntry;fullTableMigrationJob ;migrateIncrementalEntry;incrementalMigrationJob + ;reverseMigrateTableEntry;reverseFullMigrationJob + ;reverseMigrateIncrementalEntry;reverseIncrementalMigrationJob INSERT_UPDATE CompositeCronJob;code[unique=true];job(code);sessionLanguage(isoCode)[default=en]; ;compositeDatabaseMigrationJob;compositeJobPerformable; ;compositeTableMigrationJob;compositeJobPerformable; ;compositeIncrementalMigrationJob;compositeJobPerformable; + ;compositeReverseTableMigrationJob;compositeJobPerformable; + ;compositeReverseIncrementalMigrationJob;compositeJobPerformable; INSERT_UPDATE CompositeCronJob;code[unique=true];compositeEntries(code)[mode = append] ;compositeDatabaseMigrationJob;prepForDbMigrationEntry,migrateDatabaseEntry ;compositeTableMigrationJob;prepForTableMigrationEntry,migrateTableEntry ;compositeIncrementalMigrationJob;prepForIncrementalMigrationEntry,migrateIncrementalEntry + ;compositeReverseTableMigrationJob;prepForReverseTableMigrationEntry,reverseMigrateTableEntry + ;compositeReverseIncrementalMigrationJob;prepForReverseIncrementalMigrationEntry,reverseMigrateIncrementalEntry + diff --git a/commercedbsync/resources/sql/createSchedulerTablesHANA.sql b/commercedbsync/resources/sql/createSchedulerTablesHANA.sql index 75f189e..535a200 100644 --- a/commercedbsync/resources/sql/createSchedulerTablesHANA.sql +++ b/commercedbsync/resources/sql/createSchedulerTablesHANA.sql @@ -19,6 +19,12 @@ IF tablename = 'MIGRATIONTOOLKIT_TABLECOPYBATCHES' AND :found > 0 THEN DROP TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES; END IF; + +IF tablename = 'MIGRATIONTOOLKIT_TABLECOPYBATCHES_PART' AND :found > 0 + THEN +DROP TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES_PART; +END IF; + END; # CALL MIGRATION_PROCEDURE('MIGRATIONTOOLKIT_TABLECOPYTASKS'); @@ -28,6 +34,7 @@ CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYTASKS ( targetnodeId int NOT NULL, migrationId NVARCHAR(255) NOT NULL, pipelinename NVARCHAR(255) NOT NULL, + itemorder int NOT NULL DEFAULT 0, sourcetablename NVARCHAR(255) NOT NULL, targettablename NVARCHAR(255) NOT NULL, columnmap NVARCHAR(5000) NULL, @@ -67,6 +74,21 @@ CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES ( # +CALL MIGRATION_PROCEDURE('MIGRATIONTOOLKIT_TABLECOPYBATCHES_PART'); +# + +CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES_PART ( + migrationId NVARCHAR(255) NOT NULL, + batchId int NOT NULL DEFAULT 0, + pipelinename NVARCHAR(255) NOT NULL, + lowerBoundary NVARCHAR(255) NOT NULL, + upperBoundary NVARCHAR(255) NULL, + partition VARCHAR(128) NOT NULL, + PRIMARY KEY (migrationid, batchId, pipelinename, partition) +); + +# + CALL MIGRATION_PROCEDURE('MIGRATIONTOOLKIT_TABLECOPYSTATUS'); # diff --git a/commercedbsync/resources/sql/createSchedulerTablesMSSQL.sql b/commercedbsync/resources/sql/createSchedulerTablesMSSQL.sql index e2d4063..c02e7e2 100644 --- a/commercedbsync/resources/sql/createSchedulerTablesMSSQL.sql +++ b/commercedbsync/resources/sql/createSchedulerTablesMSSQL.sql @@ -5,6 +5,7 @@ CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYTASKS ( targetnodeId int NOT NULL, migrationId NVARCHAR(255) NOT NULL, pipelinename NVARCHAR(255) NOT NULL, + itemorder int NOT NULL DEFAULT 0, sourcetablename NVARCHAR(255) NOT NULL, targettablename NVARCHAR(255) NOT NULL, columnmap NVARCHAR(MAX) NULL, @@ -39,6 +40,18 @@ CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES ( PRIMARY KEY (migrationid, batchId, pipelinename) ); +DROP TABLE IF EXISTS
MIGRATIONTOOLKIT_TABLECOPYBATCHES_PART; + +CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES_PART ( + migrationId NVARCHAR(255) NOT NULL, + batchId int NOT NULL DEFAULT 0, + pipelinename NVARCHAR(255) NOT NULL, + lowerBoundary NVARCHAR(255) NOT NULL, + upperBoundary NVARCHAR(255) NULL, + partition VARCHAR(128) NOT NULL, + PRIMARY KEY (migrationid, batchId, pipelinename, partition) +); + DROP TABLE IF EXISTS MIGRATIONTOOLKIT_TABLECOPYSTATUS; CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYSTATUS ( diff --git a/commercedbsync/resources/sql/createSchedulerTablesMYSQL.sql b/commercedbsync/resources/sql/createSchedulerTablesMYSQL.sql index 031c60f..c976ceb 100644 --- a/commercedbsync/resources/sql/createSchedulerTablesMYSQL.sql +++ b/commercedbsync/resources/sql/createSchedulerTablesMYSQL.sql @@ -5,6 +5,7 @@ CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYTASKS targetnodeId int NOT NULL, migrationId VARCHAR(255) NOT NULL, pipelinename VARCHAR(255) NOT NULL, + itemorder int NOT NULL DEFAULT 0, sourcetablename VARCHAR(255) NOT NULL, targettablename VARCHAR(255) NOT NULL, columnmap TEXT NULL, @@ -40,6 +41,19 @@ CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES PRIMARY KEY (migrationid, batchId, pipelinename) ); # +DROP TABLE IF EXISTS MIGRATIONTOOLKIT_TABLECOPYBATCHES_PART; +# +CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES_PART +( + migrationId VARCHAR(255) NOT NULL, + batchId int NOT NULL DEFAULT 0, + pipelinename VARCHAR(255) NOT NULL, + lowerBoundary VARCHAR(255) NOT NULL, + upperBoundary VARCHAR(255) NULL, + partition VARCHAR(128) NOT NULL, + PRIMARY KEY (migrationid, batchId, pipelinename, partition) +); +# DROP TABLE IF EXISTS MIGRATIONTOOLKIT_TABLECOPYSTATUS; # CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYSTATUS diff --git a/commercedbsync/resources/sql/createSchedulerTablesORACLE.sql b/commercedbsync/resources/sql/createSchedulerTablesORACLE.sql index 75bc479..bf2047f 100644 --- a/commercedbsync/resources/sql/createSchedulerTablesORACLE.sql +++ b/commercedbsync/resources/sql/createSchedulerTablesORACLE.sql @@ -11,6 +11,7 @@ CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYTASKS ( targetnodeId number(10) NOT NULL, migrationId NVARCHAR2(255) NOT NULL, pipelinename NVARCHAR2(255) NOT NULL, + itemorder int DEFAULT 0 NOT NULL, sourcetablename NVARCHAR2(255) NOT NULL, targettablename NVARCHAR2(255) NOT NULL, columnmap CLOB NULL, @@ -57,6 +58,27 @@ CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES ( +BEGIN + EXECUTE IMMEDIATE 'DROP TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES_PART'; +EXCEPTION + WHEN OTHERS THEN NULL; +END; +/ + + +CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES_PART ( + migrationId NVARCHAR2(255) NOT NULL, + batchId number(10) DEFAULT 0 NOT NULL, + pipelinename NVARCHAR2(255) NOT NULL, + lowerBoundary NVARCHAR2(255) NOT NULL, + upperBoundary NVARCHAR2(255) NULL, + partition VARCHAR(128) NOT NULL, + PRIMARY KEY (migrationid, batchId, pipelinename, partition) +) +/ + + + BEGIN EXECUTE IMMEDIATE 'DROP TABLE MIGRATIONTOOLKIT_TABLECOPYSTATUS'; diff --git a/commercedbsync/resources/sql/createSchedulerTablesPOSTGRESQL.sql b/commercedbsync/resources/sql/createSchedulerTablesPOSTGRESQL.sql index ac748b2..f10cb61 100644 --- a/commercedbsync/resources/sql/createSchedulerTablesPOSTGRESQL.sql +++ b/commercedbsync/resources/sql/createSchedulerTablesPOSTGRESQL.sql @@ -6,6 +6,7 @@ CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYTASKS ( targetnodeId int NOT NULL, migrationId VARCHAR(255) NOT NULL, pipelinename VARCHAR(255) NOT NULL, + itemorder int NOT NULL DEFAULT 0, sourcetablename VARCHAR(255) NOT NULL, targettablename VARCHAR(255) NOT NULL, 
columnmap text NULL, @@ -46,6 +47,22 @@ CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES ( # +DROP TABLE IF EXISTS MIGRATIONTOOLKIT_TABLECOPYBATCHES_PART; + +# + +CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES_PART ( + migrationId VARCHAR(255) NOT NULL, + batchId int NOT NULL DEFAULT 0, + pipelinename VARCHAR(255) NOT NULL, + lowerBoundary VARCHAR(255) NOT NULL, + upperBoundary VARCHAR(255) NULL, + partition VARCHAR(128) NOT NULL, + PRIMARY KEY (migrationid, batchId, pipelinename, partition) +); + +# + DROP TABLE IF EXISTS MIGRATIONTOOLKIT_TABLECOPYSTATUS; # diff --git a/commercedbsync/resources/sql/createSchemaSchedulerTablesHANA.sql b/commercedbsync/resources/sql/createSchemaSchedulerTablesHANA.sql index 97dc721..6e4c993 100644 --- a/commercedbsync/resources/sql/createSchemaSchedulerTablesHANA.sql +++ b/commercedbsync/resources/sql/createSchemaSchedulerTablesHANA.sql @@ -54,7 +54,7 @@ CREATE TABLE MIGRATIONTOOLKIT_SCHEMADIFFSTATUS ( completed INT NOT NULL DEFAULT 0, failed INT NOT NULL DEFAULT 0, status NVARCHAR(255) NOT NULL DEFAULT 'RUNNING', - sqlScript NVARCHAR(5000) NULL + sqlScript CLOB NULL ); # CREATE OR REPLACE TRIGGER MIGRATIONTOOLKIT_SCHEMADIFFSTATUS_Update_trigger diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/adapter/DataRepositoryAdapter.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/adapter/DataRepositoryAdapter.java index f914d6d..e3aadc7 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/adapter/DataRepositoryAdapter.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/adapter/DataRepositoryAdapter.java @@ -11,6 +11,7 @@ import com.sap.cx.boosters.commercedbsync.MarkersQueryDefinition; import com.sap.cx.boosters.commercedbsync.OffsetQueryDefinition; import com.sap.cx.boosters.commercedbsync.SeekQueryDefinition; +import java.util.List; public interface DataRepositoryAdapter { long getRowCount(MigrationContext context, String table) throws Exception; @@ -23,4 +24,6 @@ public interface DataRepositoryAdapter { DataSet getBatchMarkersOrderedByColumn(MigrationContext context, MarkersQueryDefinition queryDefinition) throws Exception; + + List getPartitions(String table) throws Exception; } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/adapter/impl/ContextualDataRepositoryAdapter.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/adapter/impl/ContextualDataRepositoryAdapter.java index 0049ae8..950d784 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/adapter/impl/ContextualDataRepositoryAdapter.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/adapter/impl/ContextualDataRepositoryAdapter.java @@ -6,16 +6,16 @@ package com.sap.cx.boosters.commercedbsync.adapter.impl; +import com.sap.cx.boosters.commercedbsync.MarkersQueryDefinition; +import com.sap.cx.boosters.commercedbsync.OffsetQueryDefinition; +import com.sap.cx.boosters.commercedbsync.SeekQueryDefinition; import com.sap.cx.boosters.commercedbsync.adapter.DataRepositoryAdapter; import com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants; import com.sap.cx.boosters.commercedbsync.context.MigrationContext; import com.sap.cx.boosters.commercedbsync.dataset.DataSet; import com.sap.cx.boosters.commercedbsync.repository.DataRepository; -import com.sap.cx.boosters.commercedbsync.MarkersQueryDefinition; -import com.sap.cx.boosters.commercedbsync.OffsetQueryDefinition; -import com.sap.cx.boosters.commercedbsync.SeekQueryDefinition; - import java.time.Instant; +import java.util.List; /** * Controls 
the way the repository is accessed by adapting the most common @@ -82,6 +82,11 @@ public DataSet getBatchMarkersOrderedByColumn(MigrationContext context, MarkersQ } } + @Override + public List getPartitions(String table) throws Exception { + return repository.getPartitions(table); + } + private Instant getIncrementalTimestamp(MigrationContext context) { Instant incrementalTimestamp = context.getIncrementalTimestamp(); if (incrementalTimestamp == null) { diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataPipeFactory.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataPipeFactory.java index e041413..6abe3a8 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataPipeFactory.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataPipeFactory.java @@ -25,6 +25,7 @@ import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTaskRepository; import com.sap.cx.boosters.commercedbsync.views.TableViewGenerator; +import de.hybris.platform.core.Registry; import org.apache.commons.lang3.tuple.Pair; import org.fest.util.Collections; import com.sap.cx.boosters.commercedbsync.DataThreadPoolConfig; @@ -72,6 +73,9 @@ public DataPipe create(CopyContext context, CopyContext.DataCopyItem it try { executor.submit(() -> { try { + if (!Registry.hasCurrentTenant()) { + Registry.activateMasterTenant(); + } scheduleWorkers(context, workerExecutor, pipe, item); workerExecutor.waitAndRethrowUncaughtExceptions(); pipe.put(MaybeFinished.finished(DataSet.EMPTY)); @@ -131,16 +135,8 @@ private void scheduleWorkers(CopyContext context, DataWorkerExecutor wo if (batchColumn.isEmpty()) { // trying offset queries with unique index columns - Set batchColumns; - DataSet uniqueColumns = context.getMigrationContext().getDataSourceRepository() - .getUniqueColumns(TableViewGenerator.getTableNameForView(table, context.getMigrationContext())); - if (uniqueColumns.isNotEmpty()) { - if (uniqueColumns.getColumnCount() == 0) { - throw new IllegalStateException( - "Corrupt dataset retrieved. 
Dataset should have information about unique columns"); - } - batchColumns = uniqueColumns.getAllResults().stream().map(row -> String.valueOf(row.get(0))) - .collect(Collectors.toCollection(LinkedHashSet::new)); + Set batchColumns = getBatchColumns(context, table); + if (!batchColumns.isEmpty()) { taskRepository.updateTaskCopyMethod(context, copyItem, DataCopyMethod.OFFSET.toString()); taskRepository.updateTaskKeyColumns(context, copyItem, batchColumns); @@ -191,50 +187,46 @@ private void scheduleWorkers(CopyContext context, DataWorkerExecutor wo taskRepository.updateTaskKeyColumns(context, copyItem, Lists.newArrayList(batchColumn)); List> batchMarkersList; if (context.getMigrationContext().isSchedulerResumeEnabled()) { - Set pendingBatchesForPipeline = taskRepository - .findPendingBatchesForPipeline(context, copyItem); - batchMarkersList = pendingBatchesForPipeline.stream() - .map(b -> Collections.list(b.getLowerBoundary())).collect(Collectors.toList()); - taskRepository.resetPipelineBatches(context, copyItem); - } else { - MarkersQueryDefinition queryDefinition = new MarkersQueryDefinition(); - queryDefinition.setTable(table); - queryDefinition.setColumn(batchColumn); - queryDefinition.setBatchSize(batchSize); - queryDefinition.setDeletionEnabled(context.getMigrationContext().isDeletionEnabled()); - queryDefinition.setLpTableEnabled(context.getMigrationContext().isLpTableMigrationEnabled()); - DataSet batchMarkers = dataRepositoryAdapter - .getBatchMarkersOrderedByColumn(context.getMigrationContext(), queryDefinition); - batchMarkersList = batchMarkers.getAllResults(); - if (batchMarkersList.isEmpty()) { - throw new RuntimeException("Could not retrieve batch values for table " + table); - } - } - for (int i = 0; i < batchMarkersList.size(); i++) { - boolean processBatch = isCurrentChunkBatch(copyItem, chunkedTable, i); - if (processBatch) { - List lastBatchMarkerRow = batchMarkersList.get(i); - Optional> nextBatchMarkerRow = Optional.empty(); - int nextIndex = i + 1; - if (nextIndex < batchMarkersList.size()) { - nextBatchMarkerRow = Optional.of(batchMarkersList.get(nextIndex)); + if (context.getMigrationContext().getPartitionedTables().contains(table)) { + LOG.debug("Resuming partitioned table {}", table); + final var partitions = dataRepositoryAdapter.getPartitions(table); + for (String partition : partitions) { + Set pendingBatchesForPipeline = taskRepository + .findPendingBatchesForPipeline(context, copyItem, partition); + batchMarkersList = new ArrayList<>(pendingBatchesForPipeline.stream() + .map(b -> Collections.list(b.getLowerBoundary())).toList()); + taskRepository.resetPipelineBatches(context, copyItem, partition); + createDataReaderTasks(workerExecutor, pipeTaskContext, batchColumn, batchMarkersList, + copyItem, chunkedTable, partition); } - if (!Collections.isEmpty(lastBatchMarkerRow)) { - Object lastBatchValue = lastBatchMarkerRow.get(0); - Object nextValue = nextBatchMarkerRow.map(v -> v.get(0)).orElseGet(() -> null); - // check if nextValue is null and allow Pair(value, null) only if it is last - // chunk - Pair batchMarkersPair = Pair.of(lastBatchValue, nextValue); - DataReaderTask dataReaderTask = new BatchMarkerDataReaderTask(pipeTaskContext, i, - batchColumn, batchMarkersPair, false); - // After creating the task, we register the batch in the db for later use if - // necessary - taskRepository.scheduleBatch(context, copyItem, i, batchMarkersPair.getLeft(), - batchMarkersPair.getRight()); - workerExecutor.safelyExecute(dataReaderTask); - } else { - throw new 
IllegalArgumentException("Invalid batch marker passed to task"); + } else { + Set pendingBatchesForPipeline = taskRepository + .findPendingBatchesForPipeline(context, copyItem); + batchMarkersList = pendingBatchesForPipeline.stream() + .map(b -> Collections.list(b.getLowerBoundary())).collect(Collectors.toList()); + taskRepository.resetPipelineBatches(context, copyItem); + createDataReaderTasks(workerExecutor, pipeTaskContext, batchColumn, batchMarkersList, copyItem, + chunkedTable, null); + } + } else { + if (context.getMigrationContext().getPartitionedTables().contains(table)) { + LOG.debug("Processing partitioned table {}", table); + final var partitions = dataRepositoryAdapter.getPartitions(table); + for (String partition : partitions) { + MarkersQueryDefinition queryDefinition = new MarkersQueryDefinition(); + queryDefinition.setPartition(partition); + LOG.debug("getBatchMarkers for partition {}", partition); + final var batchMarkers = getBatchMarkers(context, dataRepositoryAdapter, table, batchSize, + batchColumn, queryDefinition); + createDataReaderTasks(workerExecutor, pipeTaskContext, batchColumn, batchMarkers, copyItem, + chunkedTable, partition); } + } else { + MarkersQueryDefinition queryDefinition = new MarkersQueryDefinition(); + final var batchMarkers = getBatchMarkers(context, dataRepositoryAdapter, table, batchSize, + batchColumn, queryDefinition); + createDataReaderTasks(workerExecutor, pipeTaskContext, batchColumn, batchMarkers, copyItem, + chunkedTable, null); } } } @@ -259,4 +251,72 @@ private static boolean isCurrentChunkBatch(CopyContext.DataCopyItem copyItem, bo } return processBatch; } + + protected List> getBatchMarkers(CopyContext context, DataRepositoryAdapter dataRepositoryAdapter, + String table, long batchSize, String batchColumn, MarkersQueryDefinition queryDefinition) throws Exception { + queryDefinition.setTable(table); + queryDefinition.setColumn(batchColumn); + queryDefinition.setBatchSize(batchSize); + queryDefinition.setDeletionEnabled(context.getMigrationContext().isDeletionEnabled()); + queryDefinition.setLpTableEnabled(context.getMigrationContext().isLpTableMigrationEnabled()); + DataSet batchMarkers = dataRepositoryAdapter.getBatchMarkersOrderedByColumn(context.getMigrationContext(), + queryDefinition); + List> batchMarkersList = batchMarkers.getAllResults(); + if (batchMarkersList.isEmpty()) { + throw new RuntimeException("Could not retrieve batch values for table " + table); + } + return batchMarkersList; + } + + protected void createDataReaderTasks(DataWorkerExecutor workerExecutor, PipeTaskContext pipeTaskContext, + String batchColumn, List> batchMarkersList, final CopyContext.DataCopyItem copyItem, + final boolean chunkedTable, String partition) throws Exception { + for (int i = 0; i < batchMarkersList.size(); i++) { + boolean processBatch = isCurrentChunkBatch(copyItem, chunkedTable, i); + if (processBatch) { + List lastBatchMarkerRow = batchMarkersList.get(i); + Optional> nextBatchMarkerRow = Optional.empty(); + int nextIndex = i + 1; + if (nextIndex < batchMarkersList.size()) { + nextBatchMarkerRow = Optional.of(batchMarkersList.get(nextIndex)); + } + if (!Collections.isEmpty(lastBatchMarkerRow)) { + Object lastBatchValue = lastBatchMarkerRow.get(0); + Object nextValue = nextBatchMarkerRow.map(v -> v.get(0)).orElseGet(() -> null); + // check if nextValue is null and allow Pair(value, null) only if it is last + // chunk + Pair batchMarkersPair = Pair.of(lastBatchValue, nextValue); + DataReaderTask dataReaderTask = partition == null + ? 
new BatchMarkerDataReaderTask(pipeTaskContext, i, batchColumn, batchMarkersPair, false) + : new PartitionedBatchMarkerDataReaderTask(pipeTaskContext, i, batchColumn, + batchMarkersPair, false, partition); + // After creating the task, we register the batch in the db for later use if + // necessary + taskRepository.scheduleBatch(pipeTaskContext.getContext(), copyItem, i, batchMarkersPair.getLeft(), + batchMarkersPair.getRight(), partition); + workerExecutor.safelyExecute(dataReaderTask); + } else { + throw new IllegalArgumentException("Invalid batch marker passed to task"); + } + } + } + } + + protected Set getBatchColumns(CopyContext context, String table) throws Exception { + if (context.getMigrationContext().getBatchColumns().containsKey(table)) { + return context.getMigrationContext().getBatchColumns().get(table); + } else { + DataSet uniqueColumns = context.getMigrationContext().getDataSourceRepository() + .getUniqueColumns(TableViewGenerator.getTableNameForView(table, context.getMigrationContext())); + if (uniqueColumns.isNotEmpty()) { + if (uniqueColumns.getColumnCount() == 0) { + throw new IllegalStateException( + "Corrupt dataset retrieved. Dataset should have information about unique columns"); + } + return uniqueColumns.getAllResults().stream().map(row -> String.valueOf(row.get(0))) + .collect(Collectors.toCollection(LinkedHashSet::new)); + } + return Set.of(); + } + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/BatchMarkerDataReaderTask.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/BatchMarkerDataReaderTask.java index 0168429..e076357 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/BatchMarkerDataReaderTask.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/BatchMarkerDataReaderTask.java @@ -45,15 +45,7 @@ private void process(Object lastValue, Object nextValue) throws Exception { DataRepositoryAdapter adapter = getPipeTaskContext().getDataRepositoryAdapter(); String table = getPipeTaskContext().getTable(); long pageSize = getPipeTaskContext().getPageSize(); - SeekQueryDefinition queryDefinition = new SeekQueryDefinition(); - queryDefinition.setBatchId(batchId); - queryDefinition.setTable(table); - queryDefinition.setColumn(batchColumn); - queryDefinition.setLastColumnValue(lastValue); - queryDefinition.setNextColumnValue(nextValue); - queryDefinition.setBatchSize(pageSize); - queryDefinition.setDeletionEnabled(ctx.getMigrationContext().isDeletionEnabled()); - queryDefinition.setLpTableEnabled(ctx.getMigrationContext().isLpTableMigrationEnabled()); + SeekQueryDefinition queryDefinition = createSeekQueryDefinition(lastValue, nextValue, table, pageSize, ctx); if (LOG.isDebugEnabled()) { LOG.debug("Executing markers query for {} with lastvalue: {}, nextvalue: {}, batchsize: {}", table, lastValue, nextValue, pageSize); @@ -65,4 +57,18 @@ private void process(Object lastValue, Object nextValue) throws Exception { getPipeTaskContext().getRecorder().record(PerformanceUnit.ROWS, pageSize); getPipeTaskContext().getPipe().put(MaybeFinished.of(page)); } + + protected SeekQueryDefinition createSeekQueryDefinition(final Object lastValue, final Object nextValue, + final String table, final long pageSize, final CopyContext ctx) { + SeekQueryDefinition queryDefinition = new SeekQueryDefinition(); + queryDefinition.setBatchId(batchId); + queryDefinition.setTable(table); + queryDefinition.setColumn(batchColumn); + 
queryDefinition.setLastColumnValue(lastValue); + queryDefinition.setNextColumnValue(nextValue); + queryDefinition.setBatchSize(pageSize); + queryDefinition.setDeletionEnabled(ctx.getMigrationContext().isDeletionEnabled()); + queryDefinition.setLpTableEnabled(ctx.getMigrationContext().isLpTableMigrationEnabled()); + return queryDefinition; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/PartitionedBatchMarkerDataReaderTask.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/PartitionedBatchMarkerDataReaderTask.java new file mode 100644 index 0000000..bdfc0ef --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/PartitionedBatchMarkerDataReaderTask.java @@ -0,0 +1,33 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.concurrent.impl.task; + +import com.sap.cx.boosters.commercedbsync.SeekQueryDefinition; +import com.sap.cx.boosters.commercedbsync.context.CopyContext; +import org.apache.commons.lang3.tuple.Pair; + +public class PartitionedBatchMarkerDataReaderTask extends BatchMarkerDataReaderTask { + private final String partition; + + public PartitionedBatchMarkerDataReaderTask(PipeTaskContext pipeTaskContext, int batchId, String batchColumn, + Pair batchMarkersPair, boolean upperBoundInclusive, String partition) { + super(pipeTaskContext, batchId, batchColumn, batchMarkersPair, upperBoundInclusive); + this.partition = partition; + } + + @Override + protected SeekQueryDefinition createSeekQueryDefinition(final Object lastValue, final Object nextValue, + final String table, final long pageSize, final CopyContext ctx) { + final var seekQueryDefinition = super.createSeekQueryDefinition(lastValue, nextValue, table, pageSize, ctx); + seekQueryDefinition.setPartition(getPartition()); + return seekQueryDefinition; + } + + public String getPartition() { + return partition; + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/constants/CommercedbsyncConstants.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/constants/CommercedbsyncConstants.java index 120c5b1..f771b59 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/constants/CommercedbsyncConstants.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/constants/CommercedbsyncConstants.java @@ -21,7 +21,8 @@ public final class CommercedbsyncConstants extends GeneratedCommercedbsyncConsta public static final String MIGRATION_SCHEMA_TARGET_COLUMNS_ADD_ENABLED = "migration.schema.target.columns.add.enabled"; public static final String MIGRATION_SCHEMA_TARGET_COLUMNS_REMOVE_ENABLED = "migration.schema.target.columns.remove.enabled"; public static final String MIGRATION_TARGET_MAX_STAGE_MIGRATIONS = "migration.ds.target.db.max.stage.migrations"; - public static final String MIGRATION_DATA_EXPORT_ENABLED = "migration.data.export.enabled"; + public static final String MIGRATION_DATA_SYNCHRONIZATION_ENABLED = "migration.data.synchronization.enabled"; + public static final String MIGRATION_INTERNAL_TABLES_STORAGE = "migration.internal.tables.storage"; public static final String MIGRATION_SCHEMA_AUTOTRIGGER_ENABLED = "migration.schema.autotrigger.enabled"; public static final String MIGRATION_DATA_FULLDATABASE = "migration.data.fulldatabase.enabled"; public static final String MIGRATION_DATA_READER_BATCHSIZE = "migration.data.reader.batchsize"; @@ -36,6 +37,7 @@ 
public final class CommercedbsyncConstants extends GeneratedCommercedbsyncConsta public static final String MIGRATION_DATA_MAXPRALLELTABLECOPY = "migration.data.maxparalleltablecopy"; public static final String MIGRATION_DATA_FAILONEERROR_ENABLED = "migration.data.failonerror.enabled"; public static final String MIGRATION_DATA_COLUMNS_EXCLUDED = "migration.data.columns.excluded"; + public static final String MIGRATION_DATA_COLUMNS_BATCH = "migration.data.columns.batch"; public static final String MIGRATION_DATA_COLUMNS_NULLIFY = "migration.data.columns.nullify"; public static final String MIGRATION_DATA_INDICES_DROP_ENABLED = "migration.data.indices.drop.enabled"; public static final String MIGRATION_DATA_INDICES_DISABLE_ENABLED = "migration.data.indices.disable.enabled"; @@ -44,6 +46,9 @@ public final class CommercedbsyncConstants extends GeneratedCommercedbsyncConsta public static final String MIGRATION_DATA_TABLES_CUSTOM = "migration.data.tables.custom"; public static final String MIGRATION_DATA_TABLES_EXCLUDED = "migration.data.tables.excluded"; public static final String MIGRATION_DATA_TABLES_INCLUDED = "migration.data.tables.included"; + public static final String MIGRATION_DATA_TABLES_ORDERED_FIRST = "migration.data.tables.order.first"; + public static final String MIGRATION_DATA_TABLES_ORDERED_LAST = "migration.data.tables.order.last"; + public static final String MIGRATION_DATA_TABLES_PARTITIONED = "migration.data.tables.partitioned"; public static final String MIGRATION_CLUSTER_ENABLED = "migration.cluster.enabled"; public static final String MIGRATION_DATA_INCREMENTAL_ENABLED = "migration.data.incremental.enabled"; public static final String MIGRATION_DATA_INCREMENTAL_TABLES = "migration.data.incremental.tables"; @@ -113,6 +118,9 @@ public final class CommercedbsyncConstants extends GeneratedCommercedbsyncConsta // MSSQL Post Processing public static final String MIGRATION_DATA_MSSQL_UPDATE_STATISTICS_ENABLED = "migration.data.mssql.update.statistics.enabled"; + public static final String MIGRATION_INTERNAL_TABLES_STORAGE_TARGET = "target"; + public static final String MIGRATION_INTERNAL_TABLES_STORAGE_SOURCE = "source"; + private CommercedbsyncConstants() { // empty to avoid instantiating this constant class } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/MigrationContext.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/MigrationContext.java index 0f16023..dc7b392 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/MigrationContext.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/MigrationContext.java @@ -7,6 +7,7 @@ package com.sap.cx.boosters.commercedbsync.context; import com.sap.cx.boosters.commercedbsync.repository.DataRepository; +import org.slf4j.Logger; import java.sql.SQLException; import java.time.Instant; @@ -22,6 +23,8 @@ public interface MigrationContext { DataRepository getDataTargetRepository(); + DataRepository getDataRepository(); + boolean isMigrationTriggeredByUpdateProcess(); boolean isSchemaMigrationEnabled(); @@ -78,12 +81,22 @@ public interface MigrationContext { Map> getNullifyColumns(); + Map> getBatchColumns(); + Set getCustomTables(); Set getExcludedTables(); Set getIncludedTables(); + Set getTablesOrderedAsFirst(); + + Set getTablesOrderedAsLast(); + + boolean isTablesOrdered(); + + Set getPartitionedTables(); + boolean isDropAllIndexesEnabled(); boolean isDisableAllIndexesEnabled(); @@ -102,13 +115,17 @@ public interface MigrationContext { int 
getDataPipeCapacity(); + boolean isReversed(); + int getStalledTimeout(); String getFileStorageConnectionString(); int getMaxTargetStagedMigrations(); - boolean isDataExportEnabled(); + boolean isDataSynchronizationEnabled(); + + String getInternalTablesStorage(); boolean isDeletionEnabled(); @@ -204,4 +221,41 @@ public interface MigrationContext { Set getTablesForViews(); String getViewColumnPrefixFor(String tableName); + + default void dumpLog(Logger logger) { + logger.info("-------- MIGRATION CONTEXT - START ----------"); + + logger.info("isAddMissingColumnsToSchemaEnabled={}", isAddMissingColumnsToSchemaEnabled()); + logger.info("isAddMissingTablesToSchemaEnabled={}", isAddMissingTablesToSchemaEnabled()); + logger.info("isAuditTableMigrationEnabled={}", isAuditTableMigrationEnabled()); + logger.info("isClusterMode={}", isClusterMode()); + logger.info("isDeletionEnabled={}", isDeletionEnabled()); + logger.info("isDisableAllIndexesEnabled={}", isDisableAllIndexesEnabled()); + logger.info("isDropAllIndexesEnabled={}", isDropAllIndexesEnabled()); + logger.info("isFailOnErrorEnabled={}", isFailOnErrorEnabled()); + logger.info("isIncrementalModeEnabled={}", isIncrementalModeEnabled()); + logger.info("isMigrationTriggeredByUpdateProcess={}", isMigrationTriggeredByUpdateProcess()); + logger.info("isRemoveMissingColumnsToSchemaEnabled={}", isRemoveMissingColumnsToSchemaEnabled()); + logger.info("isRemoveMissingTablesToSchemaEnabled={}", isRemoveMissingTablesToSchemaEnabled()); + logger.info("isSchemaMigrationAutoTriggerEnabled={}", isSchemaMigrationAutoTriggerEnabled()); + logger.info("isSchemaMigrationEnabled={}", isSchemaMigrationEnabled()); + logger.info("isTruncateEnabled={}", isTruncateEnabled()); + logger.info("getIncludedTables={}", getIncludedTables()); + logger.info("getExcludedTables={}", getExcludedTables()); + logger.info("getIncrementalTables={}", getIncrementalTables()); + logger.info("getTablesOrderedAsFirst={}", getTablesOrderedAsFirst()); + logger.info("getTablesOrderedAsLast={}", getTablesOrderedAsLast()); + logger.info("getTruncateExcludedTables={}", getTruncateExcludedTables()); + logger.info("getCustomTables={}", getCustomTables()); + logger.info("getIncrementalTimestamp={}", getIncrementalTimestamp()); + logger.info("Source TS Name={}", getDataSourceRepository().getDataSourceConfiguration().getTypeSystemName()); + logger.info("Source TS Suffix={}", + getDataSourceRepository().getDataSourceConfiguration().getTypeSystemSuffix()); + logger.info("Target TS Name={}", getDataTargetRepository().getDataSourceConfiguration().getTypeSystemName()); + logger.info("Target TS Suffix={}", + getDataTargetRepository().getDataSourceConfiguration().getTypeSystemSuffix()); + logger.info("getItemTypeViewNamePattern={}", getItemTypeViewNamePattern()); + + logger.info("-------- MIGRATION CONTEXT - END ----------"); + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/MigrationContextFactory.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/MigrationContextFactory.java index 42b59f1..77e30e2 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/MigrationContextFactory.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/MigrationContextFactory.java @@ -17,20 +17,23 @@ public class MigrationContextFactory { final DataRepositoryFactory dataRepositoryFactory; final DataSourceConfigurationFactory dataSourceConfigurationFactory; final Configuration configuration; + final boolean reversed; - public 
MigrationContextFactory(DataRepositoryFactory dataRepositoryFactory, - DataSourceConfigurationFactory dataSourceConfigurationFactory, Configuration configuration) { + public MigrationContextFactory(final DataRepositoryFactory dataRepositoryFactory, + final DataSourceConfigurationFactory dataSourceConfigurationFactory, final Configuration configuration, + final boolean reversed) { this.dataRepositoryFactory = dataRepositoryFactory; this.dataSourceConfigurationFactory = dataSourceConfigurationFactory; this.configuration = configuration; + this.reversed = reversed; } public MigrationContext create() throws Exception { - if (configuration.getBoolean(CommercedbsyncConstants.MIGRATION_DATA_EXPORT_ENABLED, false)) { + if (configuration.getBoolean(CommercedbsyncConstants.MIGRATION_DATA_SYNCHRONIZATION_ENABLED, false)) { return new DefaultIncrementalMigrationContext(dataRepositoryFactory, dataSourceConfigurationFactory, - configuration); + configuration, reversed); } - return new DefaultMigrationContext(dataRepositoryFactory, dataSourceConfigurationFactory, configuration); + return new DefaultMigrationContext(dataRepositoryFactory, dataSourceConfigurationFactory, configuration, false); } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/impl/DefaultIncrementalMigrationContext.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/impl/DefaultIncrementalMigrationContext.java index 9b650d6..c081fbe 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/impl/DefaultIncrementalMigrationContext.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/impl/DefaultIncrementalMigrationContext.java @@ -22,16 +22,15 @@ import java.util.Set; public class DefaultIncrementalMigrationContext extends DefaultMigrationContext implements IncrementalMigrationContext { - private static final Logger LOG = Logger.getLogger(DefaultIncrementalMigrationContext.class.getName()); private Instant timestampInstant; private Set incrementalTables; private Set includedTables; public DefaultIncrementalMigrationContext(final DataRepositoryFactory dataRepositoryFactory, - final DataSourceConfigurationFactory dataSourceConfigurationFactory, final Configuration configuration) - throws Exception { - super(dataRepositoryFactory, dataSourceConfigurationFactory, configuration); + final DataSourceConfigurationFactory dataSourceConfigurationFactory, final Configuration configuration, + final boolean reversed) throws Exception { + super(dataRepositoryFactory, dataSourceConfigurationFactory, configuration, reversed); } @Override diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/impl/DefaultMigrationContext.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/impl/DefaultMigrationContext.java index 468d201..6f8bb1f 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/impl/DefaultMigrationContext.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/impl/DefaultMigrationContext.java @@ -13,6 +13,7 @@ import com.sap.cx.boosters.commercedbsync.profile.DataSourceConfigurationFactory; import com.sap.cx.boosters.commercedbsync.repository.DataRepository; import com.sap.cx.boosters.commercedbsync.repository.impl.DataRepositoryFactory; +import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.configuration.Configuration; import org.apache.commons.lang.StringUtils; @@ -23,6 +24,9 @@ import java.util.*; import java.util.stream.Collectors; +import static 
com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants.MIGRATION_INTERNAL_TABLES_STORAGE_SOURCE; +import static com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants.MIGRATION_INTERNAL_TABLES_STORAGE_TARGET; + public class DefaultMigrationContext implements MigrationContext { protected final Configuration configuration; private final DataRepository dataSourceRepository; @@ -30,18 +34,25 @@ public class DefaultMigrationContext implements MigrationContext { protected boolean deletionEnabled; protected boolean lpTableMigrationEnabled; protected Set tableViewNames; + private final boolean reversed; public DefaultMigrationContext(final DataRepositoryFactory dataRepositoryFactory, - final DataSourceConfigurationFactory dataSourceConfigurationFactory, final Configuration configuration) - throws Exception { + final DataSourceConfigurationFactory dataSourceConfigurationFactory, final Configuration configuration, + final boolean reversed) throws Exception { this.configuration = configuration; + this.reversed = reversed; ensureDefaultLocale(configuration); final Set inputDataSourceConfigurations = getInputProfiles().stream() .map(dataSourceConfigurationFactory::create).collect(Collectors.toSet()); final Set outputDataSourceConfigurations = getOutputProfiles().stream() .map(dataSourceConfigurationFactory::create).collect(Collectors.toSet()); - this.dataSourceRepository = dataRepositoryFactory.create(this, inputDataSourceConfigurations); - this.dataTargetRepository = dataRepositoryFactory.create(this, outputDataSourceConfigurations); + if (reversed) { + this.dataSourceRepository = dataRepositoryFactory.create(this, outputDataSourceConfigurations); + this.dataTargetRepository = dataRepositoryFactory.create(this, inputDataSourceConfigurations); + } else { + this.dataSourceRepository = dataRepositoryFactory.create(this, inputDataSourceConfigurations); + this.dataTargetRepository = dataRepositoryFactory.create(this, outputDataSourceConfigurations); + } } private void ensureDefaultLocale(Configuration configuration) { String localeProperty = configuration.getString(CommercedbsyncConstants.MIGRATION_LOCALE_DEFAULT); @@ -59,6 +70,13 @@ public DataRepository getDataTargetRepository() { return dataTargetRepository; } + @Override + public DataRepository getDataRepository() { + return MIGRATION_INTERNAL_TABLES_STORAGE_TARGET.equalsIgnoreCase(getInternalTablesStorage()) + ? 
getDataTargetRepository() + : getDataSourceRepository(); + } + @Override public boolean isMigrationTriggeredByUpdateProcess() { return getBooleanProperty(CommercedbsyncConstants.MIGRATION_TRIGGER_UPDATESYSTEM); @@ -172,6 +190,11 @@ public Map> getNullifyColumns() { return getDynamicPropertyKeys(CommercedbsyncConstants.MIGRATION_DATA_COLUMNS_NULLIFY); } + @Override + public Map> getBatchColumns() { + return getDynamicPropertyKeys(CommercedbsyncConstants.MIGRATION_DATA_COLUMNS_BATCH); + } + @Override public Set getCustomTables() { return getListProperty(CommercedbsyncConstants.MIGRATION_DATA_TABLES_CUSTOM); @@ -187,6 +210,31 @@ public Set getIncludedTables() { return getListProperty(CommercedbsyncConstants.MIGRATION_DATA_TABLES_INCLUDED); } + @Override + public Set getTablesOrderedAsFirst() { + return getListProperty(CommercedbsyncConstants.MIGRATION_DATA_TABLES_ORDERED_FIRST); + } + + @Override + public Set getTablesOrderedAsLast() { + return getListProperty(CommercedbsyncConstants.MIGRATION_DATA_TABLES_ORDERED_LAST); + } + + @Override + public boolean isTablesOrdered() { + return CollectionUtils.isNotEmpty(getTablesOrderedAsFirst()) + || CollectionUtils.isNotEmpty(getTablesOrderedAsLast()); + } + + @Override + public Set getPartitionedTables() { + if (getDataSourceRepository().getDatabaseProvider().isHanaUsed()) { + return getListProperty(CommercedbsyncConstants.MIGRATION_DATA_TABLES_PARTITIONED); + } else { + return Set.of(); + } + } + @Override public boolean isDropAllIndexesEnabled() { return getBooleanProperty(CommercedbsyncConstants.MIGRATION_DATA_INDICES_DROP_ENABLED); @@ -247,8 +295,20 @@ public int getMaxTargetStagedMigrations() { } @Override - public boolean isDataExportEnabled() { - return getBooleanProperty(CommercedbsyncConstants.MIGRATION_DATA_EXPORT_ENABLED); + public boolean isDataSynchronizationEnabled() { + return getBooleanProperty(CommercedbsyncConstants.MIGRATION_DATA_SYNCHRONIZATION_ENABLED); + } + + @Override + public String getInternalTablesStorage() { + final String migrationInternalTableStorage = getStringProperty( + CommercedbsyncConstants.MIGRATION_INTERNAL_TABLES_STORAGE); + if (!StringUtils.isBlank(migrationInternalTableStorage)) { + return migrationInternalTableStorage; + } + return isDataSynchronizationEnabled() + ? MIGRATION_INTERNAL_TABLES_STORAGE_SOURCE + : MIGRATION_INTERNAL_TABLES_STORAGE_TARGET; } @Override @@ -338,6 +398,11 @@ public void refreshSelf() { this.tableViewNames = null; } + @Override + public boolean isReversed() { + return this.reversed; + } + @Override public int getStalledTimeout() { return getNumericProperty(CommercedbsyncConstants.MIGRATION_STALLED_TIMEOUT); diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/validation/impl/DefaultMigrationContextValidator.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/validation/impl/DefaultMigrationContextValidator.java index 50827a5..3ed90f7 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/validation/impl/DefaultMigrationContextValidator.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/validation/impl/DefaultMigrationContextValidator.java @@ -37,7 +37,7 @@ public void validateContext(final MigrationContext context) { } private void checkSourceDbIsNotTargetDb(MigrationContext context) { - if (context.isDataExportEnabled()) { + if (context.isDataSynchronizationEnabled()) { return; // in this mode, source DB can (should?) 
be set to CCv2 instance } @@ -54,7 +54,7 @@ private void checkSourceDbIsNotTargetDb(MigrationContext context) { private void checkSystemNotLocked(MigrationContext context) { final boolean isSystemLocked = getConfigurationService().getConfiguration().getBoolean(DISABLE_UNLOCKING); - if (!context.isDataExportEnabled() && isSystemLocked) { + if (!context.isDataSynchronizationEnabled() && isSystemLocked) { throw new RuntimeException( "You cannot run the migration on locked system. Check property " + DISABLE_UNLOCKING); } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/DataSet.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/DataSet.java index 3f9444b..41143b9 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/DataSet.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/DataSet.java @@ -14,7 +14,7 @@ public interface DataSet { - DataSet EMPTY = new DefaultDataSet(0, 0, Collections.emptyList(), Collections.emptyList()); + DataSet EMPTY = new DefaultDataSet(0, 0, Collections.emptyList(), Collections.emptyList(), null); int getBatchId(); @@ -39,4 +39,6 @@ default Object getColumnValue(String column, List row) { DataColumn getColumn(int columnIndex); DataColumn getColumn(String columnName); + + String getPartition(); } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/impl/DefaultDataSet.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/impl/DefaultDataSet.java index 74e0ab0..df103fa 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/impl/DefaultDataSet.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/impl/DefaultDataSet.java @@ -28,12 +28,15 @@ public class DefaultDataSet implements DataSet { private final int columnCount; private final List columnOrder; private final List> result; + private final String partition; - public DefaultDataSet(int batchId, int columnCount, List columnOrder, List> result) { + public DefaultDataSet(int batchId, int columnCount, List columnOrder, List> result, + final String partition) { this.batchId = batchId; this.columnCount = columnCount; this.columnOrder = Collections.unmodifiableList(columnOrder); this.result = result.stream().map(Collections::unmodifiableList).collect(Collectors.toList()); + this.partition = partition; } @Override @@ -106,6 +109,10 @@ public DataColumn getColumn(int columnIndex) { public DataColumn getColumn(String columnName) { return IterableUtils.get(columnOrder, findColumnIndex(columnName)); } + @Override + public String getPartition() { + return partition; + } protected int findColumnIndex(String columnName) { return IterableUtils.indexOf(columnOrder, diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyCompleteEvent.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyCompleteEvent.java index ce233d3..7aad8c7 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyCompleteEvent.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyCompleteEvent.java @@ -10,11 +10,10 @@ * * ClusterAwareEvent to signal completion of the assigned copy ta */ public class CopyCompleteEvent extends OperationEvent { - private final Boolean copyResult = false; - public CopyCompleteEvent(final Integer sourceNodeId, final String migrationId) { - super(sourceNodeId, migrationId); + public CopyCompleteEvent(final Integer sourceNodeId, final String migrationId, final boolean reversed) { + 
super(sourceNodeId, migrationId, reversed); } public Boolean getCopyResult() { diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyDatabaseTableEvent.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyDatabaseTableEvent.java index 316f5e3..d6bb669 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyDatabaseTableEvent.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyDatabaseTableEvent.java @@ -20,8 +20,8 @@ public class CopyDatabaseTableEvent extends OperationEvent { private final Map propertyOverrideMap; public CopyDatabaseTableEvent(final Integer sourceNodeId, final String migrationId, - Map propertyOverrideMap) { - super(sourceNodeId, migrationId); + final Map propertyOverrideMap, final boolean reversed) { + super(sourceNodeId, migrationId, reversed); this.propertyOverrideMap = propertyOverrideMap; } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/OperationEvent.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/OperationEvent.java index 1db3c70..dd886f9 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/OperationEvent.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/OperationEvent.java @@ -14,15 +14,15 @@ * ClusterAwareEvent to notify other Nodes to start the operation */ public abstract class OperationEvent extends AbstractEvent implements ClusterAwareEvent { - private final int sourceNodeId; - private final String operationId; + private final boolean reversed; - public OperationEvent(final int sourceNodeId, final String operationId) { + public OperationEvent(final int sourceNodeId, final String operationId, final boolean reversed) { super(); this.sourceNodeId = sourceNodeId; this.operationId = operationId; + this.reversed = reversed; } @Override @@ -44,4 +44,10 @@ public String getOperationId() { return operationId; } + /** + * @return true if reversed + */ + public boolean isReversed() { + return reversed; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/SchemaDifferenceEvent.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/SchemaDifferenceEvent.java new file mode 100644 index 0000000..a93fb19 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/SchemaDifferenceEvent.java @@ -0,0 +1,18 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
+ * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.events; + +public class SchemaDifferenceEvent extends OperationEvent { + + public SchemaDifferenceEvent(final int sourceNodeId, final String migrationId, final boolean reversed) { + super(sourceNodeId, migrationId, reversed); + } + + public String getSchemaDifferenceId() { + return getOperationId(); + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/handlers/CopyCompleteEventListener.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/handlers/CopyCompleteEventListener.java index e5f049e..17a2a19 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/handlers/CopyCompleteEventListener.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/handlers/CopyCompleteEventListener.java @@ -31,20 +31,19 @@ public class CopyCompleteEventListener extends AbstractEventListener postProcessors; + private MigrationContext reverseMigrationContext; @Override protected void onEvent(final CopyCompleteEvent event) { final String migrationId = event.getOperationId(); LOG.info("Migration finished on Node {} with result {}", event.getSourceNodeId(), event.getCopyResult()); - final CopyContext copyContext = new CopyContext(migrationId, migrationContext, new HashSet<>(), - performanceProfiler); + final CopyContext copyContext = event.isReversed() + ? new CopyContext(migrationId, reverseMigrationContext, new HashSet<>(), performanceProfiler) + : new CopyContext(migrationId, migrationContext, new HashSet<>(), performanceProfiler); executePostProcessors(copyContext); } @@ -60,7 +59,6 @@ private void executePostProcessors(final CopyContext copyContext) { Transaction.current().execute(new TransactionBody() { @Override public Object execute() throws Exception { - final boolean eligibleForPostProcessing = databaseCopyTaskRepository.setMigrationStatus(copyContext, MigrationProgress.PROCESSED, MigrationProgress.POSTPROCESSING) || databaseCopyTaskRepository.setMigrationStatus(copyContext, MigrationProgress.ABORTED, @@ -82,8 +80,8 @@ public Object execute() throws Exception { }); } catch (final Exception e) { LOG.error("Error during PostProcessor execution", e); - if (e instanceof RuntimeException) { - throw (RuntimeException) e; + if (e instanceof RuntimeException runtimeException) { + throw runtimeException; } else { throw new RuntimeException(e); } @@ -105,4 +103,8 @@ public void setPerformanceProfiler(final PerformanceProfiler performanceProfiler public void setPostProcessors(final List postProcessors) { this.postProcessors = postProcessors; } + + public void setReverseMigrationContext(final MigrationContext reverseMigrationContext) { + this.reverseMigrationContext = reverseMigrationContext; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/handlers/CopyDatabaseTableEventListener.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/handlers/CopyDatabaseTableEventListener.java index 2d987fc..6725317 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/handlers/CopyDatabaseTableEventListener.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/handlers/CopyDatabaseTableEventListener.java @@ -22,7 +22,7 @@ import org.slf4j.MDC; import java.io.Serializable; -import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; @@ -34,16 +34,12 @@ public class CopyDatabaseTableEventListener extends 
AbstractEventListener(), - performanceProfiler); + CopyContext copyContext = event.isReversed() + ? new CopyContext(migrationId, reverseMigrationContext, new LinkedHashSet<>(), performanceProfiler) + : new CopyContext(migrationId, migrationContext, new LinkedHashSet<>(), performanceProfiler); + Set copyTableTasks = databaseCopyTaskRepository.findPendingTasks(copyContext); Set items = copyTableTasks.stream() .map(task -> new CopyContext.DataCopyItem(task.getSourcetablename(), task.getTargettablename(), @@ -63,7 +61,7 @@ protected void onEvent(final CopyDatabaseTableEvent event) { ? new CopyContext.DataCopyItem.ChunkData(task.getChunk().getCurrentChunk(), task.getChunk().getChunkSize()) : null)) - .collect(Collectors.toSet()); + .collect(Collectors.toCollection(LinkedHashSet::new)); copyContext.getCopyItems().addAll(items); databaseMigrationCopyService.copyAllAsync(copyContext); @@ -103,4 +101,8 @@ public void setClusterService(ClusterService clusterService) { public void setConfigurationService(ConfigurationService configurationService) { this.configurationService = configurationService; } + + public void setReverseMigrationContext(MigrationContext reverseMigrationContext) { + this.reverseMigrationContext = reverseMigrationContext; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jalo/ItemDeletionMarker.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jalo/ItemDeletionMarker.java index 080b02e..5619a72 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jalo/ItemDeletionMarker.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jalo/ItemDeletionMarker.java @@ -6,12 +6,14 @@ package com.sap.cx.boosters.commercedbsync.jalo; +import de.hybris.platform.directpersistence.annotation.SLDSafe; import de.hybris.platform.jalo.Item; import de.hybris.platform.jalo.JaloBusinessException; import de.hybris.platform.jalo.SessionContext; import de.hybris.platform.jalo.type.ComposedType; import org.apache.log4j.Logger; +@SLDSafe public class ItemDeletionMarker extends GeneratedItemDeletionMarker { @SuppressWarnings("unused") private static final Logger LOG = Logger.getLogger(ItemDeletionMarker.class.getName()); diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jobs/IncrementalMigrationJob.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jobs/IncrementalMigrationJob.java index 7ac5ca9..6c89988 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jobs/IncrementalMigrationJob.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jobs/IncrementalMigrationJob.java @@ -58,8 +58,9 @@ public class IncrementalMigrationJob extends AbstractMigrationJobPerformable { private static final String TABLE_EXISTS_SELECT_STATEMENT_HANA = "SELECT TABLE_NAME \n" + " FROM public.tables \n" + " WHERE schema_name = upper('%s') \n" + " AND table_name = upper('%2$s') "; - private static final String TABLE_EXISTS_SELECT_STATEMENT_POSTGRES = "SELECT TABLE_NAME \n" - + " FROM public.tables \n" + " WHERE schema_name = upper('%s') \n" + " AND table_name = upper('%2$s') "; + private static final String TABLE_EXISTS_SELECT_STATEMENT_POSTGRES = "SELECT table_name \n" + + " FROM information_schema.tables \n" + " WHERE table_schema = upper('%s') \n" + + " AND table_name = upper('%2$s') "; @Resource(name = "typeService") private TypeService typeService; diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/listeners/DefaultCMTAfterSaveListener.java 
b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/listeners/DefaultCMTAfterSaveListener.java index 832adfc..9694278 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/listeners/DefaultCMTAfterSaveListener.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/listeners/DefaultCMTAfterSaveListener.java @@ -9,20 +9,23 @@ import com.google.common.base.Preconditions; import com.google.common.base.Splitter; import com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants; +import com.sap.cx.boosters.commercedbsync.enums.ItemChangeType; +import com.sap.cx.boosters.commercedbsync.model.ItemDeletionMarkerModel; import de.hybris.platform.jalo.type.TypeManager; import de.hybris.platform.servicelayer.model.ModelService; -import de.hybris.platform.servicelayer.type.TypeService; import de.hybris.platform.tx.AfterSaveEvent; import de.hybris.platform.tx.AfterSaveListener; import de.hybris.platform.util.Config; +import de.hybris.platform.util.persistence.PersistenceUtils; +import it.unimi.dsi.fastutil.objects.ObjectArrayList; +import it.unimi.dsi.fastutil.objects.ObjectOpenHashSet; + import java.util.Collection; -import java.util.Collections; import java.util.List; +import java.util.Set; import org.apache.commons.lang.StringUtils; -import com.sap.cx.boosters.commercedbsync.enums.ItemChangeType; -import com.sap.cx.boosters.commercedbsync.model.ItemDeletionMarkerModel; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -37,13 +40,13 @@ public class DefaultCMTAfterSaveListener implements AfterSaveListener { private ModelService modelService; - private static final String COMMA_SEPERATOR = ","; - - private TypeService typeService; + private static final String COMMA_SEPARATOR = ","; private static final boolean deletionsEnabled = Config .getBoolean(CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_DELETIONS_TYPECODES_ENABLED, false); + private static final Set deletionsTypeCode = getListDeletionsTypeCode(); + @Override public void afterSave(final Collection events) { if (!deletionsEnabled) { @@ -53,29 +56,31 @@ public void afterSave(final Collection events) { return; } - List deletionsTypeCode = getListDeletionsTypeCode(); - if (deletionsTypeCode == null || deletionsTypeCode.isEmpty()) { if (LOG.isDebugEnabled()) { LOG.debug("No typecode defined to create a deletion record for CMT "); } return; } - events.forEach(event -> { - { - final int type = event.getType(); - final String typeCodeAsString = event.getPk().getTypeCodeAsString(); - if (AfterSaveEvent.REMOVE == type && deletionsTypeCode.contains(typeCodeAsString)) { - final String tableName = TypeManager.getInstance().getRootComposedType(event.getPk().getTypeCode()) - .getTable(); - final ItemDeletionMarkerModel idm = modelService.create(ItemDeletionMarkerModel.class); - convertAndfillInitialDeletionMarker(idm, event.getPk().getLong(), tableName); - modelService.save(idm); + PersistenceUtils.doWithSLDPersistence(() -> { + final List itemDeletionMarkerModels = new ObjectArrayList<>(); + events.forEach(event -> { + { + final int type = event.getType(); + final String typeCodeAsString = event.getPk().getTypeCodeAsString(); + if (AfterSaveEvent.REMOVE == type && deletionsTypeCode.contains(typeCodeAsString)) { + final String tableName = TypeManager.getInstance() + .getRootComposedType(event.getPk().getTypeCode()).getTable(); + final ItemDeletionMarkerModel idm = modelService.create(ItemDeletionMarkerModel.class); + convertAndfillInitialDeletionMarker(idm, event.getPk().getLong(), tableName); + 
itemDeletionMarkerModels.add(idm); + } } - } + }); + modelService.saveAll(itemDeletionMarkerModels); + return null; }); - } private void convertAndfillInitialDeletionMarker(final ItemDeletionMarkerModel marker, final Long itemPK, @@ -88,23 +93,18 @@ private void convertAndfillInitialDeletionMarker(final ItemDeletionMarkerModel m marker.setChangeType(ItemChangeType.DELETED); } - // TO DO change to static variable - private List getListDeletionsTypeCode() { + private static Set getListDeletionsTypeCode() { final String typeCodes = Config .getString(CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_DELETIONS_TYPECODES, ""); if (StringUtils.isEmpty(typeCodes)) { - return Collections.emptyList(); + return Set.of(); } - List result = Splitter.on(COMMA_SEPERATOR).omitEmptyStrings().trimResults().splitToList(typeCodes); + List result = Splitter.on(COMMA_SEPARATOR).omitEmptyStrings().trimResults().splitToList(typeCodes); - return result; + return new ObjectOpenHashSet<>(result); } public void setModelService(final ModelService modelService) { this.modelService = modelService; } - - public void setTypeService(TypeService typeService) { - this.typeService = typeService; - } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceRecorder.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceRecorder.java index 47dacd9..d7f7fa5 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceRecorder.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceRecorder.java @@ -41,7 +41,9 @@ public PerformanceRecorder(PerformanceCategory category, String name, boolean au } public void start() { - this.timer.start(); + if (!this.timer.isRunning()) { + this.timer.start(); + } } public void pause() { diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/AdjustActiveTypeSystemPostProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/AdjustActiveTypeSystemPostProcessor.java index b8dcf17..cf52eb3 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/AdjustActiveTypeSystemPostProcessor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/AdjustActiveTypeSystemPostProcessor.java @@ -73,7 +73,7 @@ public void process(final CopyContext context) { @Override public boolean shouldExecute(CopyContext context) { - return !isPostProcesorDisabled() && !context.getMigrationContext().isDataExportEnabled() + return !isPostProcesorDisabled() && !context.getMigrationContext().isDataSynchronizationEnabled() && context.getMigrationContext().getDataTargetRepository().getDatabaseProvider().isMssqlUsed(); } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/MSSQLUpdateStatisticsPostProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/MSSQLUpdateStatisticsPostProcessor.java index c0e397d..aba04c0 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/MSSQLUpdateStatisticsPostProcessor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/MSSQLUpdateStatisticsPostProcessor.java @@ -71,10 +71,8 @@ public boolean shouldExecute(CopyContext context) { return false; } - if (!status.isFailed() && !context.getMigrationContext().isDataExportEnabled() - && context.getMigrationContext().getDataTargetRepository().getDatabaseProvider().isMssqlUsed()) { - return true; - } + return 
!status.isFailed() && !context.getMigrationContext().isDataSynchronizationEnabled() + && context.getMigrationContext().getDataTargetRepository().getDatabaseProvider().isMssqlUsed(); } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ReportMigrationPostProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ReportMigrationPostProcessor.java index d80cdd5..726d88a 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ReportMigrationPostProcessor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ReportMigrationPostProcessor.java @@ -38,8 +38,10 @@ public void process(CopyContext context) { try { final GsonBuilder gsonBuilder = new GsonBuilder(); + gsonBuilder.setPrettyPrinting(); gsonBuilder.registerTypeAdapter(LocalDateTime.class, new LocalDateTypeAdapter()); - Gson gson = gsonBuilder.setPrettyPrinting().create(); + gsonBuilder.disableHtmlEscaping(); + Gson gson = gsonBuilder.create(); MigrationReport migrationReport = databaseMigrationReportService.getMigrationReport(context); InputStream is = new ByteArrayInputStream(gson.toJson(migrationReport).getBytes(StandardCharsets.UTF_8)); databaseMigrationReportStorageService.store(context.getMigrationId() + ".json", is); diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TransformFunctionGeneratorPreProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TransformFunctionGeneratorPreProcessor.java index 068a030..ee91b94 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TransformFunctionGeneratorPreProcessor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TransformFunctionGeneratorPreProcessor.java @@ -48,7 +48,7 @@ private String getPlatformSpecificSQL(final DataBaseProvider databaseProvider) { @Override public boolean shouldExecute(CopyContext context) { - return context.getMigrationContext().isDataExportEnabled() + return context.getMigrationContext().isDataSynchronizationEnabled() && context.getMigrationContext().getDataSourceRepository().getDatabaseProvider().isMssqlUsed(); } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TruncateNotMigratedTablesPreProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TruncateNotMigratedTablesPreProcessor.java index 0773434..61705e9 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TruncateNotMigratedTablesPreProcessor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TruncateNotMigratedTablesPreProcessor.java @@ -43,7 +43,7 @@ public void process(final CopyContext context) { @Override public boolean shouldExecute(CopyContext context) { - return context.getMigrationContext().isDataExportEnabled() + return context.getMigrationContext().isDataSynchronizationEnabled() && context.getMigrationContext().isFullDatabaseMigration() && CollectionUtils.isNotEmpty(context.getMigrationContext().getIncludedTables()); } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TypeInfoTableGeneratorPreProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TypeInfoTableGeneratorPreProcessor.java index 69ca53e..e45f370 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TypeInfoTableGeneratorPreProcessor.java +++ 
b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TypeInfoTableGeneratorPreProcessor.java @@ -68,7 +68,7 @@ public void process(final CopyContext context) { @Override public boolean shouldExecute(CopyContext context) { - return context.getMigrationContext().isDataExportEnabled() + return context.getMigrationContext().isDataSynchronizationEnabled() && context.getMigrationContext().getDataSourceRepository().getDatabaseProvider().isMssqlUsed(); } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ViewDropPostProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ViewDropPostProcessor.java index e95830b..65d6ea5 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ViewDropPostProcessor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ViewDropPostProcessor.java @@ -40,6 +40,6 @@ public void process(final CopyContext context) { @Override public boolean shouldExecute(CopyContext context) { - return context.getMigrationContext().isDataExportEnabled(); + return context.getMigrationContext().isDataSynchronizationEnabled(); } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ViewGeneratorPreProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ViewGeneratorPreProcessor.java index 47c61b5..141aedf 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ViewGeneratorPreProcessor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ViewGeneratorPreProcessor.java @@ -55,6 +55,6 @@ public void process(final CopyContext context) { @Override public boolean shouldExecute(CopyContext context) { - return context.getMigrationContext().isDataExportEnabled(); + return context.getMigrationContext().isDataSynchronizationEnabled(); } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/DataRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/DataRepository.java index b041d24..06bcc5f 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/DataRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/DataRepository.java @@ -118,4 +118,8 @@ long getRowCountModifiedAfter(String table, Instant time, boolean isDeletionEnab void clearJdbcQueriesStore(); String buildBulkUpsertStatement(String table, List columnsToCopy, List upsertIDs); + + List getPartitions(String table) throws SQLException; + + long getRowCount(String table, String currentPartition) throws Exception; } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AbstractDataRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AbstractDataRepository.java index 0a0813e..8c63009 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AbstractDataRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AbstractDataRepository.java @@ -199,6 +199,10 @@ protected String createRowCountQuery() { return "SELECT COUNT(*) FROM %s WHERE %s"; } + protected String createRowCountModifiedAfterQuery() { + return "SELECT COUNT_BIG(*) FROM %s WHERE modifiedts > ? 
AND %s"; + } + @Override public long getRowCount(String table) throws SQLException { List conditions = new ArrayList<>(1); @@ -268,19 +272,21 @@ public DataSet getAllModifiedAfter(String table, Instant time) throws Exception } protected DefaultDataSet convertToDataSet(ResultSet resultSet) throws Exception { - return convertToDataSet(resultSet, Collections.emptySet()); + return convertToDataSet(resultSet, Collections.emptySet(), null); } - protected DefaultDataSet convertToDataSet(int batchId, ResultSet resultSet) throws Exception { - return convertToDataSet(batchId, resultSet, Collections.emptySet()); + protected DefaultDataSet convertToDataSet(int batchId, ResultSet resultSet, final String partition) + throws Exception { + return convertToDataSet(batchId, resultSet, Collections.emptySet(), partition); } - protected DefaultDataSet convertToDataSet(ResultSet resultSet, Set ignoreColumns) throws Exception { - return convertToDataSet(0, resultSet, ignoreColumns); + protected DefaultDataSet convertToDataSet(ResultSet resultSet, Set ignoreColumns, final String partition) + throws Exception { + return convertToDataSet(0, resultSet, ignoreColumns, partition); } - protected DefaultDataSet convertToDataSet(int batchId, ResultSet resultSet, Set ignoreColumns) - throws Exception { + protected DefaultDataSet convertToDataSet(int batchId, ResultSet resultSet, Set ignoreColumns, + final String partition) throws Exception { int realColumnCount = resultSet.getMetaData().getColumnCount(); List columnOrder = new ArrayList<>(); int columnCount = 0; @@ -308,7 +314,7 @@ protected DefaultDataSet convertToDataSet(int batchId, ResultSet resultSet, Set< } results.add(row); } - return new DefaultDataSet(batchId, columnCount, columnOrder, results); + return new DefaultDataSet(batchId, columnCount, columnOrder, results, partition); } @Override @@ -628,7 +634,7 @@ public DataSet getBatchOrderedByColumn(SeekQueryDefinition queryDefinition, Inst stmt.setObject(nextColumnConditionIndex, queryDefinition.getNextColumnValue()); } ResultSet resultSet = stmt.executeQuery(); - return convertToBatchDataSet(queryDefinition.getBatchId(), resultSet); + return convertToBatchDataSet(queryDefinition.getBatchId(), resultSet, queryDefinition.getPartition()); } } @@ -730,7 +736,11 @@ protected String expandConditions(Collection conditions) { } protected DataSet convertToBatchDataSet(int batchId, ResultSet resultSet) throws Exception { - return convertToDataSet(batchId, resultSet); + return convertToDataSet(batchId, resultSet, null); + } + + protected DataSet convertToBatchDataSet(int batchId, ResultSet resultSet, final String partition) throws Exception { + return convertToDataSet(batchId, resultSet, partition); } @Override @@ -791,4 +801,14 @@ protected abstract String getBulkInsertStatementParamList(List columnsTo protected abstract String getBulkUpdateStatementParamList(List columnsToCopy, List columnsToCopyValues, List upsertIDs); + @Override + public List getPartitions(String table) throws SQLException { + throw new UnsupportedOperationException("getPartitions() -> This method is not available on all databases"); + } + + @Override + public long getRowCount(String table, String currentPartition) throws SQLException { + throw new UnsupportedOperationException( + "getRowCount(table,partition) -> This method is not available on all databases"); + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AzureDataRepository.java 
b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AzureDataRepository.java index b0baa29..712d367 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AzureDataRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AzureDataRepository.java @@ -6,14 +6,13 @@ package com.sap.cx.boosters.commercedbsync.repository.impl; -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.Statement; -import java.sql.Types; +import java.sql.*; import java.util.List; import java.util.stream.Collectors; import java.util.stream.IntStream; +import java.time.Instant; +import java.util.ArrayList; import javax.sql.DataSource; import com.google.common.base.Joiner; @@ -247,6 +246,53 @@ protected String createRowCountQuery() { return "SELECT COUNT_BIG(*) FROM %s WHERE %s"; } + @Override + protected String createRowCountModifiedAfterQuery() { + return "SELECT COUNT_BIG(*) FROM %s WHERE modifiedts > ? AND %s"; + } + + @Override + public long getRowCount(String table) throws SQLException { + List conditionsList = new ArrayList<>(1); + processDefaultConditions(table, conditionsList); + String[] conditions = null; + if (conditionsList.size() > 0) { + conditions = conditionsList.toArray(new String[conditionsList.size()]); + } + try (Connection connection = getConnection(); + Statement stmt = connection.createStatement(); + ResultSet resultSet = stmt + .executeQuery(String.format(createRowCountQuery(), table, expandConditions(conditions)))) { + long value = 0; + if (resultSet.next()) { + value = resultSet.getLong(1); + } + return value; + } + } + + @Override + public long getRowCountModifiedAfter(String table, Instant time) throws SQLException { + List conditionsList = new ArrayList<>(1); + processDefaultConditions(table, conditionsList); + String[] conditions = null; + if (conditionsList.size() > 0) { + conditions = conditionsList.toArray(new String[conditionsList.size()]); + } + try (Connection connection = getConnection()) { + try (PreparedStatement stmt = connection.prepareStatement( + String.format(createRowCountModifiedAfterQuery(), table, expandConditions(conditions)))) { + stmt.setTimestamp(1, Timestamp.from(time)); + ResultSet resultSet = stmt.executeQuery(); + long value = 0; + if (resultSet.next()) { + value = resultSet.getLong(1); + } + return value; + } + } + } + @Override public DataBaseProvider getDatabaseProvider() { return DataBaseProvider.MSSQL; diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/HanaDataRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/HanaDataRepository.java index c8a3bfa..fe863f5 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/HanaDataRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/HanaDataRepository.java @@ -35,6 +35,11 @@ import de.hybris.bootstrap.ddl.DatabaseSettings; import de.hybris.bootstrap.ddl.HybrisPlatform; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Arrays; + public class HanaDataRepository extends AbstractDataRepository { private static final Logger LOG = LoggerFactory.getLogger(HanaDataRepository.class); @@ -52,14 +57,21 @@ protected String buildOffsetBatchQuery(OffsetQueryDefinition queryDefinition, St @Override protected String buildValueBatchQuery(SeekQueryDefinition queryDefinition, String... 
conditions) { - return String.format("select * from %s where %s order by %s limit %s", queryDefinition.getTable(), - expandConditions(conditions), queryDefinition.getColumn(), queryDefinition.getBatchSize()); + if (queryDefinition.getPartition() == null) { + return String.format("select * from %s where %s order by %s limit %s", queryDefinition.getTable(), + expandConditions(conditions), queryDefinition.getColumn(), queryDefinition.getBatchSize()); + } else { + return String.format("select * from %s PARTITION(%s) where %s order by %s limit %s", + queryDefinition.getTable(), queryDefinition.getPartition(), expandConditions(conditions), + queryDefinition.getColumn(), queryDefinition.getBatchSize()); + } } @Override protected String buildBatchMarkersQuery(MarkersQueryDefinition queryDefinition, String... conditions) { - String column = queryDefinition.getColumn(); - // spotless:off + if (queryDefinition.getPartition() == null) { + String column = queryDefinition.getColumn(); + // spotless:off return String.format("SELECT t.%s, t.rownr as \"rownum\" \n" + "FROM\n" + "(\n" + @@ -69,8 +81,20 @@ protected String buildBatchMarkersQuery(MarkersQueryDefinition queryDefinition, "WHERE mod(t.rownr,%s) = 0\n" + "ORDER BY t.%s", // spotless:on - column, column, column, queryDefinition.getTable(), expandConditions(conditions), - queryDefinition.getBatchSize(), column); + column, column, column, queryDefinition.getTable(), expandConditions(conditions), + queryDefinition.getBatchSize(), column); + } else { + String column = queryDefinition.getColumn(); + final var format = String.format( + "SELECT t.%s, t.rownr as \"rownum\" \n" + "FROM\n" + "(\n" + + " SELECT %s, (ROW_NUMBER() OVER (ORDER BY %s))-1 AS rownr\n" + + " FROM %s PARTITION(%s)\n WHERE %s" + ") t\n" + "WHERE mod(t.rownr,%s) = 0\n" + + "ORDER BY t.%s", + column, column, column, queryDefinition.getTable(), queryDefinition.getPartition(), + expandConditions(conditions), queryDefinition.getBatchSize(), column); + LOG.debug("buildBatchMarkersQuery={}", format); + return format; + } } @Override @@ -184,10 +208,47 @@ protected String getBulkUpdateStatementParamList(List columnsToCopy, Lis .collect(Collectors.joining(", ")); } + @Override + public List getPartitions(String table) throws SQLException { + List partitions = new ArrayList<>(); + try (Connection connection = getConnection(); Statement stmt = connection.createStatement()) { + ResultSet resultSet = stmt.executeQuery(String.format( + "SELECT PART_ID FROM TABLE_PARTITIONS where lower(TABLE_NAME) = lower('%s') and lower(SCHEMA_NAME) = lower('%s')", + table, getDataSourceConfiguration().getSchema())); + while (resultSet.next()) { + partitions.add(resultSet.getString("PART_ID")); + } + } + if (LOG.isDebugEnabled()) { + LOG.debug("getPartitions result={}", Arrays.toString(partitions.toArray())); + } + return partitions; + } + @Override protected Platform createPlatform(DatabaseSettings databaseSettings, DataSource dataSource) { HybrisPlatform instance = MigrationHybrisHANAPlatform.build(databaseSettings); instance.setDataSource(dataSource); return instance; } + + @Override + public long getRowCount(String table, String currentPartition) throws SQLException { + List conditionsList = new ArrayList<>(1); + processDefaultConditions(table, conditionsList); + String[] conditions = null; + if (conditionsList.size() > 0) { + conditions = conditionsList.toArray(new String[conditionsList.size()]); + } + try (Connection connection = getConnection(); + Statement stmt = connection.createStatement(); + ResultSet 
resultSet = stmt.executeQuery(String.format("select count(*) from %s PARTITION(%s) where %s", + table, currentPartition, expandConditions(conditions)))) { + long value = 0; + if (resultSet.next()) { + value = resultSet.getLong(1); + } + return value; + } + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/MySQLDataRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/MySQLDataRepository.java index 01eaa7b..3a992cc 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/MySQLDataRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/MySQLDataRepository.java @@ -27,6 +27,7 @@ import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; +import java.util.Collections; import java.util.List; public class MySQLDataRepository extends AbstractDataRepository { @@ -141,7 +142,20 @@ public DataBaseProvider getDatabaseProvider() { @Override public String buildBulkUpsertStatement(String table, List columnsToCopy, List upsertIDs) { - throw new UnsupportedOperationException("not implemented"); + final StringBuilder sqlBuilder = new StringBuilder(); + + sqlBuilder.append(String.format("INSERT INTO %s (", table)); + sqlBuilder.append(String.join(", ", columnsToCopy)); + sqlBuilder.append(") VALUES ("); + sqlBuilder.append(String.join(", ", Collections.nCopies(columnsToCopy.size(), "?"))); + sqlBuilder.append(") "); + sqlBuilder.append("ON DUPLICATE KEY UPDATE "); + for (String column : columnsToCopy) { + sqlBuilder.append(String.format("%s = VALUES(%s), ", column, column)); + } + sqlBuilder.setLength(sqlBuilder.length() - 2); + sqlBuilder.append(";"); + return sqlBuilder.toString(); } @Override diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/NullRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/NullRepository.java index 07ed536..d905181 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/NullRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/NullRepository.java @@ -238,6 +238,16 @@ public String buildBulkUpsertStatement(String table, List columnsToCopy, throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); } + @Override + public List getPartitions(final String table) throws SQLException { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public long getRowCount(final String table, final String currentPartition) throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + @Override public String getDatabaseTimezone() { return null; diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/OracleDataRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/OracleDataRepository.java index 835cba2..db05602 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/OracleDataRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/OracleDataRepository.java @@ -54,8 +54,9 @@ private void ensureJdbcCompliance() { } @Override - protected DataSet convertToBatchDataSet(int batchId, final ResultSet resultSet) throws Exception { - return convertToDataSet(batchId, resultSet, Collections.singleton("rn")); + protected DataSet 
convertToBatchDataSet(int batchId, final ResultSet resultSet, final String partition) + throws Exception { + return convertToDataSet(batchId, resultSet, Collections.singleton("rn"), partition); } @Override diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/impl/CustomClusterDatabaseCopyScheduler.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/impl/CustomClusterDatabaseCopyScheduler.java index b02556f..6967b0d 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/impl/CustomClusterDatabaseCopyScheduler.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/impl/CustomClusterDatabaseCopyScheduler.java @@ -37,12 +37,8 @@ import java.time.OffsetDateTime; import java.time.ZoneOffset; import java.time.temporal.ChronoUnit; -import java.util.ArrayList; -import java.util.Comparator; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; +import java.util.*; +import java.util.function.ToLongFunction; import java.util.stream.Collectors; import static com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants.MDC_CLUSTERID; @@ -82,14 +78,17 @@ public void schedule(CopyContext context) throws Exception { List> itemsToSchedule = generateSchedulerItemList(context, dataRepositoryAdapter); databaseCopyTaskRepository.createMigrationStatus(context, itemsToSchedule.size()); + long itemOrder = 0; for (final Pair itemToSchedule : itemsToSchedule) { CopyContext.DataCopyItem dataCopyItem = itemToSchedule.getLeft(); final long sourceRowCount = itemToSchedule.getRight(); if (sourceRowCount > 0) { final int destinationNodeId = databaseOperationSchedulerAlgorithm.next(); - databaseCopyTaskRepository.scheduleTask(context, dataCopyItem, sourceRowCount, destinationNodeId); + databaseCopyTaskRepository.scheduleTask(context, dataCopyItem, ++itemOrder, sourceRowCount, + destinationNodeId); } else { - databaseCopyTaskRepository.scheduleTask(context, dataCopyItem, sourceRowCount, ownNodeId); + databaseCopyTaskRepository.scheduleTask(context, dataCopyItem, itemOrder++, sourceRowCount, + ownNodeId); databaseCopyTaskRepository.markTaskCompleted(context, dataCopyItem, "0"); if (!context.getMigrationContext().isIncrementalModeEnabled() && context.getMigrationContext().isTruncateEnabled()) { @@ -100,7 +99,7 @@ public void schedule(CopyContext context) throws Exception { } startMonitorThread(context); final CopyDatabaseTableEvent event = new CopyDatabaseTableEvent(ownNodeId, context.getMigrationId(), - context.getPropertyOverrideMap()); + context.getPropertyOverrideMap(), context.getMigrationContext().isReversed()); eventService.publishEvent(event); } else { throw new IllegalStateException( @@ -124,48 +123,14 @@ public void resumeUnfinishedItems(CopyContext copyContext) throws Exception { startMonitorThread(copyContext); final CopyDatabaseTableEvent event = new CopyDatabaseTableEvent( databaseOperationSchedulerAlgorithm.getOwnNodeId(), copyContext.getMigrationId(), - copyContext.getPropertyOverrideMap()); + copyContext.getPropertyOverrideMap(), copyContext.getMigrationContext().isReversed()); eventService.publishEvent(event); } private void logMigrationContext(final MigrationContext context) { - if (!Config.getBoolean("migration.log.context.details", true)) { - return; + if (Config.getBoolean("migration.log.context.details", true)) { + context.dumpLog(LOG); } - - LOG.info("--------MIGRATION CONTEXT- START----------"); - LOG.info("isAddMissingColumnsToSchemaEnabled=" + 
context.isAddMissingColumnsToSchemaEnabled()); - LOG.info("isAddMissingTablesToSchemaEnabled=" + context.isAddMissingTablesToSchemaEnabled()); - LOG.info("isAuditTableMigrationEnabled=" + context.isAuditTableMigrationEnabled()); - LOG.info("isClusterMode=" + context.isClusterMode()); - LOG.info("isDeletionEnabled=" + context.isDeletionEnabled()); - LOG.info("isDisableAllIndexesEnabled=" + context.isDisableAllIndexesEnabled()); - LOG.info("isDropAllIndexesEnabled=" + context.isDropAllIndexesEnabled()); - LOG.info("isFailOnErrorEnabled=" + context.isFailOnErrorEnabled()); - LOG.info("isIncrementalModeEnabled=" + context.isIncrementalModeEnabled()); - LOG.info("isMigrationTriggeredByUpdateProcess=" + context.isMigrationTriggeredByUpdateProcess()); - LOG.info("isRemoveMissingColumnsToSchemaEnabled=" + context.isRemoveMissingColumnsToSchemaEnabled()); - LOG.info("isRemoveMissingTablesToSchemaEnabled=" + context.isRemoveMissingTablesToSchemaEnabled()); - LOG.info("isSchemaMigrationAutoTriggerEnabled=" + context.isSchemaMigrationAutoTriggerEnabled()); - LOG.info("isSchemaMigrationEnabled=" + context.isSchemaMigrationEnabled()); - LOG.info("isTruncateEnabled=" + context.isTruncateEnabled()); - LOG.info("getIncludedTables=" + context.getIncludedTables()); - LOG.info("getExcludedTables=" + context.getExcludedTables()); - LOG.info("getIncrementalTables=" + context.getIncrementalTables()); - LOG.info("getTruncateExcludedTables=" + context.getTruncateExcludedTables()); - LOG.info("getCustomTables=" + context.getCustomTables()); - LOG.info("getIncrementalTimestamp=" + context.getIncrementalTimestamp()); - LOG.info( - "Source TS Name=" + context.getDataSourceRepository().getDataSourceConfiguration().getTypeSystemName()); - LOG.info("Source TS Suffix=" - + context.getDataSourceRepository().getDataSourceConfiguration().getTypeSystemSuffix()); - LOG.info( - "Target TS Name=" + context.getDataTargetRepository().getDataSourceConfiguration().getTypeSystemName()); - LOG.info("Target TS Suffix=" - + context.getDataTargetRepository().getDataSourceConfiguration().getTypeSystemSuffix()); - LOG.info("getItemTypeViewNamePattern=" + context.getItemTypeViewNamePattern()); - - LOG.info("--------MIGRATION CONTEXT- END----------"); } private List> generateSchedulerItemList(CopyContext context, @@ -181,7 +146,21 @@ private List> generateSchedulerItemList(Cop LOG.debug("Found {} rows in table: {}", rowCount, copyItem.getSourceItem()); } // we sort the items to make sure big tables are assigned to nodes in a fair way - return pairs.stream().sorted(Comparator.comparingLong(Pair::getRight)).collect(Collectors.toList()); + return pairs.stream().sorted(Comparator.comparingLong(customizedTablesOrder(context))) + .collect(Collectors.toList()); + } + + private ToLongFunction> customizedTablesOrder(CopyContext context) { + if (context.getMigrationContext().isTablesOrdered()) { + final Set first = context.getMigrationContext().getTablesOrderedAsFirst(); + final Set last = context.getMigrationContext().getTablesOrderedAsLast(); + + return pair -> first.contains(pair.getKey().getSourceItem()) + ? -1 + : last.contains(pair.getKey().getSourceItem()) ? 
Integer.MAX_VALUE : pair.getRight(); + } else { + return Pair::getRight; + } } /** @@ -305,7 +284,8 @@ private void pollState() throws Exception { */ private void notifyFinished() { final CopyCompleteEvent completeEvent = new CopyCompleteEvent( - databaseOperationSchedulerAlgorithm.getOwnNodeId(), context.getMigrationId()); + databaseOperationSchedulerAlgorithm.getOwnNodeId(), context.getMigrationId(), + context.getMigrationContext().isReversed()); eventService.publishEvent(completeEvent); } @@ -366,5 +346,4 @@ protected void cleanupThread() { Registry.unsetCurrentTenant(); } } - } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/impl/DefaultDatabaseSchemaDifferenceScheduler.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/impl/DefaultDatabaseSchemaDifferenceScheduler.java index f94b84e..ec4a33a 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/impl/DefaultDatabaseSchemaDifferenceScheduler.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/impl/DefaultDatabaseSchemaDifferenceScheduler.java @@ -25,9 +25,7 @@ public class DefaultDatabaseSchemaDifferenceScheduler implements DatabaseSchemaD private DatabaseSchemaDifferenceTaskRepository databaseSchemaDifferenceTaskRepository; private void prepare(final MigrationContext context) { - final DataRepository repository = !context.isDataExportEnabled() - ? context.getDataTargetRepository() - : context.getDataSourceRepository(); + final DataRepository repository = context.getDataRepository(); final DataBaseProvider databaseProvider = repository.getDatabaseProvider(); final ClassPathResource scriptResource = new ClassPathResource( String.format("/sql/createSchemaSchedulerTables%s.sql", databaseProvider)); @@ -58,43 +56,9 @@ public void abort(SchemaDifferenceContext context) throws Exception { } private void logMigrationContext(final MigrationContext context) { - if (!Config.getBoolean("migration.log.context.details", true) || context == null) { - return; + if (Config.getBoolean("migration.log.context.details", true) && context != null) { + context.dumpLog(LOG); } - - LOG.info("--------MIGRATION CONTEXT- START----------"); - LOG.info("isAddMissingColumnsToSchemaEnabled=" + context.isAddMissingColumnsToSchemaEnabled()); - LOG.info("isAddMissingTablesToSchemaEnabled=" + context.isAddMissingTablesToSchemaEnabled()); - LOG.info("isAuditTableMigrationEnabled=" + context.isAuditTableMigrationEnabled()); - LOG.info("isClusterMode=" + context.isClusterMode()); - LOG.info("isDeletionEnabled=" + context.isDeletionEnabled()); - LOG.info("isDisableAllIndexesEnabled=" + context.isDisableAllIndexesEnabled()); - LOG.info("isDropAllIndexesEnabled=" + context.isDropAllIndexesEnabled()); - LOG.info("isFailOnErrorEnabled=" + context.isFailOnErrorEnabled()); - LOG.info("isIncrementalModeEnabled=" + context.isIncrementalModeEnabled()); - LOG.info("isMigrationTriggeredByUpdateProcess=" + context.isMigrationTriggeredByUpdateProcess()); - LOG.info("isRemoveMissingColumnsToSchemaEnabled=" + context.isRemoveMissingColumnsToSchemaEnabled()); - LOG.info("isRemoveMissingTablesToSchemaEnabled=" + context.isRemoveMissingTablesToSchemaEnabled()); - LOG.info("isSchemaMigrationAutoTriggerEnabled=" + context.isSchemaMigrationAutoTriggerEnabled()); - LOG.info("isSchemaMigrationEnabled=" + context.isSchemaMigrationEnabled()); - LOG.info("isTruncateEnabled=" + context.isTruncateEnabled()); - LOG.info("getIncludedTables=" + context.getIncludedTables()); - LOG.info("getExcludedTables=" + 
context.getExcludedTables()); - LOG.info("getIncrementalTables=" + context.getIncrementalTables()); - LOG.info("getTruncateExcludedTables=" + context.getTruncateExcludedTables()); - LOG.info("getCustomTables=" + context.getCustomTables()); - LOG.info("getIncrementalTimestamp=" + context.getIncrementalTimestamp()); - LOG.info( - "Source TS Name=" + context.getDataSourceRepository().getDataSourceConfiguration().getTypeSystemName()); - LOG.info("Source TS Suffix=" - + context.getDataSourceRepository().getDataSourceConfiguration().getTypeSystemSuffix()); - LOG.info( - "Target TS Name=" + context.getDataTargetRepository().getDataSourceConfiguration().getTypeSystemName()); - LOG.info("Target TS Suffix=" - + context.getDataTargetRepository().getDataSourceConfiguration().getTypeSystemSuffix()); - LOG.info("getItemTypeViewNamePattern=" + context.getItemTypeViewNamePattern()); - - LOG.info("--------MIGRATION CONTEXT- END----------"); } public void setDatabaseSchemaDifferenceTaskRepository( diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseCopyTaskRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseCopyTaskRepository.java index 5ec21be..c229a63 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseCopyTaskRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseCopyTaskRepository.java @@ -95,21 +95,31 @@ public interface DatabaseCopyTaskRepository { * the nodeId to perform the copy * @throws Exception */ - void scheduleTask(CopyContext context, CopyContext.DataCopyItem copyItem, long sourceRowCount, int targetNode) - throws Exception; + void scheduleTask(CopyContext context, CopyContext.DataCopyItem copyItem, long itemOrder, long sourceRowCount, + int targetNode) throws Exception; void rescheduleTask(CopyContext context, String pipelineName, int targetNodeId) throws Exception; void scheduleBatch(CopyContext context, CopyContext.DataCopyItem copyItem, int batchId, Object lowerBoundary, Object upperBoundary) throws Exception; - void markBatchCompleted(CopyContext context, CopyContext.DataCopyItem copyItem, int batchId) throws Exception; + void scheduleBatch(CopyContext context, DataCopyItem copyItem, int batchId, Object lowerBoundary, + Object upperBoundary, String partition) throws Exception; + + void markBatchCompleted(CopyContext context, DataCopyItem copyItem, int batchId) throws Exception; + + void markBatchCompleted(CopyContext context, DataCopyItem copyItem, int batchId, String partition) throws Exception; void resetPipelineBatches(CopyContext context, CopyContext.DataCopyItem copyItem) throws Exception; + void resetPipelineBatches(CopyContext context, DataCopyItem copyItem, String partition) throws Exception; + Set findPendingBatchesForPipeline(CopyContext context, CopyContext.DataCopyItem item) throws Exception; + Set findPendingBatchesForPipeline(CopyContext context, DataCopyItem item, String partition) + throws Exception; + Optional findPipeline(CopyContext context, CopyContext.DataCopyItem dataCopyItem) throws Exception; diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseCopyTaskRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseCopyTaskRepository.java index af0acfc..baeadb2 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseCopyTaskRepository.java +++ 
b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseCopyTaskRepository.java @@ -14,7 +14,6 @@ import com.sap.cx.boosters.commercedbsync.performance.PerformanceCategory; import com.sap.cx.boosters.commercedbsync.performance.PerformanceRecorder; import com.sap.cx.boosters.commercedbsync.performance.PerformanceUnit; -import com.sap.cx.boosters.commercedbsync.repository.DataRepository; import com.sap.cx.boosters.commercedbsync.service.DataCopyChunk; import de.hybris.platform.servicelayer.cluster.ClusterService; import org.apache.commons.lang3.StringUtils; @@ -39,7 +38,6 @@ import java.util.Calendar; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.LinkedHashSet; import java.util.Map; import java.util.Objects; @@ -63,6 +61,7 @@ public class DefaultDatabaseCopyTaskRepository implements DatabaseCopyTaskReposi private static final String TABLECOPYSTATUS = MIGRATION_TABLESPREFIX + "TABLECOPYSTATUS"; private static final String TABLECOPYTASKS = MIGRATION_TABLESPREFIX + "TABLECOPYTASKS"; private static final String TABLECOPYBATCHES = MIGRATION_TABLESPREFIX + "TABLECOPYBATCHES"; + private static final String TABLECOPYBATCHES_PART = MIGRATION_TABLESPREFIX + "TABLECOPYBATCHES_PART"; @Override public String getMostRecentMigrationID(MigrationContext context) { @@ -182,23 +181,24 @@ private LocalDateTime getDateTime(ResultSet rs, String column) throws Exception } @Override - public synchronized void scheduleTask(CopyContext context, CopyContext.DataCopyItem copyItem, long sourceRowCount, - int targetNode) throws Exception { + public synchronized void scheduleTask(CopyContext context, CopyContext.DataCopyItem copyItem, long itemOrder, + long sourceRowCount, int targetNode) throws Exception { String insert = "INSERT INTO " + TABLECOPYTASKS - + " (targetnodeid, pipelinename, sourcetablename, targettablename, columnmap, migrationid, sourcerowcount, batchsize, lastupdate, chunked, chunksize, chunknumber) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + + " (targetnodeid, pipelinename, itemorder, sourcetablename, targettablename, columnmap, migrationid, sourcerowcount, batchsize, lastupdate, chunked, chunksize, chunknumber) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; try (Connection conn = getConnection(context); PreparedStatement stmt = conn.prepareStatement(insert)) { stmt.setObject(1, targetNode); stmt.setObject(2, copyItem.getPipelineName()); - stmt.setObject(3, copyItem.getSourceItem()); - stmt.setObject(4, copyItem.getTargetItem()); - stmt.setObject(5, new Gson().toJson(copyItem.getColumnMap())); - stmt.setObject(6, context.getMigrationId()); - stmt.setObject(7, sourceRowCount); - stmt.setObject(8, copyItem.getBatchSize()); - setTimestamp(stmt, 9, now()); - stmt.setObject(10, copyItem.getChunkData() != null ? "1" : "0"); - stmt.setObject(11, (copyItem.getChunkData() != null ? copyItem.getChunkData().getChunkSize() : null)); - stmt.setObject(12, (copyItem.getChunkData() != null ? copyItem.getChunkData().getCurrentChunk() : null)); + stmt.setObject(3, context.getMigrationContext().isTablesOrdered() ? itemOrder : 0); + stmt.setObject(4, copyItem.getSourceItem()); + stmt.setObject(5, copyItem.getTargetItem()); + stmt.setObject(6, new Gson().toJson(copyItem.getColumnMap())); + stmt.setObject(7, context.getMigrationId()); + stmt.setObject(8, sourceRowCount); + stmt.setObject(9, copyItem.getBatchSize()); + setTimestamp(stmt, 10, now()); + stmt.setObject(11, copyItem.getChunkData() != null ? 
"1" : "0"); + stmt.setObject(12, (copyItem.getChunkData() != null ? copyItem.getChunkData().getChunkSize() : null)); + stmt.setObject(13, (copyItem.getChunkData() != null ? copyItem.getChunkData().getCurrentChunk() : null)); stmt.executeUpdate(); } } @@ -233,6 +233,27 @@ public synchronized void scheduleBatch(CopyContext context, CopyContext.DataCopy } } + @Override + public void scheduleBatch(CopyContext context, CopyContext.DataCopyItem copyItem, int batchId, Object lowerBoundary, + Object upperBoundary, String partition) throws Exception { + if (partition == null) { + scheduleBatch(context, copyItem, batchId, lowerBoundary, upperBoundary); + } else { + LOG.debug("Schedule Batch for {} with ID {}, partition {}", copyItem.getPipelineName(), batchId, partition); + String insert = "INSERT INTO " + TABLECOPYBATCHES_PART + + " (migrationId, batchId, pipelinename, lowerBoundary, upperBoundary, partition) VALUES (?, ?, ?, ?, ?, ?)"; + try (Connection conn = getConnection(context); PreparedStatement stmt = conn.prepareStatement(insert)) { + stmt.setObject(1, context.getMigrationId()); + stmt.setObject(2, batchId); + stmt.setObject(3, copyItem.getPipelineName()); + stmt.setObject(4, lowerBoundary); + stmt.setObject(5, upperBoundary); + stmt.setObject(6, partition); + stmt.executeUpdate(); + } + } + } + @Override public synchronized void markBatchCompleted(CopyContext context, CopyContext.DataCopyItem copyItem, int batchId) throws Exception { @@ -249,6 +270,29 @@ public synchronized void markBatchCompleted(CopyContext context, CopyContext.Dat } } + @Override + public void markBatchCompleted(CopyContext context, CopyContext.DataCopyItem copyItem, int batchId, + final String partition) throws Exception { + if (partition == null) { + markBatchCompleted(context, copyItem, batchId); + } else { + LOG.debug("Mark batch completed for {} with ID {}", copyItem.getPipelineName(), batchId); + String insert = "DELETE FROM " + TABLECOPYBATCHES_PART + + " WHERE migrationId = ? AND batchId = ? AND pipelinename = ? AND partition = ?"; + try (Connection conn = getConnection(context); PreparedStatement stmt = conn.prepareStatement(insert)) { + stmt.setObject(1, context.getMigrationId()); + stmt.setObject(2, batchId); + stmt.setObject(3, copyItem.getPipelineName()); + stmt.setObject(4, partition); + // exactly one batch record should be affected + if (stmt.executeUpdate() != 1) { + throw new IllegalStateException("No (exact) match for batch with id '" + batchId + "' found."); + } + } + } + + } + @Override public synchronized void resetPipelineBatches(CopyContext context, CopyContext.DataCopyItem copyItem) throws Exception { @@ -260,6 +304,23 @@ public synchronized void resetPipelineBatches(CopyContext context, CopyContext.D } } + @Override + public void resetPipelineBatches(CopyContext context, CopyContext.DataCopyItem copyItem, String partition) + throws Exception { + if (partition == null) { + resetPipelineBatches(context, copyItem); + } else { + String insert = "DELETE FROM " + TABLECOPYBATCHES_PART + + " WHERE migrationId = ? AND pipelinename = ? 
AND partition = ?"; + try (Connection conn = getConnection(context); PreparedStatement stmt = conn.prepareStatement(insert)) { + stmt.setObject(1, context.getMigrationId()); + stmt.setObject(2, copyItem.getPipelineName()); + stmt.setObject(3, partition); + stmt.executeUpdate(); + } + } + } + @Override public Set findPendingBatchesForPipeline(CopyContext context, CopyContext.DataCopyItem item) throws Exception { @@ -275,6 +336,26 @@ public Set findPendingBatchesForPipeline(CopyContext context, } } + @Override + public Set findPendingBatchesForPipeline(CopyContext context, CopyContext.DataCopyItem item, + String partition) throws Exception { + if (partition == null) { + return findPendingBatchesForPipeline(context, item); + } else { + String sql = "SELECT * FROM " + TABLECOPYBATCHES_PART + + " WHERE migrationid = ? AND pipelinename = ? and partition = ? ORDER BY batchId ASC"; + try (Connection connection = getConnection(context); + PreparedStatement stmt = connection.prepareStatement(sql)) { + stmt.setObject(1, context.getMigrationId()); + stmt.setObject(2, item.getPipelineName()); + stmt.setObject(3, partition); + try (ResultSet resultSet = stmt.executeQuery()) { + return convertToBatch(resultSet); + } + } + } + } + private Timestamp now() { Instant now = java.time.Instant.now(); Timestamp ts = new Timestamp(now.toEpochMilli()); @@ -286,14 +367,11 @@ private Connection getConnection(CopyContext context) throws Exception { } private Connection getConnection(MigrationContext context) throws Exception { - final DataRepository repository = !context.isDataExportEnabled() - ? context.getDataTargetRepository() - : context.getDataSourceRepository(); /* * if (!repository.getDatabaseProvider().isMssqlUsed()) { throw new * IllegalStateException("Scheduler tables requires MSSQL database"); } */ - return repository.getConnection(); + return context.getDataRepository().getConnection(); } @Override @@ -335,8 +413,9 @@ public boolean findInAllPipelines(CopyContext context, CopyContext.DataCopyItem @Override public Set findPendingTasks(CopyContext context) throws Exception { + final String orderBy = context.getMigrationContext().isTablesOrdered() ? "itemorder" : "sourcerowcount"; String sql = "SELECT * FROM " + TABLECOPYTASKS - + " WHERE targetnodeid=? AND migrationid=? AND duration IS NULL ORDER BY sourcerowcount"; + + " WHERE targetnodeid=? AND migrationid=? AND duration IS NULL ORDER BY " + orderBy; try (Connection connection = getConnection(context); PreparedStatement stmt = connection.prepareStatement(sql)) { stmt.setObject(1, getTargetNodeId()); @@ -349,8 +428,9 @@ public Set findPendingTasks(CopyContext context) throws Except @Override public Set findFailedTasks(CopyContext context) throws Exception { + final String orderBy = context.getMigrationContext().isTablesOrdered() ? "itemorder" : "sourcerowcount"; String sql = "SELECT * FROM " + TABLECOPYTASKS - + " WHERE migrationid=? AND duration = '-1' AND failure = '1' ORDER BY sourcerowcount"; + + " WHERE migrationid=? 
AND duration = '-1' AND failure = '1' ORDER BY " + orderBy; try (Connection connection = getConnection(context); PreparedStatement stmt = connection.prepareStatement(sql)) { stmt.setObject(1, context.getMigrationId()); @@ -527,7 +607,7 @@ public void setClusterService(ClusterService clusterService) { } private Set convertToTask(ResultSet rs) throws Exception { - Set copyTasks = new HashSet<>(); + Set copyTasks = new LinkedHashSet<>(); while (rs.next()) { DatabaseCopyTask copyTask = new DatabaseCopyTask(); copyTask.setTargetnodeId(rs.getInt("targetnodeId")); diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationService.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationService.java index e702db0..c31e675 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationService.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationService.java @@ -56,9 +56,8 @@ public class DefaultDatabaseMigrationService implements DatabaseMigrationService @Override public void prepareMigration(MigrationContext context) throws Exception { - final DataRepository repository = !context.isDataExportEnabled() - ? context.getDataTargetRepository() - : context.getDataSourceRepository(); + final DataRepository repository = context.getDataRepository(); + final DataBaseProvider databaseProvider = repository.getDatabaseProvider(); final ClassPathResource scriptResource = new ClassPathResource( String.format("/sql/createSchedulerTables%s.sql", databaseProvider)); @@ -87,7 +86,7 @@ public String startMigration(final MigrationContext context, LaunchOptions launc return runningMigrationStatus.getMigrationID(); } - if (!context.isDataExportEnabled()) { + if (!context.isDataSynchronizationEnabled()) { TaskEngine engine = taskService.getEngine(); boolean running = engine.isRunning(); diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseSchemaDifferenceService.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseSchemaDifferenceService.java index 7af6fa9..93c1dbc 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseSchemaDifferenceService.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseSchemaDifferenceService.java @@ -34,6 +34,7 @@ import org.apache.ddlutils.model.Column; import org.apache.ddlutils.model.Database; import org.apache.ddlutils.model.Table; +import org.apache.ddlutils.platform.SqlBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.MDC; @@ -41,6 +42,8 @@ import javax.annotation.Nullable; import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.StringWriter; import java.nio.charset.StandardCharsets; import java.sql.Connection; import java.time.LocalDateTime; @@ -80,15 +83,9 @@ public String generateSchemaDifferencesSql(MigrationContext context, schemaDifferenceResult); if (databaseModelWithChanges.isHasSchemaDiff()) { LOG.info("generateSchemaDifferencesSql..Schema Diff found - now to generate the SQLs "); - if (context.getDataTargetRepository().getDatabaseProvider().isHanaUsed()) { - schemaSql = context.getDataTargetRepository().asPlatform().getAlterTablesSql(null, - context.getDataTargetRepository().getDataSourceConfiguration().getSchema(), null, - databaseModelWithChanges.getDatabase()); - } else { - schemaSql = 
context.getDataTargetRepository().asPlatform() - .getAlterTablesSql(databaseModelWithChanges.getDatabase()); - } + schemaSql = processChanges(context, schemaDifferenceResult.getTargetSchema().getDatabase(), + databaseModelWithChanges.getDatabase()); schemaSql = postProcess(schemaSql, context); LOG.info("generateSchemaDifferencesSql - generated DDL ALTER SQLs. "); } @@ -98,6 +95,37 @@ public String generateSchemaDifferencesSql(MigrationContext context, return schemaSql; } + protected void preProcessDatabaseModel(final MigrationContext migrationContext, final Database model, + final Set tableCandidates) { + for (Table table : model.getTables()) { + final Optional matchingTableCandidate = tableCandidates.stream() + .filter(tableCandidate -> tableCandidate.getFullTableName().equals(table.getName())).findFirst(); + + if (matchingTableCandidate.isEmpty()) { + model.removeTable(table); + } else { + final String commonTableName = matchingTableCandidate.get().getCommonTableName(); + if (migrationContext.getExcludedColumns().containsKey(commonTableName)) { + migrationContext.getExcludedColumns().get(commonTableName).forEach(col -> { + final Column excludedColumn = table.findColumn(col); + table.removeColumn(excludedColumn); + }); + } + } + } + } + + private String processChanges(final MigrationContext context, final Database currentModel, + final Database desiredModel) throws IOException { + final SqlBuilder sqlBuilder = context.getDataTargetRepository().asPlatform().getSqlBuilder(); + + try (StringWriter buffer = new StringWriter()) { + sqlBuilder.setWriter(buffer); + sqlBuilder.alterDatabase(currentModel, desiredModel, null); + return buffer.toString(); + } + } + /* * ORACLE_TARGET - START This a TEMP fix, it is difficlt to get from from Sql * Server NVARCHAR(255), NVARCHAR(MAX) to convert properly into to Orcale's @@ -199,7 +227,7 @@ protected DatabaseStatus getDatabaseModelWithChanges4TableCreation(final Migrati return dbStatus; } final SchemaDifference targetDiff = differenceResult.getTargetSchema(); - final Database database = (Database) targetDiff.getDatabase().clone(); + final Database database = getDatabaseModelClone(targetDiff.getDatabase()); // add missing tables in target if (migrationContext.isAddMissingTablesToSchemaEnabled()) { @@ -244,14 +272,8 @@ protected DatabaseStatus getDatabaseModelWithChanges4TableCreation(final Migrati final SchemaDifference sourceDiff = differenceResult.getSourceSchema(); final List missingTables = sourceDiff.getMissingTables(); for (final TableKeyPair missingTable : missingTables) { - final Table tableClone = (Table) differenceResult.getTargetSchema().getDatabase() - .findTable(missingTable.getLeftName(), false).clone(); - tableClone.setName(missingTable.getRightName()); - tableClone.setCatalog( - migrationContext.getDataTargetRepository().getDataSourceConfiguration().getCatalog()); - tableClone - .setSchema(migrationContext.getDataTargetRepository().getDataSourceConfiguration().getSchema()); - database.removeTable(tableClone); + final Table table = database.findTable(missingTable.getLeftName(), false); + database.removeTable(table); LOG.info("getDatabaseModelWithChanges4TableCreation - missingTable.getRightName() =" + missingTable.getRightName() + ", missingTable.getLeftName() = " + missingTable.getLeftName()); } @@ -280,6 +302,23 @@ protected DatabaseStatus getDatabaseModelWithChanges4TableCreation(final Migrati return dbStatus; } + /* + * Database.clone() does not clone tables properly and adding a new column would + * result in it being present in both: 
cloned, and origin db models. + */ + protected Database getDatabaseModelClone(final Database model) throws CloneNotSupportedException { + final Database database = new Database(); + database.setName(model.getName()); + database.setIdMethod(model.getIdMethod()); + database.setIdMethod(model.getIdMethod()); + database.setVersion(model.getVersion()); + for (final Table table : model.getTables()) { + database.addTable((Table) table.clone()); + } + + return database; + } + protected void writeReport(final String differenceSql) { try { final String fileName = String.format("schemaChanges-%s.sql", LocalDateTime.now().getNano()); @@ -304,13 +343,20 @@ public SchemaDifferenceResult getSchemaDifferenceFromStatus(final MigrationConte @Override public SchemaDifferenceResult createSchemaDifferenceResult(final MigrationContext migrationContext) throws Exception { - final SchemaDifference sourceSchemaDifference = getSchemaDifference(migrationContext, true); - final SchemaDifference targetSchemaDifference = getSchemaDifference(migrationContext, false); + final Set sourceTableCandidates = getTables(migrationContext, + copyItemProvider.getSourceTableCandidates(migrationContext)); + final Set targetTableCandidates = getTables(migrationContext, + copyItemProvider.getTargetTableCandidates(migrationContext)); + final SchemaDifference sourceSchemaDifference = getSchemaDifference(migrationContext, true, + sourceTableCandidates, targetTableCandidates); + final SchemaDifference targetSchemaDifference = getSchemaDifference(migrationContext, false, + sourceTableCandidates, targetTableCandidates); return new SchemaDifferenceResult(sourceSchemaDifference, targetSchemaDifference); } private SchemaDifference getSchemaDifference(final MigrationContext migrationContext, - final boolean useTargetAsRefDatabase) throws Exception { + final boolean useTargetAsRefDatabase, final Set sourceTableCandidates, + final Set targetTableCandidates) throws Exception { final DataRepository leftRepository = useTargetAsRefDatabase ? migrationContext.getDataTargetRepository() : migrationContext.getDataSourceRepository(); @@ -321,11 +367,11 @@ private SchemaDifference getSchemaDifference(final MigrationContext migrationCon LOG.info("computing SCHEMA diff, REF DB = " + leftRepository.getDatabaseProvider().getDbName() + " vs Checking in DB = " + rightRepository.getDatabaseProvider().getDbName()); - final Set tableCandidates = useTargetAsRefDatabase - ? copyItemProvider.getTargetTableCandidates(migrationContext) - : copyItemProvider.getSourceTableCandidates(migrationContext); - - return computeDiff(migrationContext, leftRepository, rightRepository, tableCandidates); + return useTargetAsRefDatabase + ? 
computeDiff(migrationContext, leftRepository, rightRepository, targetTableCandidates, + sourceTableCandidates) + : computeDiff(migrationContext, leftRepository, rightRepository, sourceTableCandidates, + targetTableCandidates); } @Override @@ -554,10 +600,12 @@ protected String getSchemaDifferencesAsJson(final SchemaDifferenceResult schemaD } protected SchemaDifference computeDiff(final MigrationContext context, final DataRepository leftRepository, - final DataRepository rightRepository, final Set leftCandidates) { - final SchemaDifference schemaDifference = new SchemaDifference(rightRepository.asDatabase(), + final DataRepository rightRepository, final Set leftCandidates, + final Set rightCandidates) throws CloneNotSupportedException { + final Database database = (Database) rightRepository.asDatabase().clone(); + preProcessDatabaseModel(context, database, rightCandidates); + final SchemaDifference schemaDifference = new SchemaDifference(database, rightRepository.getDataSourceConfiguration().getTablePrefix()); - final Set leftDatabaseTables = getTables(context, leftCandidates); LOG.info("LEFT Repo = " + leftRepository.getDatabaseProvider().getDbName()); LOG.info("RIGHT Repo = " + rightRepository.getDatabaseProvider().getDbName()); @@ -571,7 +619,7 @@ protected SchemaDifference computeDiff(final MigrationContext context, final Dat } // LOG.info(" -------------------------------"); - for (final TableCandidate leftCandidate : leftDatabaseTables) { + for (final TableCandidate leftCandidate : leftCandidates) { LOG.info(" Checking if Left Table exists --> " + leftCandidate.getFullTableName()); final Table leftTable = leftRepository.asDatabase().findTable(leftCandidate.getFullTableName(), false); if (leftTable == null) { diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseSchemaDifferenceTaskRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseSchemaDifferenceTaskRepository.java index 291156c..7b9930d 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseSchemaDifferenceTaskRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseSchemaDifferenceTaskRepository.java @@ -202,7 +202,7 @@ private Timestamp now() { private Connection getConnection(SchemaDifferenceContext context) throws Exception { final MigrationContext migrationContext = context.getMigrationContext(); - final DataRepository repository = !migrationContext.isDataExportEnabled() + final DataRepository repository = !migrationContext.isDataSynchronizationEnabled() ? migrationContext.getDataTargetRepository() : migrationContext.getDataSourceRepository(); diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/CopyPipeWriterStrategy.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/CopyPipeWriterStrategy.java index f6acaf8..6b56a3e 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/CopyPipeWriterStrategy.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/CopyPipeWriterStrategy.java @@ -83,12 +83,13 @@ public void write(CopyContext context, DataPipe pipe, CopyContext.DataC nullifyColumns.addAll(context.getMigrationContext().getNullifyColumns().get(targetTableName)); LOG.info("Nullify column(s): {}", nullifyColumns); } - + final String query = isTopSupported(context) + ? 
"select TOP 1 * from %s where 0 = 1" + : "select * from %s where 0 = 1"; List columnsToCopy = new ArrayList<>(); try (Connection sourceConnection = context.getMigrationContext().getDataSourceRepository().getConnection(); Statement stmt = sourceConnection.createStatement(); - ResultSet metaResult = stmt - .executeQuery(String.format("select * from %s where 0 = 1", item.getSourceItem()))) { + ResultSet metaResult = stmt.executeQuery(String.format(query, item.getSourceItem()))) { ResultSetMetaData sourceMeta = metaResult.getMetaData(); int columnCount = sourceMeta.getColumnCount(); for (int i = 1; i <= columnCount; i++) { @@ -307,4 +308,9 @@ private RetriableTask createWriterTask(CopyPipeWriterContext dwc, DataSet dataSe return new CopyPipeWriterTask(dwc, dataSet, anonymizerConfigurator.getConfiguration()); } } + + private boolean isTopSupported(final CopyContext context) { + return context.getMigrationContext().getDataTargetRepository().getDatabaseProvider().isMssqlUsed() + || context.getMigrationContext().getDataTargetRepository().getDatabaseProvider().isHanaUsed(); + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/CopyPipeWriterTask.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/CopyPipeWriterTask.java index 5f7b157..3cef7cd 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/CopyPipeWriterTask.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/CopyPipeWriterTask.java @@ -357,7 +357,7 @@ private void process() throws Exception { bulkWriterStatement.clearBatch(); connection.commit(); ctx.getDatabaseCopyTaskRepository().markBatchCompleted(ctx.getContext(), ctx.getCopyItem(), - dataSet.getBatchId()); + dataSet.getBatchId(), dataSet.getPartition()); long totalCount = ctx.getTotalCount().addAndGet(batchCount); ctx.getDatabaseCopyTaskRepository().updateTaskProgress(ctx.getContext(), ctx.getCopyItem(), totalCount); } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/DataDeleteWriterTask.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/DataDeleteWriterTask.java index 5325ee7..d71b10d 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/DataDeleteWriterTask.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/DataDeleteWriterTask.java @@ -12,6 +12,7 @@ import com.sap.cx.boosters.commercedbsync.dataset.DataSet; import com.sap.cx.boosters.commercedbsync.performance.PerformanceRecorder; import com.sap.cx.boosters.commercedbsync.performance.PerformanceUnit; +import de.hybris.bootstrap.ddl.DataBaseProvider; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,10 +62,18 @@ private void process() throws Exception { originalAutoCommit = connection.getAutoCommit(); String sqlDelete; - if (ctx.getContext().getMigrationContext().getDataTargetRepository().getDatabaseProvider().isOracleUsed()) { - sqlDelete = getBulkDeleteStatementOracle(ctx.getCopyItem().getTargetItem(), PK); + String targetItem = ctx.getCopyItem().getTargetItem(); + DataBaseProvider dbProvider = ctx.getContext().getMigrationContext().getDataTargetRepository() + .getDatabaseProvider(); + + if (dbProvider.isOracleUsed()) { + sqlDelete = getBulkDeleteStatementOracle(targetItem, PK); + } else if (dbProvider.isPostgreSqlUsed()) { + sqlDelete = getBulkDeleteStatementPostgreSql(targetItem, PK); + } else if (dbProvider.isMySqlUsed()) { + sqlDelete = getBulkDeleteStatementMySql(targetItem, PK); } else { - 
sqlDelete = getBulkDeleteStatement(ctx.getCopyItem().getTargetItem(), PK); + sqlDelete = getBulkDeleteStatement(targetItem, PK); } try (PreparedStatement bulkWriterStatement = connection.prepareStatement(sqlDelete)) { @@ -148,4 +157,26 @@ private String getBulkDeleteStatementOracle(final String targetTableName, final LOG.debug("MERGE-DELETE ORACLE " + sqlBuilder); return sqlBuilder.toString(); } + + private String getBulkDeleteStatementMySql(final String targetTableName, final String columnId) { + final StringBuilder sqlBuilder = new StringBuilder(); + sqlBuilder.append(String.format("DELETE t FROM %s t", targetTableName)); + sqlBuilder.append("\n"); + sqlBuilder.append(String.format("JOIN (SELECT ? AS %s) s ON t.%s = s.%s", columnId, columnId, columnId)); + LOG.debug("DELETE MYSQL " + sqlBuilder); + return sqlBuilder.toString(); + } + + // PostgreSql >= 15 + private String getBulkDeleteStatementPostgreSql(final String targetTableName, final String columnId) { + final StringBuilder sqlBuilder = new StringBuilder(); + sqlBuilder.append(String.format("MERGE INTO %s AS t", targetTableName)); + sqlBuilder.append("\n"); + sqlBuilder.append(String.format("USING (SELECT ? AS %s) AS s ON (t.%s = s.%s)", columnId, columnId, columnId)); + sqlBuilder.append("\n"); + sqlBuilder.append("WHEN MATCHED THEN DELETE"); + sqlBuilder.append(";"); + LOG.debug("MERGE-DELETE PostgreSQL: " + sqlBuilder); + return sqlBuilder.toString(); + } } diff --git a/commercedbsynchac/hac/resources/static/js/schemaCopy.js b/commercedbsynchac/hac/resources/static/js/schemaCopy.js index 9c59739..a63dfb9 100644 --- a/commercedbsynchac/hac/resources/static/js/schemaCopy.js +++ b/commercedbsynchac/hac/resources/static/js/schemaCopy.js @@ -225,10 +225,10 @@ function fillResult(status) { sourceSchemaDiffTable.fnClearTable(); if(status.diffResult.target.results.length > 0) { - targetSchemaDiffTable.fnAddData(status.diffResult.target.results.map((result) => [result[0], result[2]])); + targetSchemaDiffTable.fnAddData(status.diffResult.target.results.map((result) => [result[1], result[2]])); } if(status.diffResult.source.results.length > 0) { - sourceSchemaDiffTable.fnAddData(status.diffResult.source.results.map((result) => [result[0], result[2]])); + sourceSchemaDiffTable.fnAddData(status.diffResult.source.results.map((result) => [result[1], result[2]])); } sqlQueryEditor.setValue(status.sqlScript); diff --git a/commercedbsynchac/hac/src/de/hybris/platform/hac/controller/CommercemigrationhacController.java b/commercedbsynchac/hac/src/de/hybris/platform/hac/controller/CommercemigrationhacController.java index c7cde75..4e46921 100644 --- a/commercedbsynchac/hac/src/de/hybris/platform/hac/controller/CommercemigrationhacController.java +++ b/commercedbsynchac/hac/src/de/hybris/platform/hac/controller/CommercemigrationhacController.java @@ -20,6 +20,7 @@ import com.sap.cx.boosters.commercedbsync.service.DatabaseMigrationService; import com.sap.cx.boosters.commercedbsync.service.DatabaseSchemaDifferenceService; import com.sap.cx.boosters.commercedbsync.service.impl.BlobDatabaseMigrationReportStorageService; +import com.sap.cx.boosters.commercedbsync.utils.LocalDateTypeAdapter; import com.sap.cx.boosters.commercedbsync.utils.MaskUtil; import com.sap.cx.boosters.commercedbsynchac.metric.MetricService; import de.hybris.platform.commercedbsynchac.data.*; @@ -150,7 +151,7 @@ public String data(final Model model) { model.addAttribute("isLogSql", BooleanUtils.toBooleanDefaultIfNull(migrationContext.isLogSql(), DEFAULT_BOOLEAN_VAL)); 
model.addAttribute("isSchedulerResumeEnabled", migrationContext.isSchedulerResumeEnabled()); - model.addAttribute("isDataExportEnabled", migrationContext.isDataExportEnabled()); + model.addAttribute("isDataExportEnabled", migrationContext.isDataSynchronizationEnabled()); return "dataCopy"; } @@ -291,7 +292,7 @@ public String abortSchema() throws Exception { @RequestMapping(value = "/copyData", method = RequestMethod.POST, produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody public MigrationStatus copyData(@RequestParam Map copyConfig) throws Exception { - if (migrationContext.isDataExportEnabled()) { + if (migrationContext.isDataSynchronizationEnabled()) { throw new IllegalStateException("Migration cannot be started from HAC"); } @@ -330,7 +331,7 @@ public MigrationStatus copyData(@RequestParam Map copyConf @RequestMapping(value = "/abortCopy", method = RequestMethod.PUT, produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody public String abortCopy(@RequestBody String migrationID) throws Exception { - if (migrationContext.isDataExportEnabled()) { + if (migrationContext.isDataSynchronizationEnabled()) { throw new IllegalStateException("Migration cannot be aborted from HAC"); } @@ -401,7 +402,11 @@ private Long convertToEpoch(LocalDateTime time) { throws Exception { logAction("Download migration report button clicked"); response.setHeader("Content-Disposition", "attachment; filename=migration-report.json"); - Gson gson = new GsonBuilder().setPrettyPrinting().create(); + final GsonBuilder gsonBuilder = new GsonBuilder(); + gsonBuilder.setPrettyPrinting(); + gsonBuilder.registerTypeAdapter(LocalDateTime.class, new LocalDateTypeAdapter()); + gsonBuilder.disableHtmlEscaping(); + Gson gson = gsonBuilder.create(); String json = gson.toJson(databaseMigrationService.getMigrationReport(migrationContext, migrationId)); return json.getBytes(StandardCharsets.UTF_8.name()); } diff --git a/commercedbsynchac/project.properties b/commercedbsynchac/project.properties index ae90c6a..cd1fd56 100644 --- a/commercedbsynchac/project.properties +++ b/commercedbsynchac/project.properties @@ -7,6 +7,7 @@ commercedbsynchac.key=value # Specifies the location of the spring context file putted automatically to the global platform application context. commercedbsynchac.application-context=commercedbsynchac-spring.xml migration.from.hac.enabled=true +configuration.view.blacklist.migration=${migration.properties.masked} ## fix for Config Panel rendering error due to: "EvalError: Refused to evaluate a string as JavaScript because 'unsafe-eval' is not an allowed" at static/js/configPanel.js:169 hac.xss.filter.header.Content-Security-Policy=default-src 'self'; style-src 'self' 'unsafe-inline'; img-src 'self' data:; script-src 'self' 'unsafe-inline' 'unsafe-eval' \ No newline at end of file diff --git a/docs/configuration/CONFIGURATION-GUIDE.md b/docs/configuration/CONFIGURATION-GUIDE.md index 7226a54..8314fdb 100644 --- a/docs/configuration/CONFIGURATION-GUIDE.md +++ b/docs/configuration/CONFIGURATION-GUIDE.md @@ -1,5 +1,10 @@ # SAP Commerce DB Sync - Configuration Guide +## DB Sync flows diagram + +![db sync flows diagram](../../docs/user/db-sync-flows.svg) + + ## Configuration reference [Configuration Reference](CONFIGURATION-REFERENCE.md) To get an overview of the configurable properties. 
diff --git a/docs/configuration/CONFIGURATION-REFERENCE.md b/docs/configuration/CONFIGURATION-REFERENCE.md index d4ca5de..b79a002 100644 --- a/docs/configuration/CONFIGURATION-REFERENCE.md +++ b/docs/configuration/CONFIGURATION-REFERENCE.md @@ -2,91 +2,96 @@ # SAP Commerce DB Sync - Configuration Reference -| Property | Description | Default | values | optional | dependency | -|---------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------| --- | --- | --- | -| migration.cluster.enabled | Run migration in the cluster (based on commerce cluster config). The 'HAC' node will be the primary one. A scheduling algorithm decides which table will run on which node. Nodes are notified using cluster events. | `false` | true or false | true | | -| migration.data.columns.excluded.attributedescriptors | Specifies the columns to be excluded | | migration.data.columns.excluded.[tablename]=[comma separated list of column names] | true | | -| migration.data.columns.nullify.attributedescriptors | Specifies the columns to be nullified. Whatever value there was will be replaced with NULL in the target column. | | migration.data.columns.nullify.[tablename]=[comma separated list of column names] | true | | -| migration.data.export.enabled | Activate data export to external DB via cron jobs | `false` | true or false | true | | -| migration.data.failonerror.enabled | If set to true, the migration will abort as soon as an error occured. If set to false, the migration will try to continue if the state of the runtime allows. | `true` | true or false | true | | -| migration.data.filestorage.container.name | Specifies the name of the container where the tool will store the files related to migration in the blob storage pointed by the property {migration.data.report.connectionstring} | `migration` | any string | false | | -| migration.data.fulldatabase.enabled | Specifies if full database migration is enabled. | `true` | true or false | false | | -| migration.data.incremental.enabled | If set to true, the migration will run in incremental mode. Only rows that were modified after a given timestamp will be taken into account. | `false` | true or false | true | | -| migration.data.incremental.tables | Only these tables will be taken into account for incremental migration. | | comma separated list of tables. | true | migration.data.incremental.enabled | -| migration.data.incremental.timestamp | Records created or modified after this timestamp will be copied only. | | The timestamp in ISO-8601 ISO_ZONED_DATE_TIME format | true | migration.data.incremental.enabled | -| migration.data.indices.disable.enabled | If set to true, all indices in the target table will be disabled (NOT removed) before copying over the data. After the data copy the indices will be enabled and rebuilt again. | `false` | true or false | true | | -| migration.data.indices.disable.included | If disabling of indices is enabled, this property specifies the tables that should be included. If no tables specified, indices for all tables will be disabled. 
| | comma separated list of tables | true | migration.data.indices.disable.enabled | -| migration.data.indices.drop.enabled | If set to true, all indices in the target table will be removed before copying over the data. | `false` | true or false | true | | -| migration.data.indices.drop.recreate.exclude | do not recreate following indices after the migration. Comma separated values | | comma separated values | true | | -| migration.data.maxparalleltablecopy | Specifies the number of tables that are copied over in parallel. | `2` | integer value | true | | -| migration.data.mssql.update.statistics.enabled | If set to true, runs UPDATE STATISTICS against all user-defined and internal tables in the target database after successful data migration. Applies only for MS SQL target database. | `false` | true or false | false | | -| migration.data.pipe.capacity | Specifies the capacity of the data pipe. | `100` | integer value | true | | -| migration.data.pipe.timeout | Specifies the timeout of the data pipe. | `7200` | integer value | true | | -| migration.cluster.chunk.size | If set to any positive value enables chunking globally. Specifies the number of rows to read per chunk. Each split represents `virtual` table that is processed in separate pipeline. Only taken into account if higher then `(int) Math.ceil(rowCount / numNodes / batchSize) * batchSize)` | `-1` | any number | false | migration.cluster.chunking.enabled | -| migration.cluster.chunk.size.TABLE | Same as `migration.cluster.chunk.size` but set per table. | | any number | true | migration.cluster.chunking.enabled | -| migration.data.reader.batchsize | Specifies the number of rows to read per batch. This only affects tables which can be batched. | `1000` | integer value | true | | -| migration.data.reader.batchsize.TABLE | Table individual batch size for reading data from source enabling tuning on read speed vs. memory usage. Replace the TABLE with the source table name without prefix. | | integer value | true | | -| migration.data.report.connectionstring | Specifies blob storage connection string for storing reporting files. | `${media.globalSettings.cloudAzureBlobStorageStrategy.connection}` | any azure blob storage connection string | true | | -| migration.data.tables.audit.enabled | Flag to enable the migration of audit tables. | `true` | true or false | true | | -| migration.data.tables.custom | Specifies a list of custom tables to migrate. Custom tables are tables that are not part of the commerce type system. | | comma separated list of table names. | true | | -| migration.data.tables.excluded | Tables to exclude from migration (use table names name without prefix) | `SYSTEMINIT,StoredHttpSessions,itemdeletionmarkers,tasks_aux_queue,tasks_aux_scheduler,tasks_aux_workers` | comma separated list of table names. | true | | -| migration.data.tables.included | Tables to include (use table names name without prefix) | | comma separated list of table names. | true | | -| migration.data.truncate.enabled | Specifies if the target tables should be truncated before data is copied over. | `true` | true or false | true | | -| migration.data.truncate.excluded | If truncation of target tables is enabled, this property specifies tables that should be excluded from truncation. | | comma separated list of table names | true | migration.data.truncate.enabled | -| migration.data.view.name.pattern | Support views during data migration. String pattern for view naming convention with `'%s'` as table name. e.g. 
`v_%s` | `v_%s` | any string | true | | -| migration.data.view.t.TABLE.columnPrefix | Specifies the column prefix for specific table. | | any string | true | migration.data.view.t.TABLE.enabled | -| migration.data.view.t.TABLE.columnTransformation.COLUMN | Possibility to use custom functions to obfuscate values for specific columns | `GETDATE()` | any valid SQL function call | true | migration.data.view.t.TABLE.enabled | -| migration.data.view.t.TABLE.enabled | Activate DDL view generation for specific | `false` | any string | true | | -| migration.data.view.t.TABLE.joinWhereClause | Activate DDL view generation for specific _TABLE_, with additional `JOIN` clausule | `{table}` | any string | true | migration.data.view.t.TABLE.enabled | -| migration.data.workers.reader.maxtasks | Specifies the number of threads used per table to read data from source. Note that this value applies per table, so in total the number of threads will depend on 'migration.data.maxparalleltablecopy'. [total number of reader threads] = [migration.data.workers.reader.maxtasks] * [migration.data.maxparalleltablecopy] | `3` | integer value | true | migration.data.maxparalleltablecopy | -| migration.data.workers.retryattempts | Specifies the number of retries in case a worker task fails. | `0` | integer value | true | | -| migration.data.workers.writer.maxtasks | Specifies the number of threads used per table to write data to target. Note that this value applies per table, so in total the number of threads will depend on 'migration.data.maxparalleltablecopy'. [total number of writer threads] = [migration.data.workers.writer.maxtasks] * [migration.data.maxparalleltablecopy] | `10` | integer value | true | migration.data.maxparalleltablecopy | -| migration.ds.source.db.connection.pool.maxlifetime | Determines how long the source db connection can remain in the pool before it is closed and replaced, regardless of whether it is still active or idle. | `1800000` | any number | true | | -| migration.ds.source.db.connection.pool.size.active.max | Specifies maximum amount of active connections in the source db pool | `${db.pool.maxActive}` | integer value | false | | -| migration.ds.source.db.connection.pool.size.idle.max | Specifies maximum amount of connections in the source db pool | `${db.pool.maxIdle}` | integer value | false | | -| migration.ds.source.db.connection.pool.size.idle.min | Specifies minimum amount of idle connections available in the source db pool | `${db.pool.minIdle}` | integer value | false | | -| migration.ds.source.db.driver | Specifies the driver class for the source jdbc connection | | any valid jdbc driver class | false | | -| migration.ds.source.db.password | Specifies the password for the source jdbc connection | | any valid password for the jdbc connection | false | | -| migration.ds.source.db.schema | Specifies the schema the respective commerce installation is deployed to. | | any valid schema name for the commerce installation | false | | -| migration.ds.source.db.tableprefix | Specifies the table prefix used on the source commerce database. This may be relevant if a commerce installation was initialized using 'db.tableprefix'. | | any valid commerce database table prefix. 
| true | | -| migration.ds.source.db.typesystemname | Specifies the name of the type system that should be taken into account | `${db.type.system.name}` | any valid type system name | true | | -| migration.ds.source.db.typesystemsuffix | Specifies the suffix which is used for the source typesystem | | the suffix used for typesystem. I.e, 'attributedescriptors1' means the suffix is '1' | true | migration.ds.source.db.typesystemname | -| migration.ds.source.db.url | Specifies the url for the source jdbc connection | | any valid jdbc url | false | | -| migration.ds.source.db.username | Specifies the user name for the source jdbc connection | | any valid user name for the jdbc connection | false | | -| migration.ds.target.db.catalog | Specifies the catalog name for the target commerce database. | | any valid catalog name | true | | -| migration.ds.target.db.connection.pool.maxlifetime | Determines how long the target db connection can remain in the pool before it is closed and replaced, regardless of whether it is still active or idle. | `1800000` | any number | true | | -| migration.ds.target.db.connection.pool.size.active.max | Specifies maximum amount of connections in the target db pool | `${db.pool.maxActive}` | integer value | false | | -| migration.ds.target.db.connection.pool.size.idle.max | Specifies maximum amount of idle connections available in the target db pool | `${db.pool.maxIdle}` | integer value | false | | -| migration.ds.target.db.connection.pool.size.idle.min | Specifies minimum amount of idle connections available in the target db pool | `${db.pool.minIdle}` | integer value | false | | -| migration.ds.target.db.driver | Specifies the driver class for the target jdbc connection | `${db.driver}` | any valid jdbc driver class | false | | -| migration.ds.target.db.max.stage.migrations | When using the staged approach, multiple sets of commerce tables may exists (each having its own tableprefix). To prevent cluttering the db, this property specifies the maximum number of table sets that can exist, if exceeded the schema migrator will complain and suggest a cleanup. | `5` | integer value | true | | -| migration.ds.target.db.password | Specifies the password for the target jdbc connection | `${db.password}` | any valid password for the jdbc connection | false | | -| migration.ds.target.db.schema | Specifies the schema the target commerce installation is deployed to. | `dbo` | any valid schema name for the commerce installation | false | | -| migration.ds.target.db.tableprefix | Specifies the table prefix used on the target commerce database. This may be relevant if a commerce installation was initialized using `${db.tableprefix}` / staged approach. | `${db.tableprefix}` | any valid commerce database table prefix. | true | | -| migration.ds.target.db.typesystemname | Specifies the name of the type system that should be taken into account | `DEFAULT` | any valid type system name | true | | -| migration.ds.target.db.typesystemsuffix | Specifies the suffix which is used for the target typesystem | | the suffix used for typesystem. 
I.e, 'attributedescriptors1' means the suffix is '1' | true | migration.ds.source.db.typesystemname | -| migration.ds.target.db.url | Specifies the url for the target jdbc connection | `${db.url}` | any valid jdbc url | false | | -| migration.ds.target.db.username | Specifies the user name for the target jdbc connection | `${db.username}` | any valid user name for the jdbc connection | false | | -| migration.input.profiles | Specifies the profile name of data source that serves as migration input | `source` | name of the data source profile | true | | -| migration.locale.default | Specifies the default locale used. | `en-US` | any locale | true | | -| migration.log.sql | If set to true, the JDBC queries ran against the source and target data sources will be logged in the storage pointed by the property {migration.data.report.connectionstring} | `false` | true or false | false | | -| migration.log.sql.memory.flush.threshold.nbentries | Specifies the number of log entries to add to the in-memory collection of JDBC log entries of a JDBC queries store before flushing the collection contents into the blob file storage associated with the JDBC store's data souce and clearing the in-memory collection to free memory | `10000000` | an integer number | 10,000,000 | | -| migration.log.sql.source.showparameters | If set to true, the values of the parameters of the JDBC queries ran against the source data source will be logged in the JDBC queries logs (migration.log.sql has to be true to enable this type of logging). For security reasons, the tool will never log parameter values for the queries ran against the target datasource. | `true` | true or false | true | | -| migration.memory.attempts | Number of attempts to wait for free memory | `300` | any number | false | | -| migration.memory.min | Delays reading until a minimum amount of memory is available | `5000000` | any number | false | | -| migration.memory.wait | Number of time to wait for free memory (milliseconds) | `2000` | any number | false | | -| migration.media.container.prefix | Custom prefix for media container name, used instead of default `master` or `db.tableprefix`. Can be used to use medias from non-standard location| `${db.tableprefix}` | any string | true | | -| migration.media.container.suffix | Extra suffix added to media container name if needed. If configured container will be named as `sys-PREFIX-SUFFIX-name`| | any string | true | | -| migration.output.profiles | Specifies the profile name of data sources that serves as migration output | `target` | name of the data source profile | true | | -| migration.profiling | Activates enhanced memory usage logging | `false` | true or false | false | | -| migration.properties.masked | Specifies the properties that should be masked in HAC. | `migration.data.report.connectionstring,migration.ds.source.db.password,migration.ds.target.db.password` | any property key | true | | -| migration.scheduler.resume.enabled | If set to true, the migration will resume from where it stopped (either due to errors or cancellation). | `false` | true or false | true | | -| migration.schema.autotrigger.enabled | Specifies if the schema migrator should be automatically triggered before data copy process is started | `false` | true or false | true | migration.schema.enabled | -| migration.schema.enabled | Globally enables / disables schema migration. If set to false, no schema changes will be applied. 
| `true` | true or false | true | | -| migration.schema.target.columns.add.enabled | Specifies if columns which are missing in the target tables should be added by schema migration. | `true` | true or false | true | migration.schema.enabled | -| migration.schema.target.columns.remove.enabled | Specifies if extra columns in target tables (compared to source schema) should be removed by schema migration. | `true` | true or false | true | migration.schema.enabled | -| migration.schema.target.tables.add.enabled | Specifies if tables which are missing in the target should be added by schema migration. | `true` | true or false | true | migration.schema.enabled | -| migration.schema.target.tables.remove.enabled | Specifies if extra tables in target (compared to source schema) should be removed by schema migration. | `false` | true or false | true | migration.schema.enabled | -| migration.stalled.timeout | Specifies the timeout of the migration monitor. If there was no activity for too long the migration will be marked as 'stalled' and aborted. | `7200` | integer value | true | | -| migration.trigger.updatesystem | Specifies whether the data migration shall be triggered by the 'update running system' operation. | `false` | true or false | true | | -| migration.anonymizer.enabled | Enables / disables data anonymization. If set to false, no anonymization will be performed. | `false` | true or false | true | | +| Property | Description | Default | values | optional | dependency | +| --- | --- | --- | --- | --- | --- | +| migration.anonymizer.enabled | Enables / disables data anonymization. If set to false, no anonymization will be performed| `false` | true or false | false | | +| migration.cluster.chunk.size | If set to any positive value enables chunking globally. Specifies the number of rows to read per chunk. Each split represents `virtual` table that is processed in separate pipeline. Only taken into account if higher then `(int) Math.ceil(rowCount / numNodes / batchSize) * batchSize)`| `-1` | long value | false | | +| migration.cluster.chunk.size.TABLE | If set to any positive value enables chunking for specified TABLE. Specifies the number of rows to read per chunk. Each split represents `virtual` table that is processed in separate pipeline. Only taken into account if higher then `(int) Math.ceil(rowCount / numNodes / batchSize) * batchSize)` Replace the TABLE with the source table name without prefix.| | long value | true | | +| migration.cluster.enabled | Run migration in the cluster (based on commerce cluster config). The 'HAC' node will be the primary one. A scheduling algorithm decides which table will run on which node. Nodes are notified using cluster events.| `true` | true or false | true | | +| migration.data.columns.batch.TABLE | Configure columns to use when reading by offset| | comma separated list of column names | true | | +| migration.data.columns.excluded.attributedescriptors | Specifies the columns to be excluded| | migration.data.columns.excluded.[tablename]=[comma separated list of column names] | true | | +| migration.data.columns.nullify.attributedescriptors | Specifies the columns to be nullified. Whatever value there was will be replaced with NULL in the target column.| | migration.data.columns.nullify.[tablename]=[comma separated list of column names] | true | | +| migration.data.failonerror.enabled | If set to true, the migration will abort as soon as an error occured. 
If set to false, the migration will try to continue if the state of the runtime allows.| `true` | true or false | true | | +| migration.data.filestorage.container.name | Specifies the name of the container where the tool will store the files related to migration in the blob storage pointed by the property {migration.data.report.connectionstring}| `migration` | any string | false | | +| migration.data.fulldatabase.enabled | Specifies if full database migration is enabled.| `true` | true or false | false | | +| migration.data.incremental.enabled | If set to true, the migration will run in incremental mode. Only rows that were modified after a given timestamp will be taken into account.| `false` | true or false | true | | +| migration.data.incremental.tables | Only these tables will be taken into account for incremental migration.| | comma separated list of tables. | true | migration.data.incremental.enabled | +| migration.data.incremental.timestamp | Records created or modified after this timestamp will be copied only.| | The timestamp in ISO-8601 ISO_ZONED_DATE_TIME format | true | migration.data.incremental.enabled | +| migration.data.indices.disable.enabled | If set to true, all indices in the target table will be disabled (NOT removed) before copying over the data. After the data copy the indices will be enabled and rebuilt again.| `false` | true or false | true | | +| migration.data.indices.disable.included | If disabling of indices is enabled, this property specifies the tables that should be included. If no tables specified, indices for all tables will be disabled.| | comma separated list of tables | true | migration.data.indices.disable.enabled | +| migration.data.indices.drop.enabled | If set to true, all indices in the target table will be removed before copying over the data.| `false` | true or false | true | | +| migration.data.indices.drop.recreate.exclude | do not recreate following indices after the migration. Comma separated values| | comma separated values | true | | +| migration.data.maxparalleltablecopy | Specifies the number of tables that are copied over in parallel.| `2` | integer value | true | | +| migration.data.mssql.update.statistics.enabled | If set to true, runs UPDATE STATISTICS against all user-defined and internal tables in the target database after successful data migration. Applies only for MS SQL target database.| `false` | true or false | false | | +| migration.data.pipe.capacity | Specifies the capacity of the data pipe.| `100` | integer value | true | | +| migration.data.pipe.timeout | Specifies the timeout of the data pipe.| `7200` | integer value | true | | +| migration.data.reader.batchsize | Specifies the number of rows to read per batch. This only affects tables which can be batched.| `1000` | integer value | true | | +| migration.data.reader.batchsize.TABLE | Table individual batch size for reading data from source enabling tuning on read speed vs. memory usage. 
Replace the TABLE with the source table name without prefix.| | integer value | true | | +| migration.data.report.connectionstring | Specifies blob storage connection string for storing reporting files.| `${media.globalSettings.cloudAzureBlobStorageStrategy.connection}` | any azure blob storage connection string | true | | +| migration.data.synchronization.enabled | Activate data synchronization to external DB via cron jobs| `false` | true or false | true | | +| migration.data.tables.audit.enabled | Flag to enable the migration of audit tables.| `true` | true or false | true | | +| migration.data.tables.custom | Specifies a list of custom tables to migrate. Custom tables are tables that are not part of the commerce type system.| | comma separated list of table names. | true | | +| migration.data.tables.excluded | Tables to exclude from migration (use table names name without prefix)| `SYSTEMINIT,StoredHttpSessions,itemdeletionmarkers,tasks_aux_queue,tasks_aux_scheduler,tasks_aux_workers` | comma separated list of table names. | true | | +| migration.data.tables.included | Tables to include (use table names name without prefix)| | comma separated list of table names. | true | | +| migration.data.tables.order.first | Tables to be migrated as first (use table names with prefix and suffix)| | comma separated list of table full names. | true | | +| migration.data.tables.order.last | Tables to be migrated as last (use table names with prefix and suffix)| | comma separated list of table full names. | true | | +| migration.data.tables.partitioned | List of partitioned tables (use table names with prefix and suffix). Separate batches will be created for each table partition. This only applies when using HANA as source database.| | comma separated list of table full names. | true | | +| migration.data.truncate.enabled | Specifies if the target tables should be truncated before data is copied over.| `true` | true or false | true | | +| migration.data.truncate.excluded | If truncation of target tables is enabled, this property specifies tables that should be excluded from truncation.| | comma separated list of table names | true | migration.data.truncate.enabled | +| migration.data.view.name.pattern | Support views during data migration. String pattern for view naming convention with `'%s'` as table name. e.g. `v_%s`| `v_%s` | any string | true | | +| migration.data.view.t.TABLE.columnPrefix | Specifies the column prefix for specific table.| | any string | true | migration.data.view.t.TABLE.enabled | +| migration.data.view.t.TABLE.columnTransformation.COLUMN | Possibility to use custom functions to obfuscate values for specific columns| `GETDATE()` | any valid SQL function call | true | migration.data.view.t.TABLE.enabled | +| migration.data.view.t.TABLE.enabled | Activate DDL view generation for specific| `false` | any string | true | | +| migration.data.view.t.TABLE.joinWhereClause | Activate DDL view generation for specific _TABLE_, with additional `JOIN` clausule| `{table}` | any string | true | migration.data.view.t.TABLE.enabled | +| migration.data.workers.reader.maxtasks | Specifies the number of threads used per table to read data from source. Note that this value applies per table, so in total the number of threads will depend on 'migration.data.maxparalleltablecopy'. 
[total number of reader threads] = [migration.data.workers.reader.maxtasks] * [migration.data.maxparalleltablecopy]| `3` | integer value | true | migration.data.maxparalleltablecopy | +| migration.data.workers.retryattempts | Specifies the number of retries in case a worker task fails.| `0` | integer value | true | | +| migration.data.workers.writer.maxtasks | Specifies the number of threads used per table to write data to target. Note that this value applies per table, so in total the number of threads will depend on 'migration.data.maxparalleltablecopy'. [total number of writer threads] = [migration.data.workers.writer.maxtasks] * [migration.data.maxparalleltablecopy]| `10` | integer value | true | migration.data.maxparalleltablecopy | +| migration.ds.source.db.connection.pool.maxlifetime | Determines how long the source db connection can remain in the pool before it is closed and replaced, regardless of whether it is still active or idle.| `1800000` | any number | true | | +| migration.ds.source.db.connection.pool.size.active.max | Specifies maximum amount of active connections in the source db pool| `${db.pool.maxActive}` | integer value | false | | +| migration.ds.source.db.connection.pool.size.idle.max | Specifies maximum amount of connections in the source db pool| `${db.pool.maxIdle}` | integer value | false | | +| migration.ds.source.db.connection.pool.size.idle.min | Specifies minimum amount of idle connections available in the source db pool| `${db.pool.minIdle}` | integer value | false | | +| migration.ds.source.db.driver | Specifies the driver class for the source jdbc connection| | any valid jdbc driver class | false | | +| migration.ds.source.db.password | Specifies the password for the source jdbc connection| | any valid password for the jdbc connection | false | | +| migration.ds.source.db.schema | Specifies the schema the respective commerce installation is deployed to.| | any valid schema name for the commerce installation | false | | +| migration.ds.source.db.tableprefix | Specifies the table prefix used on the source commerce database. This may be relevant if a commerce installation was initialized using 'db.tableprefix'.| | any valid commerce database table prefix. | true | | +| migration.ds.source.db.typesystemname | Specifies the name of the type system that should be taken into account| `${db.type.system.name}` | any valid type system name | true | | +| migration.ds.source.db.typesystemsuffix | Specifies the suffix which is used for the source typesystem| | the suffix used for typesystem. 
I.e, 'attributedescriptors1' means the suffix is '1' | true | migration.ds.source.db.typesystemname |
+| migration.ds.source.db.url | Specifies the url for the source jdbc connection| | any valid jdbc url | false | |
+| migration.ds.source.db.username | Specifies the user name for the source jdbc connection| | any valid user name for the jdbc connection | false | |
+| migration.ds.target.db.catalog | Specifies the catalog name for the target commerce database.| | any valid catalog name | true | |
+| migration.ds.target.db.connection.pool.maxlifetime | Determines how long the target db connection can remain in the pool before it is closed and replaced, regardless of whether it is still active or idle.| `1800000` | any number | true | |
+| migration.ds.target.db.connection.pool.size.active.max | Specifies the maximum amount of connections in the target db pool| `${db.pool.maxActive}` | integer value | false | |
+| migration.ds.target.db.connection.pool.size.idle.max | Specifies the maximum amount of idle connections available in the target db pool| `${db.pool.maxIdle}` | integer value | false | |
+| migration.ds.target.db.connection.pool.size.idle.min | Specifies the minimum amount of idle connections available in the target db pool| `${db.pool.minIdle}` | integer value | false | |
+| migration.ds.target.db.driver | Specifies the driver class for the target jdbc connection| `${db.driver}` | any valid jdbc driver class | false | |
+| migration.ds.target.db.max.stage.migrations | When using the staged approach, multiple sets of commerce tables may exist (each having its own table prefix). To prevent cluttering the db, this property specifies the maximum number of table sets that may exist; if exceeded, the schema migrator will complain and suggest a cleanup.| `5` | integer value | true | |
+| migration.ds.target.db.password | Specifies the password for the target jdbc connection| `${db.password}` | any valid password for the jdbc connection | false | |
+| migration.ds.target.db.schema | Specifies the schema the target commerce installation is deployed to.| `dbo` | any valid schema name for the commerce installation | false | |
+| migration.ds.target.db.tableprefix | Specifies the table prefix used on the target commerce database. This may be relevant if a commerce installation was initialized using `${db.tableprefix}` / staged approach.| `${db.tableprefix}` | any valid commerce database table prefix. | true | |
+| migration.ds.target.db.typesystemname | Specifies the name of the type system that should be taken into account| `DEFAULT` | any valid type system name | true | |
+| migration.ds.target.db.typesystemsuffix | Specifies the suffix which is used for the target typesystem| | the suffix used for the typesystem. I.e, 'attributedescriptors1' means the suffix is '1' | true | migration.ds.source.db.typesystemname |
+| migration.ds.target.db.url | Specifies the url for the target jdbc connection| `${db.url}` | any valid jdbc url | false | |
+| migration.ds.target.db.username | Specifies the user name for the target jdbc connection| `${db.username}` | any valid user name for the jdbc connection | false | |
+| migration.input.profiles | Specifies the profile name of the data source that serves as migration input| `source` | name of the data source profile | true | |
+| migration.internal.tables.storage | Specifies where to store the internal tables. If empty and migration.data.synchronization.enabled is true, it is set to "target", otherwise "source"| | target or source | true | |
+| migration.locale.default | Specifies the default locale used.| `en-US` | any locale | true | |
+| migration.log.sql | If set to true, the JDBC queries run against the source and target data sources will be logged in the storage pointed to by the property {migration.data.report.connectionstring}| `false` | true or false | false | |
+| migration.log.sql.memory.flush.threshold.nbentries | Specifies the number of log entries to accumulate in the in-memory collection of a JDBC queries store before flushing the collection contents into the blob file storage associated with the JDBC store's data source and clearing the in-memory collection to free memory| `10000000` | an integer number | true | |
+| migration.log.sql.source.showparameters | If set to true, the values of the parameters of the JDBC queries run against the source data source will be logged in the JDBC queries logs (migration.log.sql has to be true to enable this type of logging). For security reasons, the tool will never log parameter values for the queries run against the target data source.| `true` | true or false | true | |
+| migration.media.container.prefix | Custom prefix for the media container name, used instead of the default `master` or `db.tableprefix`. Can be used to read medias from a non-standard location| `${db.tableprefix}` | any string | true | |
+| migration.media.container.suffix | Extra suffix added to the media container name if needed. If configured, the container will be named `sys-PREFIX-SUFFIX-name`| | any string | true | |
+| migration.memory.attempts | Number of attempts to wait for free memory| `300` | any number | false | |
+| migration.memory.min | Delays reading until a minimum amount of memory is available| `5000000` | any number | false | |
+| migration.memory.wait | Time to wait for free memory (milliseconds)| `2000` | any number | false | |
+| migration.output.profiles | Specifies the profile name of the data source that serves as migration output| `target` | name of the data source profile | true | |
+| migration.profiling | Activates enhanced memory usage logging| `false` | true or false | false | |
+| migration.properties.masked | Specifies the properties that should be masked in HAC.| `migration.data.report.connectionstring,migration.ds.source.db.username,migration.ds.source.db.password,migration.ds.target.db.username,migration.ds.target.db.password` | any property key | true | |
+| migration.scheduler.resume.enabled | If set to true, the migration will resume from where it stopped (either due to errors or cancellation).| `false` | true or false | true | |
+| migration.schema.autotrigger.enabled | Specifies if the schema migrator should be automatically triggered before the data copy process is started| `false` | true or false | true | migration.schema.enabled |
+| migration.schema.enabled | Globally enables / disables schema migration. If set to false, no schema changes will be applied.| `true` | true or false | true | |
+| migration.schema.target.columns.add.enabled | Specifies if columns which are missing in the target tables should be added by schema migration.| `true` | true or false | true | migration.schema.enabled |
+| migration.schema.target.columns.remove.enabled | Specifies if extra columns in target tables (compared to the source schema) should be removed by schema migration.| `true` | true or false | true | migration.schema.enabled |
+| migration.schema.target.tables.add.enabled | Specifies if tables which are missing in the target should be added by schema migration.| `true` | true or false | true | migration.schema.enabled |
+| migration.schema.target.tables.remove.enabled | Specifies if extra tables in the target (compared to the source schema) should be removed by schema migration.| `false` | true or false | true | migration.schema.enabled |
+| migration.stalled.timeout | Specifies the timeout of the migration monitor. If there has been no activity for too long, the migration will be marked as 'stalled' and aborted.| `7200` | integer value | true | |
+| migration.trigger.updatesystem | Specifies whether the data migration shall be triggered by the 'update running system' operation.| `false` | true or false | true | |
diff --git a/docs/developer/DEVELOPER-GUIDE.md b/docs/developer/DEVELOPER-GUIDE.md
index 449a55c..9e931d9 100644
--- a/docs/developer/DEVELOPER-GUIDE.md
+++ b/docs/developer/DEVELOPER-GUIDE.md
@@ -48,7 +48,7 @@ Go to the commercemigrationtest extension, like so:
 Alternatively go to the platformhome, and trigger it from there:
 ```
-platformhome>ant all integrationtests -Dtestclasses.packages=org.sap.move.commercemigrationtest.integration.*
+platformhome>ant all integrationtests -Dtestclasses.packages=com.sap.cx.boosters.commercedbsynctest.integration.*
 ```
 The integration tests are parameterized with predefined combinations of source and target databases.
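For orientation, here is a minimal sketch of how a few of the `migration.ds.target.*` properties from the configuration reference table above could be set in `local.properties`. The driver class, JDBC URL and credentials below are illustrative placeholders, not shipped defaults:

```
# Illustrative target connection settings (placeholder values, adjust to your landscape)
migration.ds.target.db.driver=com.microsoft.sqlserver.jdbc.SQLServerDriver
migration.ds.target.db.url=jdbc:sqlserver://target-host:1433;databaseName=commerce
migration.ds.target.db.username=targetuser
migration.ds.target.db.password=changeme
migration.ds.target.db.schema=dbo
# Pool sizing can simply follow the platform defaults
migration.ds.target.db.connection.pool.size.active.max=${db.pool.maxActive}
migration.ds.target.db.connection.pool.size.idle.min=${db.pool.minIdle}
migration.ds.target.db.connection.pool.size.idle.max=${db.pool.maxIdle}
```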
diff --git a/docs/user/USER-GUIDE-DATA-MIGRATION.md b/docs/user/USER-GUIDE-DATA-MIGRATION.md
index ac4d914..5b3ddb5 100644
--- a/docs/user/USER-GUIDE-DATA-MIGRATION.md
+++ b/docs/user/USER-GUIDE-DATA-MIGRATION.md
@@ -17,7 +17,7 @@ Carefully read the prerequisites and make sure you meet the requirements before
 Before you begin, ensure you have met the following requirements:
 * Your code base is compatible with the SAP Commerce version required by SAP Commerce Cloud (at minimum).
-* The code base is exactly the same in both target and source systems. It includes:
+* The code base on the source and destination systems is adjusted according to the guidance in this [blog post](https://community.sap.com/t5/crm-and-cx-blogs-by-sap/migration-and-upgrade-understanding-key-approaches/ba-p/13902274). It includes:
   * platform version
   * custom extensions
   * set of configured extensions
diff --git a/docs/user/USER-GUIDE-DATA-REPLICATION.md b/docs/user/USER-GUIDE-DATA-REPLICATION.md
index f514e6a..822926a 100644
--- a/docs/user/USER-GUIDE-DATA-REPLICATION.md
+++ b/docs/user/USER-GUIDE-DATA-REPLICATION.md
@@ -67,17 +67,18 @@ Properties require to reconfigure or readjusted for Data Sync.
 | Property | Mandatory | Default | Description |
 |---------------------------------------------------------|-----------|-------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------|
-| migration.data.export.enabled | yes | true | Activates data replication mode. Standard migration is not possinble in this state |
+| migration.data.export.enabled | yes | true | Activates data replication mode. Standard migration is not possible in this state |
 | migration.ds.source.db.url | yes | | DB url for source connection , default value should be **${db.url};ApplicationIntent=ReadOnly** ApplicationIntent can be adjusted or removed for local testing |
-| migration.ds.source.db.schema | no | dbo | DB schema for source connection |
+| migration.ds.source.db.schema | no | dbo | DB schema for source connection |
 | migration.ds.target.db.driver | yes | ${db.driver} | DB driver class for target connection |
 | migration.ds.target.db.username | yes | | DB username for target connection |
 | migration.ds.target.db.password | yes | | DB password for target connection |
 | migration.ds.target.db.tableprefix | no | ${db.tableprefix} | DB table prefix for target connection |
 | migration.ds.target.db.schema | no | dbo | DB schema for target connection |
+| migration.internal.tables.storage | no | | Specifies where to store the internal tables (source or target). If empty and migration.data.export.enabled is true, then it is set to "target", otherwise "source" |
 | migration.data.tables.included | no | | Tables to be included in the migration. It is recommended to set this parameter during the first load of selective table sync, which will allow you to sync directly from HAC along with Schema. Eventually you can do very similar with full migration cron jobs by adjusting the list of tables. |
-| migration.data.report.connectionstring | yes | ${media.globalSettings.cloudAzureBlobStorageStrategy.connection} | target blob storage for the report generation, although you can replace with Hotfolder Blob storage ${azure.hotfolder.storage.account.connection-string} |
-| migration.data.workers.retryattempts | no | 0 | retry attempts if a batch (read or write) failed. |
+| migration.data.report.connectionstring | yes | ${media.globalSettings.cloudAzureBlobStorageStrategy.connection} | Target blob storage for the report generation, although you can replace it with the Hotfolder Blob storage ${azure.hotfolder.storage.account.connection-string} |
+| migration.data.workers.retryattempts | no | 0 | Retry attempts if a batch (read or write) failed. |
 
 ## CronJob Configuration reference Data Sync
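As a rough sketch of the replication setup described by the table above (all values are placeholders; the table names and credentials are invented for illustration):

```
# Illustrative data replication settings (placeholder values)
migration.data.export.enabled=true
# Source: the commerce database, preferably opened read-only
migration.ds.source.db.url=${db.url};ApplicationIntent=ReadOnly
migration.ds.source.db.schema=dbo
# Target: the external database receiving the replicated data
migration.ds.target.db.driver=${db.driver}
migration.ds.target.db.username=replicauser
migration.ds.target.db.password=changeme
migration.ds.target.db.schema=dbo
# Keep the tool's internal bookkeeping tables on the target
migration.internal.tables.storage=target
# Optionally limit the first selective sync to a few tables
migration.data.tables.included=products,orders
migration.data.workers.retryattempts=2
```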
diff --git a/docs/user/db-sync-flows.svg b/docs/user/db-sync-flows.svg
new file mode 100644
index 0000000..c7345cb
--- /dev/null
+++ b/docs/user/db-sync-flows.svg
@@ -0,0 +1,4 @@
[SVG markup omitted. The db-sync-flows.svg diagram contrasts two flows between EXTERNAL DATABASE and SAP CX COMMERCE with DB SYNC:
- migration.data.synchronization.enabled=false (migration): data is copied between the external database and commerce, with source and target defined by migration.ds.source.db.url and migration.ds.target.db.url (reversing the roles is marked as an uncommon scenario); execution is manual in /hac and no other activity (tasks) is allowed in the system.
- migration.data.synchronization.enabled=true (synchronization): the copy direction is decided by MigrationContext.reversed; migration cannot be started from /hac since this mode is focused on data; execution is manual or scheduled in /backoffice via compositeDatabaseMigrationJob, compositeIncrementalMigrationJob, compositeTableMigrationJob, compositeReverseTableMigrationJob and compositeReverseIncrementalMigrationJob.]
\ No newline at end of file
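To relate the diagram to configuration, the mode switch itself boils down to a single flag. A hedged sketch (the job names come from the diagram text above; the actual scheduling of the composite jobs is set up in /backoffice):

```
# Migration mode: one-off copy, started manually from /hac
migration.data.synchronization.enabled=false

# Synchronization mode: recurring copies via the composite cron jobs
# (compositeIncrementalMigrationJob, compositeTableMigrationJob and their
# reverse variants), manual or scheduled from /backoffice
#migration.data.synchronization.enabled=true
```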