From 192c6e7ebe9c6b1d2aae1cd230b8377e760cc3c5 Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Tue, 22 Jul 2025 22:05:28 -0700 Subject: [PATCH 01/17] Option to migrate SQL Server hard tables to PostgreSQL --- .../org/labkey/api/data/ContainerManager.java | 6 +- api/src/org/labkey/api/data/DbSchema.java | 2 +- .../org/labkey/api/module/ModuleLoader.java | 251 +++++++++++++++++- api/src/org/labkey/api/query/TableSorter.java | 24 +- .../org/labkey/api/settings/AppPropsImpl.java | 2 +- .../SystemMaintenanceStartupListener.java | 4 +- .../postgresql/assay-24.001-24.002.sql | 2 + audit/src/org/labkey/audit/AuditLogImpl.java | 4 +- .../postgresql/core-0.000-24.000.sql | 8 +- core/src/org/labkey/core/CoreModule.java | 109 ++++---- .../core/dialect/PostgreSql92Dialect.java | 10 + .../labkey/experiment/ExperimentModule.java | 4 +- .../labkey/mothership/MothershipModule.java | 3 +- wiki/src/org/labkey/wiki/WikiModule.java | 28 +- 14 files changed, 375 insertions(+), 82 deletions(-) diff --git a/api/src/org/labkey/api/data/ContainerManager.java b/api/src/org/labkey/api/data/ContainerManager.java index 82c2455b6b0..5090b9329d5 100644 --- a/api/src/org/labkey/api/data/ContainerManager.java +++ b/api/src/org/labkey/api/data/ContainerManager.java @@ -802,7 +802,7 @@ public static void updateDescription(Container container, String description, Us { ColumnValidators.validate(CORE.getTableInfoContainers().getColumn("Title"), null, 1, description); - //For some reason there is no primary key defined on core.containers + //For some reason, there is no primary key defined on core.containers //so we can't use Table.update here SQLFragment sql = new SQLFragment("UPDATE "); sql.append(CORE.getTableInfoContainers()); @@ -818,7 +818,7 @@ public static void updateDescription(Container container, String description, Us public static void updateSearchable(Container container, boolean searchable, User user) { - //For some reason there is no primary key defined on core.containers + //For some reason, there is no primary key defined on core.containers //so we can't use Table.update here SQLFragment sql = new SQLFragment("UPDATE "); sql.append(CORE.getTableInfoContainers()); @@ -2745,7 +2745,7 @@ public static int updateContainer(TableInfo dataTable, String idField, Collectio } /** - * If a container at the given path does not exist create one and set permissions. If the container does exist, + * If a container at the given path does not exist, create one and set permissions. If the container does exist, * permissions are only set if there is no explicit ACL for the container. This prevents us from resetting * permissions if all users are dropped. Implicitly done as an admin-level service user. 
*/ diff --git a/api/src/org/labkey/api/data/DbSchema.java b/api/src/org/labkey/api/data/DbSchema.java index 94910a0deca..75a786a224b 100644 --- a/api/src/org/labkey/api/data/DbSchema.java +++ b/api/src/org/labkey/api/data/DbSchema.java @@ -148,7 +148,7 @@ public static Pair getDbScopeAndSchemaName(String fullyQualifie } } - Module getModule() + public Module getModule() { return _module; } diff --git a/api/src/org/labkey/api/module/ModuleLoader.java b/api/src/org/labkey/api/module/ModuleLoader.java index 84cd327c601..79d614e0e00 100644 --- a/api/src/org/labkey/api/module/ModuleLoader.java +++ b/api/src/org/labkey/api/module/ModuleLoader.java @@ -36,6 +36,7 @@ import org.labkey.api.collections.CopyOnWriteHashMap; import org.labkey.api.collections.LabKeyCollectors; import org.labkey.api.collections.Sets; +import org.labkey.api.data.ColumnInfo; import org.labkey.api.data.Container; import org.labkey.api.data.ConvertHelper; import org.labkey.api.data.CoreSchema; @@ -44,9 +45,11 @@ import org.labkey.api.data.DbSchemaType; import org.labkey.api.data.DbScope; import org.labkey.api.data.FileSqlScriptProvider; +import org.labkey.api.data.JdbcType; import org.labkey.api.data.RuntimeSQLException; import org.labkey.api.data.SQLFragment; import org.labkey.api.data.SchemaNameCache; +import org.labkey.api.data.SchemaTableInfo; import org.labkey.api.data.SqlExecutor; import org.labkey.api.data.SqlScriptManager; import org.labkey.api.data.SqlScriptRunner; @@ -60,6 +63,7 @@ import org.labkey.api.data.dialect.DatabaseNotSupportedException; import org.labkey.api.data.dialect.SqlDialect; import org.labkey.api.module.ModuleUpgrader.Execution; +import org.labkey.api.query.TableSorter; import org.labkey.api.resource.Resource; import org.labkey.api.security.SecurityManager; import org.labkey.api.security.UserManager; @@ -114,6 +118,9 @@ import java.lang.reflect.InvocationHandler; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; import java.util.AbstractMap; import java.util.ArrayList; import java.util.Arrays; @@ -136,6 +143,7 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.regex.Pattern; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.apache.commons.lang3.StringUtils.equalsIgnoreCase; import static org.apache.commons.lang3.StringUtils.isEmpty; @@ -184,6 +192,10 @@ public class ModuleLoader implements MemTrackerListener, ShutdownListener private UpgradeState _upgradeState; private final SqlScriptRunner _upgradeScriptRunner = new SqlScriptRunner(); + // emptySchemas flag is used to bootstrap all the database schemas without populating them with any data rows. This + // will be useful in the future when migrating SQL Server deployments to PostgreSQL. Note that a bootstrapped + // "emptySchemas" database will not be left in a usable state (until we actually implement the migration step). + private final boolean _emptySchemas = Boolean.valueOf(System.getProperty("emptySchemas")); // NOTE: the following startup fields are synchronized under STARTUP_LOCK private StartupState _startupState = StartupState.StartupIncomplete; @@ -1887,6 +1899,7 @@ private void afterUpgrade(File lockFile) lockFile.delete(); verifyRequiredModules(); + handleDatabaseMigration(); } // If the "requiredModules" parameter is present in application.properties then fail startup if any specified module is missing. 
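
The guard pattern the rest of this series applies repeatedly: any code path that seeds bootstrap data first consults shouldInsertData() (added in the next hunk), so an "emptySchemas" bootstrap creates tables, constraints, and sequences but inserts no rows. A minimal sketch of a call site adopting the guard — FooModule and bootstrapSeedData() are hypothetical, but the shape mirrors the MothershipModule and WikiModule changes later in this patch:

    public class FooModule extends DefaultModule
    {
        @Override
        public void afterUpdate(ModuleContext moduleContext)
        {
            // Skip seed-data inserts when bootstrapping empty schemas for a migration;
            // schema creation itself still runs via the module's SQL scripts
            if (moduleContext.isNewInstall() && ModuleLoader.getInstance().shouldInsertData())
                bootstrapSeedData(moduleContext);
        }

        private void bootstrapSeedData(ModuleContext moduleContext)
        {
            // Hypothetical seed-data inserts would go here
        }
    }
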
@@ -1978,10 +1991,246 @@ public boolean isNewInstall() return _newInstall; } + // Are we bootstrapping a PostgreSQL database with empty schemas? + public boolean shouldInsertData() + { + return !(_emptySchemas && isNewInstall() && DbScope.getLabKeyScope().getSqlDialect().isPostgreSQL()); + } + + // If associated properties are set, migrate data from the external SQL Server data source into the just-created + // empty PostgreSQL schemas. + private void handleDatabaseMigration() + { + if (!shouldInsertData()) + { + clearSchemas(); + verifyEmptySchemas(); + migrateDatabase(); + } + } + + // As part of SQL Server -> PostgreSQL migration, clear the few tables that are needed for bootstrap + private void clearSchemas() + { + TableInfo containers = CoreSchema.getInstance().getTableInfoContainers(); + Table.delete(containers); // Now that we've bootstrapped, delete root and shared containers + DbScope targetScope = DbScope.getLabKeyScope(); + new SqlExecutor(targetScope).execute("ALTER SEQUENCE core.containers_rowid_seq RESTART"); // Reset Containers sequence + } + + // Verify that no data rows were inserted and no sequences were incremented + private void verifyEmptySchemas() + { + DbScope scope = DbScope.getLabKeyScope(); + + Map> schemaMap = scope.getSchemaNames().stream() + .map(name -> scope.getSchema(name, DbSchemaType.Unknown)) + .collect(Collectors.partitioningBy(schema -> schema.isModuleSchema() && schema.getModule().getSupportedDatabasesSet().contains(SupportedDatabase.mssql))); + + List targetSchemas = schemaMap.get(true); + List tableWarnings = targetSchemas.stream() + .flatMap(schema -> schema.getTableNames().stream() + .map(schema::getTable) + .filter(table -> table.getTableType() != DatabaseTableType.NOT_IN_DB) + ) + .map(table -> { + long rowCount = new TableSelector(table).getRowCount(); + if (rowCount > 0) + return table.getSelectName() + " has " + StringUtilsLabKey.pluralize(rowCount, "row"); + else + return null; + }) + .filter(Objects::nonNull) + .toList(); + + if (!tableWarnings.isEmpty()) + { + _log.warn("{} rows", StringUtilsLabKey.pluralize(tableWarnings.size(), "table has", "tables have")); + tableWarnings.forEach(_log::warn); + } + + List schemasToIgnore = schemaMap.get(false).stream() + .map(DbSchema::getName) + .toList(); + String qs = StringUtils.join(Collections.nCopies(schemasToIgnore.size(), "?"), ", "); + List sequenceWarnings = new SqlSelector(scope, new SQLFragment( + "SELECT schemaname || '.' 
|| sequencename FROM pg_sequences WHERE last_value IS NOT NULL AND schemaname NOT IN (" + qs + ")", + schemasToIgnore + )) + .stream(String.class) + .toList(); + + if (!sequenceWarnings.isEmpty()) + { + _log.warn("{} a value:", StringUtilsLabKey.pluralize(sequenceWarnings.size(), "sequence has", "sequences have")); + sequenceWarnings.forEach(_log::warn); + } + } + + private void migrateDatabase() + { + DbScope targetScope = DbScope.getLabKeyScope(); + + String migrationDataSource = "ssDataSource"; // TODO: add a system property for this + DbScope sourceScope = DbScope.getDbScope(migrationDataSource); + if (null == sourceScope) + throw new ConfigurationException("Migration data source not found: " + migrationDataSource); + if (!sourceScope.getSqlDialect().isSqlServer()) + throw new ConfigurationException("Migration data source is not SQL Server: " + migrationDataSource); + + // Verify that all sequences in the target schema have an increment of 1, since that's an assumption below + Collection sequencesNonOneIncrement = new SqlSelector(targetScope, new SQLFragment("SELECT schemaname || '.' || sequencename || ': ' || increment_by FROM pg_sequences WHERE increment_by != 1")).getCollection(String.class); + if (!sequencesNonOneIncrement.isEmpty()) + { + throw new IllegalStateException(StringUtilsLabKey.pluralize(sequencesNonOneIncrement.size(), "sequence has", "sequences have") + " an increment other than 1: " + sequencesNonOneIncrement); + } + + // Get target schemas in module order, which accommodates foreign key relationships + List schemas = getModules().stream() + .flatMap(module -> module.getSchemaNames().stream().filter(name -> !module.getProvisionedSchemaNames().contains(name))) + .map(name -> targetScope.getSchema(name, DbSchemaType.Module)) + .toList(); + + for (DbSchema targetSchema : schemas) + { + DbSchema sourceSchema = sourceScope.getSchema(targetSchema.getName(), DbSchemaType.Bare); + if (!sourceSchema.existsInDatabase()) + { + _log.warn("{} has no schema named '{}'", migrationDataSource, targetSchema.getName()); + } + else + { + List sourceTables = getTables(sourceSchema); + List targetTables = getTables(targetSchema); + + if (sourceTables.size() == targetTables.size()) + { + _log.debug("{} schemas have the same number of tables ({})", sourceSchema.getName(), targetTables.size()); + } + else + { + _log.warn("{} schemas have different table counts, {} vs. {}", sourceSchema.getName(), sourceTables.size(), targetTables.size()); + Set sourceTableNames = sourceTables.stream().map(SchemaTableInfo::getName).collect(LabKeyCollectors.toCaseInsensitiveHashSet()); + Set targetTableNames = targetTables.stream().map(SchemaTableInfo::getName).collect(LabKeyCollectors.toCaseInsensitiveHashSet()); + Set sourceTableNamesCopy = new CaseInsensitiveHashSet(sourceTableNames); + sourceTableNames.removeAll(targetTableNames); + targetTableNames.removeAll(sourceTableNamesCopy); + _log.warn("Differences: {} and {}", sourceTableNames, targetTableNames); + } + + Set tablesToIgnore = targetSchema.getName().equals("core") ? 
Sets.newCaseInsensitiveHashSet("modules", "sqlscripts", "upgradesteps") : Set.of(); + for (TableInfo targetTable : TableSorter.sort(targetSchema, true)) + { + String targetTableName = targetTable.getName(); + if (tablesToIgnore.contains(targetTableName)) + continue; + SchemaTableInfo sourceTable = sourceSchema.getTable(targetTableName); + + if (sourceTable == null) + { + _log.warn("Source schema has no table named '{}'", targetTableName); + } + else if (sourceTable.getTableType() == DatabaseTableType.TABLE) // Skip the views + { + Set columnNames = sourceTable.getColumnNameSet().stream() + .filter(name -> !"_ts".equals(name)) // TODO: remove only if column has a default set? + .collect(Collectors.toSet()); + + TableSelector sourceSelector = new TableSelector(sourceTable, columnNames).setJdbcCaching(false); + try (Stream> mapStream = sourceSelector.uncachedMapStream(); Connection conn = targetScope.getConnection()) + { + Collection sourceColumns = sourceSelector.getSelectedColumns(); + // Map the source columns to the target columns so we get the right order and casing for INSERT, etc. + Collection targetColumns = sourceColumns.stream().map(sourceCol -> targetTable.getColumn(sourceCol.getName())).toList(); + String q = StringUtils.join(Collections.nCopies(sourceColumns.size(), "?"), ", "); + SQLFragment sql = new SQLFragment("INSERT INTO ") + .append(targetTable.getSelectName()) + .append("("); + + String sep = ""; + for (ColumnInfo targetColumn : targetColumns) + { + sql.append(sep) + .append(targetColumn.getSelectIdentifier().getSql()); + sep = ", "; + } + + sql.append(") VALUES (") + .append(q) + .append(")"); + + PreparedStatement statement = conn.prepareStatement(sql.getRawSQL()); + + mapStream.forEach(map -> { + try + { + int i = 1; + for (ColumnInfo col : sourceColumns) + { + String name = col.getName(); + Object value = col.getValue(map); + if (name.equalsIgnoreCase("Deleted") && targetTable.getColumn(name).getJdbcType() == JdbcType.INTEGER) + value = (boolean)value ? 1 : 0; + statement.setObject(i++, value); + } + statement.execute(); + } + catch (SQLException e) + { + throw new RuntimeException(e); + } + }); + + // TODO: Better approach is probably to query "SELECT * FROM sys.identity_columns WHERE Last_value IS NOT NULL" and set corresponding source sequences to last_value (+1?) + Long current = new SqlSelector(sourceSchema, "SELECT IDENT_CURRENT(?)", sourceTable.getSelectName()).getObject(Long.class); + if (null != current) + { + _log.info("{}: {}", sourceTable.getSelectName(), current); + List autoIncColumnNames = targetTable.getPkColumns().stream() + .filter(ColumnInfo::isAutoIncrement) + .map(col -> col.getSelectIdentifier().getId()) + .toList(); + if (autoIncColumnNames.size() == 1) + { + String sequenceName = new SqlSelector(targetSchema, "SELECT pg_get_serial_sequence(?, ?)", targetSchema.getName() + "." 
+ targetTable.getName(), autoIncColumnNames.get(0)).getObject(String.class); + new SqlExecutor(targetScope).execute(new SQLFragment("ALTER SEQUENCE " + sequenceName + " RESTART WITH " + current + 1)); + } + else + { + _log.warn("Unexpected number of auto-increment columns for {}: {}", targetTable.getSelectName(), autoIncColumnNames); + } + } + } + catch (Exception e) + { + _log.error("Exception: ", e); + } + } + } + } + } + + // We can't handle provisioned tables yet, so (for now) delete all audit table remnants, allowing the new DB to create these tables on restart + SqlExecutor executor = new SqlExecutor(targetScope); + executor.execute("DELETE FROM exp.PropertyDomain WHERE DomainId IN (SELECT DomainID FROM exp.DomainDescriptor WHERE storageschemaname = 'audit')"); + executor.execute("DELETE FROM exp.DomainDescriptor WHERE storageschemaname = 'audit'"); + executor.execute("DELETE FROM exp.PropertyDescriptor WHERE PropertyId NOT IN (SELECT PropertyId FROM exp.PropertyDomain)"); + + System.exit(0); + } + + private List getTables(DbSchema schema) + { + return new ArrayList<>(schema.getTableNames().stream() + .map(schema::getTable) + .filter(table -> table.getTableType() == DatabaseTableType.TABLE) + .toList()); + } + public void destroy() { // In the case of a startup failure, _modules may be null. We want to allow a context reload to succeed in this case, - // since the reload may contain the code change to fix the problem + // since the reload may contain a code change to fix the problem. var modules = getModules(); if (modules != null) { diff --git a/api/src/org/labkey/api/query/TableSorter.java b/api/src/org/labkey/api/query/TableSorter.java index 3dc927cad60..757a3b72536 100644 --- a/api/src/org/labkey/api/query/TableSorter.java +++ b/api/src/org/labkey/api/query/TableSorter.java @@ -57,7 +57,7 @@ public static List sort(UserSchema schema) for (String tableName : tableNames) tables.put(tableName, schema.getTable(tableName)); - return sort(schemaName, tables); + return sort(schemaName, tables, false); } /** @@ -66,6 +66,16 @@ public static List sort(UserSchema schema) * @throws IllegalStateException if a loop is detected. */ public static List sort(DbSchema schema) + { + return sort(schema, false); + } + + /** + * Get a topologically sorted list of TableInfos within this schema. + * + * @throws IllegalStateException if a loop is detected and tolerateLoops is false. 
+ */ + public static List sort(DbSchema schema, boolean tolerateLoops) { String schemaName = schema.getName(); Set tableNames = new HashSet<>(schema.getTableNames()); @@ -73,10 +83,10 @@ public static List sort(DbSchema schema) for (String tableName : tableNames) tables.put(tableName, schema.getTable(tableName)); - return sort(schemaName, tables); + return sort(schemaName, tables, tolerateLoops); } - private static List sort(String schemaName, Map tables) + private static List sort(String schemaName, Map tables, boolean tolerateLoops) { if (tables.isEmpty()) return Collections.emptyList(); @@ -145,14 +155,14 @@ private static List sort(String schemaName, Map ta Set visited = new HashSet<>(tables.size()); List sorted = new ArrayList<>(tables.size()); for (String tableName : startTables) - depthFirstWalk(schemaName, tables, tables.get(tableName), visited, new LinkedList<>(), sorted); + depthFirstWalk(schemaName, tables, tables.get(tableName), visited, new LinkedList<>(), sorted, tolerateLoops); return sorted; } - private static void depthFirstWalk(String schemaName, Map tables, TableInfo table, Set visited, LinkedList> visitingPath, List sorted) + private static void depthFirstWalk(String schemaName, Map tables, TableInfo table, Set visited, LinkedList> visitingPath, List sorted, boolean tolerateLoops) { - if (hasLoop(visitingPath, table)) + if (!tolerateLoops && hasLoop(visitingPath, table)) { String msg = "Loop detected in schema '" + schemaName + "':\n" + formatPath(visitingPath); if (anyHaveContainerColumn(visitingPath)) @@ -208,7 +218,7 @@ private static void depthFirstWalk(String schemaName, Map tab if (lookupTable != null) { visitingPath.addLast(Tuple3.of(table, column, lookupTable)); - depthFirstWalk(schemaName, tables, lookupTable, visited, visitingPath, sorted); + depthFirstWalk(schemaName, tables, lookupTable, visited, visitingPath, sorted, tolerateLoops); visitingPath.removeLast(); } } diff --git a/api/src/org/labkey/api/settings/AppPropsImpl.java b/api/src/org/labkey/api/settings/AppPropsImpl.java index 80d9b622895..091f0b7e9a6 100644 --- a/api/src/org/labkey/api/settings/AppPropsImpl.java +++ b/api/src/org/labkey/api/settings/AppPropsImpl.java @@ -376,7 +376,7 @@ public String getServerGUID() } } String serverGUID = lookupStringValue(SERVER_GUID, SERVER_SESSION_GUID); - if (serverGUID.equals(SERVER_SESSION_GUID)) + if (serverGUID.equals(SERVER_SESSION_GUID) && ModuleLoader.getInstance().shouldInsertData()) { try (var ignore = SpringActionController.ignoreSqlUpdates()) { diff --git a/api/src/org/labkey/api/util/SystemMaintenanceStartupListener.java b/api/src/org/labkey/api/util/SystemMaintenanceStartupListener.java index 5893d22b79c..995a7eb066f 100644 --- a/api/src/org/labkey/api/util/SystemMaintenanceStartupListener.java +++ b/api/src/org/labkey/api/util/SystemMaintenanceStartupListener.java @@ -55,7 +55,9 @@ public void handle(Map properties) .collect( Collectors.partitioningBy(prop -> Boolean.valueOf(prop.getValue()), Collectors.mapping(StartupPropertyEntry::getName, Collectors.toSet())) ); - SystemMaintenance.ensureTaskProperties(map.get(true), map.get(false)); + + if (!properties.isEmpty()) + SystemMaintenance.ensureTaskProperties(map.get(true), map.get(false)); } } } \ No newline at end of file diff --git a/assay/resources/schemas/dbscripts/postgresql/assay-24.001-24.002.sql b/assay/resources/schemas/dbscripts/postgresql/assay-24.001-24.002.sql index 371e9cbbca9..8beda105fa1 100644 --- a/assay/resources/schemas/dbscripts/postgresql/assay-24.001-24.002.sql +++ 
b/assay/resources/schemas/dbscripts/postgresql/assay-24.001-24.002.sql @@ -10,6 +10,7 @@ CREATE TABLE assay.PlateType CONSTRAINT UQ_PlateType_Rows_Cols UNIQUE (Rows, Columns) ); +@SkipOnEmptySchemasBegin INSERT INTO assay.PlateType (Rows, Columns, Description) VALUES (3, 4, '12 well (3x4)'); INSERT INTO assay.PlateType (Rows, Columns, Description) VALUES (4, 6, '24 well (4x6)'); INSERT INTO assay.PlateType (Rows, Columns, Description) VALUES (6, 8, '48 well (6x8)'); @@ -17,6 +18,7 @@ INSERT INTO assay.PlateType (Rows, Columns, Description) VALUES (8, 12, '96 well INSERT INTO assay.PlateType (Rows, Columns, Description) VALUES (16, 24, '384 well (16x24)'); INSERT INTO assay.PlateType (Rows, Columns, Description, Archived) VALUES (32, 48, '1536 well (32x48)', TRUE); INSERT INTO assay.PlateType (Rows, Columns, Description, Archived) VALUES (0, 0, 'Invalid Plate Type (Plates which were created with non-valid row & column combinations)', TRUE); +@SkipOnEmptySchemasEnd -- Rename type column to assayType ALTER TABLE assay.Plate RENAME COLUMN Type TO AssayType; diff --git a/audit/src/org/labkey/audit/AuditLogImpl.java b/audit/src/org/labkey/audit/AuditLogImpl.java index 369e7e7fdb2..d93a35c3c31 100644 --- a/audit/src/org/labkey/audit/AuditLogImpl.java +++ b/audit/src/org/labkey/audit/AuditLogImpl.java @@ -38,6 +38,7 @@ import org.labkey.api.data.Sort; import org.labkey.api.data.TableSelector; import org.labkey.api.exp.api.ExperimentService; +import org.labkey.api.module.ModuleLoader; import org.labkey.api.query.FieldKey; import org.labkey.api.query.QueryService; import org.labkey.api.query.UserSchema; @@ -92,7 +93,8 @@ public static AuditLogImpl get() private AuditLogImpl() { - ContextListener.addStartupListener(this); + if (ModuleLoader.getInstance().shouldInsertData()) + ContextListener.addStartupListener(this); } @Override diff --git a/core/resources/schemas/dbscripts/postgresql/core-0.000-24.000.sql b/core/resources/schemas/dbscripts/postgresql/core-0.000-24.000.sql index c3b1dfccb35..241367f599b 100644 --- a/core/resources/schemas/dbscripts/postgresql/core-0.000-24.000.sql +++ b/core/resources/schemas/dbscripts/postgresql/core-0.000-24.000.sql @@ -46,19 +46,17 @@ CREATE TABLE core.Logins CREATE TABLE core.Principals ( - UserId SERIAL, -- user or group + UserId INT GENERATED BY DEFAULT AS IDENTITY ( MINVALUE 1000 ), -- user or group Container ENTITYID, -- NULL for all users, NOT NULL for _ALL_ groups OwnerId ENTITYID NULL, Name VARCHAR(64), -- email (must contain @ and .), group name (no punctuation), or hidden (no @) - Type CHAR(1), -- 'u'=user 'g'=group (NYI 'r'=role, 'm'=managed(module specific) + Type CHAR(1), -- 'u'=user 'g'=group 'm'=module-specific Active BOOLEAN NOT NULL DEFAULT TRUE, CONSTRAINT PK_Principals PRIMARY KEY (UserId), CONSTRAINT UQ_Principals_Container_Name_OwnerId UNIQUE (Container, Name, OwnerId) ); -SELECT SETVAL('core.Principals_UserId_Seq', 1000); - -- maps users to groups CREATE TABLE core.Members ( @@ -474,6 +472,7 @@ CREATE TABLE core.EmailOptions CONSTRAINT PK_EmailOptions PRIMARY KEY (EmailOptionId) ); +@SkipOnEmptySchemasBegin INSERT INTO core.EmailOptions (EmailOptionId, EmailOption) VALUES (0, 'No Email'); INSERT INTO core.EmailOptions (EmailOptionId, EmailOption) VALUES (1, 'All conversations'); INSERT INTO core.EmailOptions (EmailOptionId, EmailOption) VALUES (2, 'My conversations'); @@ -493,6 +492,7 @@ INSERT INTO core.emailOptions (EmailOptionId, EmailOption, Type) VALUES (702, 'A -- labbook email notification options INSERT INTO core.emailOptions 
(EmailOptionId, EmailOption, Type) VALUES (801, 'No Email', 'labbook'); INSERT INTO core.emailOptions (EmailOptionId, EmailOption, Type) VALUES (802, 'All emails', 'labbook'); +@SkipOnEmptySchemasEnd CREATE TABLE core.EmailPrefs ( diff --git a/core/src/org/labkey/core/CoreModule.java b/core/src/org/labkey/core/CoreModule.java index d3c559450b5..6e60a28c0f0 100644 --- a/core/src/org/labkey/core/CoreModule.java +++ b/core/src/org/labkey/core/CoreModule.java @@ -81,7 +81,9 @@ import org.labkey.api.data.dialect.SqlDialectRegistry; import org.labkey.api.data.statistics.StatsService; import org.labkey.api.dataiterator.SimpleTranslator; +import org.labkey.api.exp.OntologyManager; import org.labkey.api.exp.property.PropertyService; +import org.labkey.api.exp.property.SystemProperty; import org.labkey.api.exp.property.TestDomainKind; import org.labkey.api.external.tools.ExternalToolsViewService; import org.labkey.api.files.FileBrowserConfigImporter; @@ -372,8 +374,6 @@ public class CoreModule extends SpringModule implements SearchService.DocumentPr private static final Logger LOG = LogHelper.getLogger(CoreModule.class, "Errors during server startup and shut down"); public static final String PROJECTS_WEB_PART_NAME = "Projects"; - static Runnable _afterUpdateRunnable = null; - static { // Accept most of the standard Quartz properties, but set the misfire threshold to five minutes. This prevents @@ -842,7 +842,8 @@ public void afterUpdate(ModuleContext moduleContext) } // Increment on every core module upgrade to defeat browser caching of static resources. - WriteableAppProps.incrementLookAndFeelRevisionAndSave(); + if (ModuleLoader.getInstance().shouldInsertData()) + WriteableAppProps.incrementLookAndFeelRevisionAndSave(); // Allow dialect to make adjustments to the just upgraded core database (e.g., install aggregate functions, etc.) CoreSchema.getInstance().getSqlDialect().afterCoreUpgrade(moduleContext); @@ -858,66 +859,76 @@ public void afterUpdate(ModuleContext moduleContext) // core.Containers metadata and a few common containers. This may prevent some deadlocks during upgrade, #33550. 
CacheManager.addListener(() -> { ContainerManager.getRoot(); - ContainerManager.getHomeContainer(); ContainerManager.getSharedContainer(); + if (ModuleLoader.getInstance().shouldInsertData()) + { + ContainerManager.getHomeContainer(); + } }); - - if (_afterUpdateRunnable != null) - _afterUpdateRunnable.run(); } private void bootstrap() { - // Create the initial groups - GroupManager.bootstrapGroup(Group.groupUsers, "Users"); - GroupManager.bootstrapGroup(Group.groupGuests, "Guests"); - - // Other containers inherit permissions from root; admins get all permissions, users & guests none - Role noPermsRole = RoleManager.getRole(NoPermissionsRole.class); - Role readerRole = RoleManager.getRole(ReaderRole.class); + if (ModuleLoader.getInstance().shouldInsertData()) + { + // Create the initial groups + GroupManager.bootstrapGroup(Group.groupUsers, "Users"); + GroupManager.bootstrapGroup(Group.groupGuests, "Guests"); - ContainerManager.bootstrapContainer("/", noPermsRole, noPermsRole); - Container rootContainer = ContainerManager.getRoot(); + // Other containers inherit permissions from root; admins get all permissions, users & guests none + Role noPermsRole = RoleManager.getRole(NoPermissionsRole.class); + Role readerRole = RoleManager.getRole(ReaderRole.class); - // Create all the standard containers (Home, Home/support, Shared) using an empty Collaboration folder type - FolderType collaborationType = new CollaborationFolderType(Collections.emptyList()); + ContainerManager.bootstrapContainer("/", noPermsRole, noPermsRole); + Container rootContainer = ContainerManager.getRoot(); - // Users & guests can read from /home - Container home = ContainerManager.bootstrapContainer(ContainerManager.HOME_PROJECT_PATH, readerRole, readerRole); - home.setFolderType(collaborationType, null); + // Create all the standard containers (Home, Home/support, Shared) using an empty Collaboration folder type + FolderType collaborationType = new CollaborationFolderType(Collections.emptyList()); - ContainerManager.createDefaultSupportContainer().setFolderType(collaborationType, null); + // Users & guests can read from /home + Container home = ContainerManager.bootstrapContainer(ContainerManager.HOME_PROJECT_PATH, readerRole, readerRole); + home.setFolderType(collaborationType, null); - // Only users can read from /Shared - ContainerManager.bootstrapContainer(ContainerManager.SHARED_CONTAINER_PATH, readerRole, null).setFolderType(collaborationType, null); + ContainerManager.createDefaultSupportContainer().setFolderType(collaborationType, null); - try - { - // Need to insert standard MV indicators for the root -- okay to call getRoot() since we just created it. - String rootContainerId = rootContainer.getId(); - TableInfo mvTable = CoreSchema.getInstance().getTableInfoMvIndicators(); + // Only users can read from /Shared + ContainerManager.bootstrapContainer(ContainerManager.SHARED_CONTAINER_PATH, readerRole, null).setFolderType(collaborationType, null); - for (Map.Entry qcEntry : MvUtil.getDefaultMvIndicators().entrySet()) + try { - Map params = new HashMap<>(); - params.put("Container", rootContainerId); - params.put("MvIndicator", qcEntry.getKey()); - params.put("Label", qcEntry.getValue()); + // Need to insert standard MV indicators for the root -- okay to call getRoot() since we just created it. 
+ String rootContainerId = rootContainer.getId(); + TableInfo mvTable = CoreSchema.getInstance().getTableInfoMvIndicators(); - Table.insert(null, mvTable, params); + for (Map.Entry qcEntry : MvUtil.getDefaultMvIndicators().entrySet()) + { + Map params = new HashMap<>(); + params.put("Container", rootContainerId); + params.put("MvIndicator", qcEntry.getKey()); + params.put("Label", qcEntry.getValue()); + + Table.insert(null, mvTable, params); + } + } + catch (Throwable t) + { + ExceptionUtil.logExceptionToMothership(null, t); } + + List guids = Stream.of(ContainerManager.HOME_PROJECT_PATH, ContainerManager.SHARED_CONTAINER_PATH) + .map(ContainerManager::getForPath) + .filter(Objects::nonNull) + .map(Container::getEntityId) + .collect(Collectors.toList()); + ContainerManager.setExcludedProjects(guids, () -> {}); } - catch (Throwable t) + else { - ExceptionUtil.logExceptionToMothership(null, t); + // It's very difficult to bootstrap without the root or shared containers in place; create them now and + // we'll delete them later + Container root = ContainerManager.ensureContainer("/", User.getAdminServiceUser()); + Table.insert(null, CoreSchema.getInstance().getTableInfoContainers(), Map.of("Parent", root.getId(), "Name", "Shared")); } - - List guids = Stream.of(ContainerManager.HOME_PROJECT_PATH, ContainerManager.SHARED_CONTAINER_PATH) - .map(ContainerManager::getForPath) - .filter(Objects::nonNull) - .map(Container::getEntityId) - .collect(Collectors.toList()); - ContainerManager.setExcludedProjects(guids, () -> {}); } @@ -954,11 +965,10 @@ public void startupAfterSpringConfig(ModuleContext moduleContext) AnalyticsServiceImpl.get().resetCSP(); - if (moduleContext.isNewInstall()) + if (moduleContext.isNewInstall() && ModuleLoader.getInstance().shouldInsertData()) { - // In order to initialize the portal layout correctly, we need to add the web parts after the folder - // types have been registered. Thus, needs to be here in startupAfterSpringConfig() instead of grouped - // in bootstrap(). + // To initialize the portal layout correctly, we need to add the web parts after the folder types have been + // registered. Thus, it needs to be here in startupAfterSpringConfig() instead of grouped in bootstrap(). 
Container homeContainer = ContainerManager.getHomeContainer(); int count = Portal.getParts(homeContainer, homeContainer.getFolderType().getDefaultPageId(homeContainer)).size(); addWebPart(PROJECTS_WEB_PART_NAME, homeContainer, HttpView.BODY, count); @@ -1148,7 +1158,8 @@ public void moduleStartupComplete(ServletContext servletContext) if (null != PropertyService.get()) { PropertyService.get().registerDomainKind(new UsersDomainKind()); - UsersDomainKind.ensureDomain(moduleContext); + if (ModuleLoader.getInstance().shouldInsertData()) + UsersDomainKind.ensureDomain(moduleContext); } // Register the standard, wiki-based terms-of-use provider diff --git a/core/src/org/labkey/core/dialect/PostgreSql92Dialect.java b/core/src/org/labkey/core/dialect/PostgreSql92Dialect.java index c653dbb4cc4..9e6e7b931bb 100644 --- a/core/src/org/labkey/core/dialect/PostgreSql92Dialect.java +++ b/core/src/org/labkey/core/dialect/PostgreSql92Dialect.java @@ -24,6 +24,7 @@ import org.labkey.api.data.dialect.JdbcHelper; import org.labkey.api.data.dialect.BasePostgreSqlDialect; import org.labkey.api.data.dialect.StandardJdbcHelper; +import org.labkey.api.module.ModuleLoader; import org.labkey.api.util.HtmlString; import org.labkey.api.util.StringUtilsLabKey; import org.labkey.api.view.template.Warnings; @@ -204,4 +205,13 @@ private static String truncateBytes(String str, int maxBytes) assert !StringUtilsLabKey.hasBrokenSurrogate(str); return str; } + + private static final Pattern SKIP_INSERTS_PATTERN = Pattern.compile("^\\s*@SkipOnEmptySchemasBegin$(.+?)^\\s*@SkipOnEmptySchemasEnd\\s*$", Pattern.MULTILINE + Pattern.DOTALL); + private static final Pattern STRIP_ANNOTATIONS_PATTERN = Pattern.compile("^\\s*@SkipOnEmptySchemasBegin$|^\\s*@SkipOnEmptySchemasEnd\\s*$", Pattern.MULTILINE + Pattern.DOTALL); + + @Override // Temporary override to help with SQL Server migration + protected Pattern getSQLScriptSplitPattern() + { + return ModuleLoader.getInstance().shouldInsertData() ? 
STRIP_ANNOTATIONS_PATTERN : SKIP_INSERTS_PATTERN; + } } diff --git a/experiment/src/org/labkey/experiment/ExperimentModule.java b/experiment/src/org/labkey/experiment/ExperimentModule.java index 5c0557e33fa..3b8413cf37d 100644 --- a/experiment/src/org/labkey/experiment/ExperimentModule.java +++ b/experiment/src/org/labkey/experiment/ExperimentModule.java @@ -71,6 +71,7 @@ import org.labkey.api.files.FileContentService; import org.labkey.api.files.TableUpdaterFileListener; import org.labkey.api.module.ModuleContext; +import org.labkey.api.module.ModuleLoader; import org.labkey.api.module.SpringModule; import org.labkey.api.module.Summary; import org.labkey.api.ontology.OntologyService; @@ -557,7 +558,8 @@ public void containerDeleted(Container c, User user) // but it should be before the CoreContainerListener ContainerManager.ContainerListener.Order.Last); - SystemProperty.registerProperties(); + if (ModuleLoader.getInstance().shouldInsertData()) + SystemProperty.registerProperties(); FolderSerializationRegistry folderRegistry = FolderSerializationRegistry.get(); if (null != folderRegistry) diff --git a/mothership/src/org/labkey/mothership/MothershipModule.java b/mothership/src/org/labkey/mothership/MothershipModule.java index 471eecdde46..a20aa2d06ad 100644 --- a/mothership/src/org/labkey/mothership/MothershipModule.java +++ b/mothership/src/org/labkey/mothership/MothershipModule.java @@ -22,6 +22,7 @@ import org.labkey.api.module.DefaultModule; import org.labkey.api.module.Module; import org.labkey.api.module.ModuleContext; +import org.labkey.api.module.ModuleLoader; import org.labkey.api.security.User; import org.labkey.api.security.UserManager; import org.labkey.api.util.MothershipReport; @@ -89,7 +90,7 @@ public boolean hasScripts() @Override public void afterUpdate(ModuleContext moduleContext) { - if (moduleContext.isNewInstall()) + if (moduleContext.isNewInstall() && ModuleLoader.getInstance().shouldInsertData()) bootstrap(moduleContext); } diff --git a/wiki/src/org/labkey/wiki/WikiModule.java b/wiki/src/org/labkey/wiki/WikiModule.java index 096fd62a081..01000e6dc7d 100644 --- a/wiki/src/org/labkey/wiki/WikiModule.java +++ b/wiki/src/org/labkey/wiki/WikiModule.java @@ -28,6 +28,7 @@ import org.labkey.api.data.SqlExecutor; import org.labkey.api.module.CodeOnlyModule; import org.labkey.api.module.ModuleContext; +import org.labkey.api.module.ModuleLoader; import org.labkey.api.search.SearchService; import org.labkey.api.security.User; import org.labkey.api.util.PageFlowUtil; @@ -123,20 +124,23 @@ public void doStartup(ModuleContext moduleContext) private void bootstrap(ModuleContext moduleContext) { - Container supportContainer = ContainerManager.getDefaultSupportContainer(); - Container homeContainer = ContainerManager.getHomeContainer(); - Container sharedContainer = ContainerManager.getSharedContainer(); - String defaultPageName = "default"; + if (ModuleLoader.getInstance().shouldInsertData()) + { + Container supportContainer = ContainerManager.getDefaultSupportContainer(); + Container homeContainer = ContainerManager.getHomeContainer(); + Container sharedContainer = ContainerManager.getSharedContainer(); + String defaultPageName = "default"; - loadWikiContent(homeContainer, moduleContext.getUpgradeUser(), defaultPageName, "Welcome to LabKey Server", "/org/labkey/wiki/welcomeWiki.txt"); - loadWikiContent(supportContainer, moduleContext.getUpgradeUser(), defaultPageName, "Welcome to LabKey Support", "/org/labkey/wiki/supportWiki.txt"); - loadWikiContent(sharedContainer, 
moduleContext.getUpgradeUser(), defaultPageName, "Shared Resources", "/org/labkey/wiki/sharedWiki.txt"); + loadWikiContent(homeContainer, moduleContext.getUpgradeUser(), defaultPageName, "Welcome to LabKey Server", "/org/labkey/wiki/welcomeWiki.txt"); + loadWikiContent(supportContainer, moduleContext.getUpgradeUser(), defaultPageName, "Welcome to LabKey Support", "/org/labkey/wiki/supportWiki.txt"); + loadWikiContent(sharedContainer, moduleContext.getUpgradeUser(), defaultPageName, "Shared Resources", "/org/labkey/wiki/sharedWiki.txt"); - addWebPart(supportContainer, defaultPageName); - addWebPart(sharedContainer, defaultPageName); + addWebPart(supportContainer, defaultPageName); + addWebPart(sharedContainer, defaultPageName); - // Add a wiki webpart with the default content. - addWebPart(homeContainer, defaultPageName); + // Add a wiki webpart with the default content. + addWebPart(homeContainer, defaultPageName); + } } private void addWebPart(@Nullable Container c, String wikiName) @@ -159,7 +163,7 @@ public Collection getSummary(Container c) { int count = WikiSelectManager.getPageCount(c); if (count > 0) - list.add("" + count + " Wiki Page" + (count > 1 ? "s" : "")); + list.add(count + " Wiki Page" + (count > 1 ? "s" : "")); } catch (Exception x) { From dade22953b4b969b43b60c6b0c9a726c0caa6fc7 Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Wed, 23 Jul 2025 20:58:48 -0700 Subject: [PATCH 02/17] migrateDataSource argument. Improve sequence handling. --- .../org/labkey/api/module/ModuleLoader.java | 76 ++++++++++++------- 1 file changed, 49 insertions(+), 27 deletions(-) diff --git a/api/src/org/labkey/api/module/ModuleLoader.java b/api/src/org/labkey/api/module/ModuleLoader.java index 79d614e0e00..5a3346fe507 100644 --- a/api/src/org/labkey/api/module/ModuleLoader.java +++ b/api/src/org/labkey/api/module/ModuleLoader.java @@ -192,10 +192,12 @@ public class ModuleLoader implements MemTrackerListener, ShutdownListener private UpgradeState _upgradeState; private final SqlScriptRunner _upgradeScriptRunner = new SqlScriptRunner(); - // emptySchemas flag is used to bootstrap all the database schemas without populating them with any data rows. This - // will be useful in the future when migrating SQL Server deployments to PostgreSQL. Note that a bootstrapped - // "emptySchemas" database will not be left in a usable state (until we actually implement the migration step). - private final boolean _emptySchemas = Boolean.valueOf(System.getProperty("emptySchemas")); + + // This argument is used to specify a Microsoft SQL Server database that is the source of migration to PostgreSQL + private final String _migrationDataSource = System.getProperty("migrationDataSource"); + // This argument is used to bootstrap all the database schemas without populating them with any data rows. It can + // be used by itself to test the empty schema handling without attempting a full migration. + private final boolean _emptySchemas = Boolean.valueOf(System.getProperty("emptySchemas")) || _migrationDataSource != null; // NOTE: the following startup fields are synchronized under STARTUP_LOCK private StartupState _startupState = StartupState.StartupIncomplete; @@ -1997,6 +1999,11 @@ public boolean shouldInsertData() return !(_emptySchemas && isNewInstall() && DbScope.getLabKeyScope().getSqlDialect().isPostgreSQL()); } + public @Nullable String getMigrationDataSource() + { + return _migrationDataSource != null && isNewInstall() && DbScope.getLabKeyScope().getSqlDialect().isPostgreSQL() ? 
_migrationDataSource : null; + } + // If associated properties are set, migrate data from the external SQL Server data source into the just-created // empty PostgreSQL schemas. private void handleDatabaseMigration() @@ -2005,11 +2012,13 @@ private void handleDatabaseMigration() { clearSchemas(); verifyEmptySchemas(); - migrateDatabase(); } + + if (getMigrationDataSource() != null) + migrateDatabase(); } - // As part of SQL Server -> PostgreSQL migration, clear the few tables that are needed for bootstrap + // As part of SQL Server -> PostgreSQL migration, clear containers needed for bootstrap private void clearSchemas() { TableInfo containers = CoreSchema.getInstance().getTableInfoContainers(); @@ -2067,11 +2076,12 @@ private void verifyEmptySchemas() } } + private record Sequence(String schemaName, String tableName, String columnName, int lastValue) {} + private void migrateDatabase() { DbScope targetScope = DbScope.getLabKeyScope(); - - String migrationDataSource = "ssDataSource"; // TODO: add a system property for this + String migrationDataSource = getMigrationDataSource(); DbScope sourceScope = DbScope.getDbScope(migrationDataSource); if (null == sourceScope) throw new ConfigurationException("Migration data source not found: " + migrationDataSource); @@ -2085,7 +2095,28 @@ private void migrateDatabase() throw new IllegalStateException(StringUtilsLabKey.pluralize(sequencesNonOneIncrement.size(), "sequence has", "sequences have") + " an increment other than 1: " + sequencesNonOneIncrement); } - // Get target schemas in module order, which accommodates foreign key relationships + // Select the SQL Server sequences with non-null last value. We'll use the results to set PostgreSQL sequences after copying data. + String sequenceQuery = """ + SELECT + OBJECT_SCHEMA_NAME(tables.object_id, db_id()) AS SchemaName, + tables.name AS TableName, + identity_columns.name AS ColumnName, + identity_columns.seed_value, + identity_columns.increment_value, + identity_columns.last_value + FROM + sys.tables tables + JOIN + sys.identity_columns identity_columns ON tables.object_id = identity_columns.object_id + WHERE last_value IS NOT NULL"""; + Map> sequences = new HashMap<>(); + new SqlSelector(sourceScope, sequenceQuery).forEach(rs -> { + Sequence sequence = new Sequence(rs.getString("SchemaName"), rs.getString("TableName"), rs.getString("ColumnName"), rs.getInt("last_value")); + Map schemaMap = sequences.computeIfAbsent(sequence.schemaName(), s -> new HashMap<>()); + schemaMap.put(sequence.tableName(), sequence); + }); + + // Get target schemas in module order, which helps with foreign key relationships List schemas = getModules().stream() .flatMap(module -> module.getSchemaNames().stream().filter(name -> !module.getProvisionedSchemaNames().contains(name))) .map(name -> targetScope.getSchema(name, DbSchemaType.Module)) @@ -2118,6 +2149,7 @@ private void migrateDatabase() _log.warn("Differences: {} and {}", sourceTableNames, targetTableNames); } + Map schemaSequenceMap = sequences.getOrDefault(sourceSchema.getName(), Map.of()); Set tablesToIgnore = targetSchema.getName().equals("core") ? 
Sets.newCaseInsensitiveHashSet("modules", "sqlscripts", "upgradesteps") : Set.of(); for (TableInfo targetTable : TableSorter.sort(targetSchema, true)) { @@ -2181,24 +2213,14 @@ else if (sourceTable.getTableType() == DatabaseTableType.TABLE) // Skip the view } }); - // TODO: Better approach is probably to query "SELECT * FROM sys.identity_columns WHERE Last_value IS NOT NULL" and set corresponding source sequences to last_value (+1?) - Long current = new SqlSelector(sourceSchema, "SELECT IDENT_CURRENT(?)", sourceTable.getSelectName()).getObject(Long.class); - if (null != current) + Sequence sequence = schemaSequenceMap.get(targetTable.getName()); + if (sequence != null) { - _log.info("{}: {}", sourceTable.getSelectName(), current); - List autoIncColumnNames = targetTable.getPkColumns().stream() - .filter(ColumnInfo::isAutoIncrement) - .map(col -> col.getSelectIdentifier().getId()) - .toList(); - if (autoIncColumnNames.size() == 1) - { - String sequenceName = new SqlSelector(targetSchema, "SELECT pg_get_serial_sequence(?, ?)", targetSchema.getName() + "." + targetTable.getName(), autoIncColumnNames.get(0)).getObject(String.class); - new SqlExecutor(targetScope).execute(new SQLFragment("ALTER SEQUENCE " + sequenceName + " RESTART WITH " + current + 1)); - } - else - { - _log.warn("Unexpected number of auto-increment columns for {}: {}", targetTable.getSelectName(), autoIncColumnNames); - } + ColumnInfo targetColumn = targetTable.getColumn(sequence.columnName()); + _log.info("{} autoinc: {}", targetColumn.getName(), targetColumn.isAutoIncrement()); + String sequenceName = new SqlSelector(targetSchema, "SELECT pg_get_serial_sequence(?, ?)", targetSchema.getName() + "." + targetTable.getName(), targetColumn.getSelectIdentifier().getId()) + .getObject(String.class); + new SqlExecutor(targetScope).execute("SELECT setval(?, ?)", sequenceName, sequence.lastValue()); } } catch (Exception e) @@ -2210,7 +2232,7 @@ else if (sourceTable.getTableType() == DatabaseTableType.TABLE) // Skip the view } } - // We can't handle provisioned tables yet, so (for now) delete all audit table remnants, allowing the new DB to create these tables on restart + // We can't handle provisioned tables yet, so (for now) delete all copied audit table remnants, allowing the new DB to create these tables on restart SqlExecutor executor = new SqlExecutor(targetScope); executor.execute("DELETE FROM exp.PropertyDomain WHERE DomainId IN (SELECT DomainID FROM exp.DomainDescriptor WHERE storageschemaname = 'audit')"); executor.execute("DELETE FROM exp.DomainDescriptor WHERE storageschemaname = 'audit'"); From 06aea274280c15447b15f03a608a0ef10e464bb7 Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Thu, 24 Jul 2025 09:57:01 -0700 Subject: [PATCH 03/17] Skip views in source tables. Skip SQL-Server-only column csPath. Log tables that TableSorter removed. 
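
For reference, the sequence synchronization that the previous patch switched to reduces to two statements per identity column. A minimal sketch, assuming targetSchema and targetScope are in scope as in migrateDatabase(); the core.principals/userid names and the lastValue variable are illustrative, not taken from the patch:

    // Resolve the sequence that backs an identity/serial column
    String sequenceName = new SqlSelector(targetSchema,
            "SELECT pg_get_serial_sequence(?, ?)", "core.principals", "userid")
        .getObject(String.class);

    // Position the sequence at the last value copied from SQL Server;
    // setval(seq, n) with the default is_called = true makes nextval() return n + 1
    new SqlExecutor(targetScope).execute("SELECT setval(?, ?)", sequenceName, lastValue);
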
--- .../org/labkey/api/module/ModuleLoader.java | 32 ++++++++++++++++--- api/src/org/labkey/api/query/TableSorter.java | 4 --- 2 files changed, 27 insertions(+), 9 deletions(-) diff --git a/api/src/org/labkey/api/module/ModuleLoader.java b/api/src/org/labkey/api/module/ModuleLoader.java index 5a3346fe507..c364a7a2881 100644 --- a/api/src/org/labkey/api/module/ModuleLoader.java +++ b/api/src/org/labkey/api/module/ModuleLoader.java @@ -129,6 +129,7 @@ import java.util.Comparator; import java.util.Date; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.LinkedList; @@ -2151,8 +2152,18 @@ private void migrateDatabase() Map schemaSequenceMap = sequences.getOrDefault(sourceSchema.getName(), Map.of()); Set tablesToIgnore = targetSchema.getName().equals("core") ? Sets.newCaseInsensitiveHashSet("modules", "sqlscripts", "upgradesteps") : Set.of(); - for (TableInfo targetTable : TableSorter.sort(targetSchema, true)) + List sortedTables = TableSorter.sort(targetSchema, true); + Set allTables = targetSchema.getTableNames().stream() + .map(targetSchema::getTable) + .collect(Collectors.toCollection(HashSet::new)); + allTables.removeAll(sortedTables); + if (!allTables.isEmpty()) + _log.info("These tables were removed by TableSorter: {}", allTables); + for (TableInfo targetTable : sortedTables) { + // Skip the views and virtual tables (e.g., test.Containers2) + if (targetTable.getTableType() != DatabaseTableType.TABLE) + continue; String targetTableName = targetTable.getName(); if (tablesToIgnore.contains(targetTableName)) continue; @@ -2162,10 +2173,15 @@ private void migrateDatabase() { _log.warn("Source schema has no table named '{}'", targetTableName); } - else if (sourceTable.getTableType() == DatabaseTableType.TABLE) // Skip the views + else if (sourceTable.getTableType() != DatabaseTableType.TABLE) // Skip the views + { + _log.warn("{} is not a table!", sourceTable); + } + else { Set columnNames = sourceTable.getColumnNameSet().stream() .filter(name -> !"_ts".equals(name)) // TODO: remove only if column has a default set? + .filter(name -> !"csPath".equals(name)) // search.CrawlCollections.csPath is SQL Server-only .collect(Collectors.toSet()); TableSelector sourceSelector = new TableSelector(sourceTable, columnNames).setJdbcCaching(false); @@ -2173,7 +2189,14 @@ else if (sourceTable.getTableType() == DatabaseTableType.TABLE) // Skip the view { Collection sourceColumns = sourceSelector.getSelectedColumns(); // Map the source columns to the target columns so we get the right order and casing for INSERT, etc. 
- Collection targetColumns = sourceColumns.stream().map(sourceCol -> targetTable.getColumn(sourceCol.getName())).toList(); + Collection targetColumns = sourceColumns.stream() + .map(sourceCol -> targetTable.getColumn(sourceCol.getName())) + .peek(targetColumn -> { + if (null == targetColumn) + _log.info("null target column in: {}", sourceTable); + }) + .filter(Objects::nonNull) + .toList(); String q = StringUtils.join(Collections.nCopies(sourceColumns.size(), "?"), ", "); SQLFragment sql = new SQLFragment("INSERT INTO ") .append(targetTable.getSelectName()) @@ -2209,7 +2232,7 @@ else if (sourceTable.getTableType() == DatabaseTableType.TABLE) // Skip the view } catch (SQLException e) { - throw new RuntimeException(e); + throw new RuntimeException("Exception while migrating data from " + sourceTable, e); } }); @@ -2217,7 +2240,6 @@ else if (sourceTable.getTableType() == DatabaseTableType.TABLE) // Skip the view if (sequence != null) { ColumnInfo targetColumn = targetTable.getColumn(sequence.columnName()); - _log.info("{} autoinc: {}", targetColumn.getName(), targetColumn.isAutoIncrement()); String sequenceName = new SqlSelector(targetSchema, "SELECT pg_get_serial_sequence(?, ?)", targetSchema.getName() + "." + targetTable.getName(), targetColumn.getSelectIdentifier().getId()) .getObject(String.class); new SqlExecutor(targetScope).execute("SELECT setval(?, ?)", sequenceName, sequence.lastValue()); diff --git a/api/src/org/labkey/api/query/TableSorter.java b/api/src/org/labkey/api/query/TableSorter.java index 757a3b72536..12412f7c21c 100644 --- a/api/src/org/labkey/api/query/TableSorter.java +++ b/api/src/org/labkey/api/query/TableSorter.java @@ -36,10 +36,6 @@ import java.util.Set; import java.util.stream.Stream; -/** - * User: kevink - * Date: 4/30/13 - */ public final class TableSorter { private static final Logger LOG = LogManager.getLogger(TableSorter.class); From c129a969995e0f963ea0a965ca0edce23a97dea7 Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Fri, 25 Jul 2025 10:34:00 -0700 Subject: [PATCH 04/17] Clarify constant name --- api/src/org/labkey/api/settings/AppProps.java | 2 +- api/src/org/labkey/api/settings/AppPropsImpl.java | 2 +- api/src/org/labkey/api/settings/WriteableAppProps.java | 2 +- .../labkey/core/security/AllowedExternalResourceHosts.java | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/api/src/org/labkey/api/settings/AppProps.java b/api/src/org/labkey/api/settings/AppProps.java index c7c7cc781fb..a4befe5bce7 100644 --- a/api/src/org/labkey/api/settings/AppProps.java +++ b/api/src/org/labkey/api/settings/AppProps.java @@ -48,7 +48,7 @@ public interface AppProps String EXPERIMENTAL_BLOCKER = "blockMaliciousClients"; String EXPERIMENTAL_RESOLVE_PROPERTY_URI_COLUMNS = "resolve-property-uri-columns"; String DEPRECATED_OBJECT_LEVEL_DISCUSSIONS = "deprecatedObjectLevelDiscussions"; - String ALLOWED_EXTERNAL_RESOURCES = "allowedExternalResources"; + String ADMIN_PROVIDED_ALLOWED_EXTERNAL_RESOURCES = "allowedExternalResources"; String UNKNOWN_VERSION = "Unknown Release Version"; diff --git a/api/src/org/labkey/api/settings/AppPropsImpl.java b/api/src/org/labkey/api/settings/AppPropsImpl.java index 091f0b7e9a6..cf1bb5c4c9d 100644 --- a/api/src/org/labkey/api/settings/AppPropsImpl.java +++ b/api/src/org/labkey/api/settings/AppPropsImpl.java @@ -745,6 +745,6 @@ public List getExternalSourceHosts() @Override public @NotNull String getAllowedExternalResourceHosts() { - return lookupStringValue(ALLOWED_EXTERNAL_RESOURCES, "[]"); + return 
lookupStringValue(ADMIN_PROVIDED_ALLOWED_EXTERNAL_RESOURCES, "[]"); } } diff --git a/api/src/org/labkey/api/settings/WriteableAppProps.java b/api/src/org/labkey/api/settings/WriteableAppProps.java index b806b0249f4..2012dfe6922 100644 --- a/api/src/org/labkey/api/settings/WriteableAppProps.java +++ b/api/src/org/labkey/api/settings/WriteableAppProps.java @@ -245,7 +245,7 @@ public void setAllowedFileExtensions(Collection allowedFileExtensions) public void setAllowedExternalResourceHosts(String jsonArray) { - storeStringValue(ALLOWED_EXTERNAL_RESOURCES, jsonArray); + storeStringValue(ADMIN_PROVIDED_ALLOWED_EXTERNAL_RESOURCES, jsonArray); } public void setFeatureEnabled(String feature, boolean enabled) diff --git a/core/src/org/labkey/core/security/AllowedExternalResourceHosts.java b/core/src/org/labkey/core/security/AllowedExternalResourceHosts.java index 892e6932e82..d7c09e2041a 100644 --- a/core/src/org/labkey/core/security/AllowedExternalResourceHosts.java +++ b/core/src/org/labkey/core/security/AllowedExternalResourceHosts.java @@ -25,7 +25,7 @@ import java.util.Map; import java.util.stream.Collectors; -import static org.labkey.api.settings.AppProps.ALLOWED_EXTERNAL_RESOURCES; +import static org.labkey.api.settings.AppProps.ADMIN_PROVIDED_ALLOWED_EXTERNAL_RESOURCES; public class AllowedExternalResourceHosts { @@ -74,12 +74,12 @@ public static void saveAllowedHosts(@Nullable Collection allowedHos private static void register(Directive dir, String... hosts) { - ContentSecurityPolicyFilter.registerAllowedSources(ALLOWED_EXTERNAL_RESOURCES, dir, hosts); + ContentSecurityPolicyFilter.registerAllowedSources(ADMIN_PROVIDED_ALLOWED_EXTERNAL_RESOURCES, dir, hosts); } private static void unregister(Directive dir) { - ContentSecurityPolicyFilter.unregisterAllowedSources(ALLOWED_EXTERNAL_RESOURCES, dir); + ContentSecurityPolicyFilter.unregisterAllowedSources(ADMIN_PROVIDED_ALLOWED_EXTERNAL_RESOURCES, dir); } // Returns a mutable list (mutating it won't affect any cached values) From 589b01e4a9a4d9b4ad88df28eda72bcdeebe4696 Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Fri, 25 Jul 2025 12:15:11 -0700 Subject: [PATCH 05/17] Fix TableSorter handling of self-referencing FKs --- api/src/org/labkey/api/data/ForeignKey.java | 2 +- .../org/labkey/api/module/ModuleLoader.java | 33 ++++++++++++------- api/src/org/labkey/api/query/TableSorter.java | 16 +++++---- 3 files changed, 32 insertions(+), 19 deletions(-) diff --git a/api/src/org/labkey/api/data/ForeignKey.java b/api/src/org/labkey/api/data/ForeignKey.java index e751e96da80..baeb862751e 100644 --- a/api/src/org/labkey/api/data/ForeignKey.java +++ b/api/src/org/labkey/api/data/ForeignKey.java @@ -94,7 +94,7 @@ default String getLookupSchemaName() return Objects.toString(getLookupSchemaKey(),null); } - /* Schema path relative to the DefaultSchema (e.g. 
container) */ + /* Schema path relative to the DefaultSchema (e.g., container) */ SchemaKey getLookupSchemaKey(); /** diff --git a/api/src/org/labkey/api/module/ModuleLoader.java b/api/src/org/labkey/api/module/ModuleLoader.java index c364a7a2881..0fcb2f43718 100644 --- a/api/src/org/labkey/api/module/ModuleLoader.java +++ b/api/src/org/labkey/api/module/ModuleLoader.java @@ -2013,10 +2013,10 @@ private void handleDatabaseMigration() { clearSchemas(); verifyEmptySchemas(); - } - if (getMigrationDataSource() != null) - migrateDatabase(); + if (getMigrationDataSource() != null) + migrateDatabase(getMigrationDataSource()); + } } // As part of SQL Server -> PostgreSQL migration, clear containers needed for bootstrap @@ -2079,10 +2079,9 @@ private void verifyEmptySchemas() private record Sequence(String schemaName, String tableName, String columnName, int lastValue) {} - private void migrateDatabase() + private void migrateDatabase(String migrationDataSource) { DbScope targetScope = DbScope.getLabKeyScope(); - String migrationDataSource = getMigrationDataSource(); DbScope sourceScope = DbScope.getDbScope(migrationDataSource); if (null == sourceScope) throw new ConfigurationException("Migration data source not found: " + migrationDataSource); @@ -2151,22 +2150,34 @@ private void migrateDatabase() } Map schemaSequenceMap = sequences.getOrDefault(sourceSchema.getName(), Map.of()); + Set tablesToIgnore = targetSchema.getName().equals("core") ? Sets.newCaseInsensitiveHashSet("modules", "sqlscripts", "upgradesteps") : Set.of(); + List sortedTables = TableSorter.sort(targetSchema, true); Set allTables = targetSchema.getTableNames().stream() .map(targetSchema::getTable) .collect(Collectors.toCollection(HashSet::new)); allTables.removeAll(sortedTables); + if (!allTables.isEmpty()) + { _log.info("These tables were removed by TableSorter: {}", allTables); - for (TableInfo targetTable : sortedTables) + if (targetSchema.getName().equals("comm")) + { + sortedTables.add(targetSchema.getTable("Pages")); + sortedTables.add(targetSchema.getTable("PageVersions")); + } + } + + List tablesToCopy = sortedTables.stream() + // Skip all views and virtual tables (e.g., test.Containers2, which is a table on SS but a view on PG) + .filter(table -> table.getTableType() == DatabaseTableType.TABLE) + .filter(table -> !tablesToIgnore.contains(table.getName())) + .toList(); + + for (TableInfo targetTable : tablesToCopy) { - // Skip the views and virtual tables (e.g., test.Containers2) - if (targetTable.getTableType() != DatabaseTableType.TABLE) - continue; String targetTableName = targetTable.getName(); - if (tablesToIgnore.contains(targetTableName)) - continue; SchemaTableInfo sourceTable = sourceSchema.getTable(targetTableName); if (sourceTable == null) diff --git a/api/src/org/labkey/api/query/TableSorter.java b/api/src/org/labkey/api/query/TableSorter.java index 12412f7c21c..b42f54a12e0 100644 --- a/api/src/org/labkey/api/query/TableSorter.java +++ b/api/src/org/labkey/api/query/TableSorter.java @@ -15,7 +15,6 @@ */ package org.labkey.api.query; -import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.labkey.api.collections.CaseInsensitiveHashMap; import org.labkey.api.collections.CaseInsensitiveHashSet; @@ -25,6 +24,7 @@ import org.labkey.api.data.MultiValuedForeignKey; import org.labkey.api.data.TableInfo; import org.labkey.api.util.Tuple3; +import org.labkey.api.util.logging.LogHelper; import java.util.ArrayList; import java.util.Collections; @@ -38,7 +38,7 @@ public final class 
TableSorter { - private static final Logger LOG = LogManager.getLogger(TableSorter.class); + private static final Logger LOG = LogHelper.getLogger(TableSorter.class, "Warnings about foreign key traversal"); /** * Get a topologically sorted list of TableInfos within this schema. @@ -105,6 +105,7 @@ private static List sort(String schemaName, Map ta if (fk == null || fk instanceof RowIdForeignKey || fk instanceof MultiValuedForeignKey) continue; + String lookupSchemaName = fk.getLookupSchemaName(); // Unfortunately, we need to get the lookup table since some FKs don't expose .getLookupSchemaName() or .getLookupTableName() TableInfo t = null; try @@ -115,12 +116,12 @@ private static List sort(String schemaName, Map ta { // ignore and try to continue String msg = String.format("Failed to traverse fk (%s, %s, %s) from (%s, %s)", - fk.getLookupSchemaName(), fk.getLookupTableName(), fk.getLookupColumnName(), tableName, column.getName()); + lookupSchemaName, fk.getLookupTableName(), fk.getLookupColumnName(), tableName, column.getName()); LOG.warn(msg, qpe); } // Skip lookups to other schemas - if (!(schemaName.equalsIgnoreCase(fk.getLookupSchemaName()) || (t != null && schemaName.equalsIgnoreCase(t.getPublicSchemaName())))) + if (!(schemaName.equalsIgnoreCase(lookupSchemaName) || (t != null && schemaName.equalsIgnoreCase(t.getPublicSchemaName())))) continue; // Get the lookupTableName: Attempt to use FK name first, then use the actual table name if it exists and is in the set of known tables. @@ -129,7 +130,7 @@ private static List sort(String schemaName, Map ta lookupTableName = t.getName(); // Skip self-referencing FKs - if (schemaName.equalsIgnoreCase(fk.getLookupSchemaName()) && lookupTableName.equals(table.getName())) + if (schemaName.equalsIgnoreCase(lookupSchemaName) && lookupTableName.equalsIgnoreCase(table.getName())) continue; // Remove the lookup table from the set of tables with no incoming FK @@ -185,6 +186,7 @@ private static void depthFirstWalk(String schemaName, Map tab if (fk == null || fk instanceof RowIdForeignKey || fk instanceof MultiValuedForeignKey) continue; + String lookupSchemaName = fk.getLookupSchemaName(); // Unfortunately, we need to get the lookup table since some FKs don't expose .getLookupSchemaName() or .getLookupTableName() TableInfo t = null; try @@ -197,7 +199,7 @@ private static void depthFirstWalk(String schemaName, Map tab } // Skip lookups to other schemas - if (!(schemaName.equalsIgnoreCase(fk.getLookupSchemaName()) || (t != null && schemaName.equalsIgnoreCase(t.getPublicSchemaName())))) + if (!(schemaName.equalsIgnoreCase(lookupSchemaName) || (t != null && schemaName.equalsIgnoreCase(t.getPublicSchemaName())))) continue; // Get the lookupTableName: Attempt to use FK name first, then use the actual table name if it exists and is in the set of known tables. @@ -206,7 +208,7 @@ private static void depthFirstWalk(String schemaName, Map tab lookupTableName = t.getName(); // Skip self-referencing FKs - if (schemaName.equalsIgnoreCase(fk.getLookupSchemaName()) && lookupTableName.equals(table.getName())) + if (schemaName.equalsIgnoreCase(lookupSchemaName) && lookupTableName.equalsIgnoreCase(table.getName())) continue; // Continue depthFirstWalk if the lookup table is found in the schema (e.g. 
it exists in this schema and isn't a query) From 527f04e0d9456d0d1eb48b03debe072127d68eeb Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Fri, 25 Jul 2025 14:59:27 -0700 Subject: [PATCH 06/17] Move migration code to a separate class --- .../labkey/api/module/DatabaseMigration.java | 329 ++++++++++++++++++ .../org/labkey/api/module/ModuleLoader.java | 290 +-------------- 2 files changed, 331 insertions(+), 288 deletions(-) create mode 100644 api/src/org/labkey/api/module/DatabaseMigration.java diff --git a/api/src/org/labkey/api/module/DatabaseMigration.java b/api/src/org/labkey/api/module/DatabaseMigration.java new file mode 100644 index 00000000000..de28a054d2e --- /dev/null +++ b/api/src/org/labkey/api/module/DatabaseMigration.java @@ -0,0 +1,329 @@ +package org.labkey.api.module; + +import org.apache.commons.lang3.StringUtils; +import org.apache.logging.log4j.Logger; +import org.jetbrains.annotations.Nullable; +import org.labkey.api.collections.CaseInsensitiveHashSet; +import org.labkey.api.collections.LabKeyCollectors; +import org.labkey.api.collections.Sets; +import org.labkey.api.data.ColumnInfo; +import org.labkey.api.data.CoreSchema; +import org.labkey.api.data.DatabaseTableType; +import org.labkey.api.data.DbSchema; +import org.labkey.api.data.DbSchemaType; +import org.labkey.api.data.DbScope; +import org.labkey.api.data.JdbcType; +import org.labkey.api.data.SQLFragment; +import org.labkey.api.data.SchemaTableInfo; +import org.labkey.api.data.SqlExecutor; +import org.labkey.api.data.SqlSelector; +import org.labkey.api.data.Table; +import org.labkey.api.data.TableInfo; +import org.labkey.api.data.TableSelector; +import org.labkey.api.query.TableSorter; +import org.labkey.api.util.ConfigurationException; +import org.labkey.api.util.StringUtilsLabKey; +import org.labkey.api.util.logging.LogHelper; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +// Handles SQL Server to PostgreSQL data migration +public class DatabaseMigration +{ + private static final Logger LOG = LogHelper.getLogger(DatabaseMigration.class, "Progress of SQL Server to PostgreSQL database migration"); + + // If associated properties are set: clear schemas, verify empty schemas, and migrate data from the external SQL + // Server data source into the just-created empty PostgreSQL schemas. 
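+    //
+    // A minimal sketch of the call sequence this enables (ModuleLoader.afterUpgrade() passes
+    // shouldInsertData() and getMigrationDataSource() as the two arguments):
+    //
+    //   DatabaseMigration.migrate(true, null);             // normal startup: nothing to do
+    //   DatabaseMigration.migrate(false, null);            // bootstrap, then verify the schemas stayed empty
+    //   DatabaseMigration.migrate(false, "mssqlSource");   // bootstrap, verify, then copy rows from SQL Server
+    //
+    // "mssqlSource" is a hypothetical data source name; whatever SQL Server data source is configured for
+    // the deployment would be passed here.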
+    public static void migrate(boolean shouldInsertData, @Nullable String migrationDataSource)
+    {
+        if (!shouldInsertData)
+        {
+            clearSchemas();
+            verifyEmptySchemas();
+
+            if (migrationDataSource != null)
+                migrateDatabase(migrationDataSource);
+        }
+    }
+
+    // Clear containers needed for bootstrap
+    private static void clearSchemas()
+    {
+        TableInfo containers = CoreSchema.getInstance().getTableInfoContainers();
+        Table.delete(containers); // Now that we've bootstrapped, delete root and shared containers
+        DbScope targetScope = DbScope.getLabKeyScope();
+        new SqlExecutor(targetScope).execute("ALTER SEQUENCE core.containers_rowid_seq RESTART"); // Reset Containers sequence
+    }
+
+    // Verify that no data rows were inserted and no sequences were incremented
+    private static void verifyEmptySchemas()
+    {
+        DbScope scope = DbScope.getLabKeyScope();
+
+        Map<Boolean, List<DbSchema>> schemaMap = scope.getSchemaNames().stream()
+            .map(name -> scope.getSchema(name, DbSchemaType.Unknown))
+            .collect(Collectors.partitioningBy(schema -> schema.isModuleSchema() && schema.getModule().getSupportedDatabasesSet().contains(SupportedDatabase.mssql)));
+
+        List<DbSchema> targetSchemas = schemaMap.get(true);
+        List<String> tableWarnings = targetSchemas.stream()
+            .flatMap(schema -> schema.getTableNames().stream()
+                .map(schema::getTable)
+                .filter(table -> table.getTableType() != DatabaseTableType.NOT_IN_DB)
+            )
+            .map(table -> {
+                long rowCount = new TableSelector(table).getRowCount();
+                if (rowCount > 0)
+                    return table.getSelectName() + " has " + StringUtilsLabKey.pluralize(rowCount, "row");
+                else
+                    return null;
+            })
+            .filter(Objects::nonNull)
+            .toList();
+
+        if (!tableWarnings.isEmpty())
+        {
+            LOG.warn("{} rows", StringUtilsLabKey.pluralize(tableWarnings.size(), "table has", "tables have"));
+            tableWarnings.forEach(LOG::warn);
+        }
+
+        List<String> schemasToIgnore = schemaMap.get(false).stream()
+            .map(DbSchema::getName)
+            .toList();
+        String qs = StringUtils.join(Collections.nCopies(schemasToIgnore.size(), "?"), ", ");
+        List<String> sequenceWarnings = new SqlSelector(scope, new SQLFragment(
+            "SELECT schemaname || '.' || sequencename FROM pg_sequences WHERE last_value IS NOT NULL AND schemaname NOT IN (" + qs + ")",
+            schemasToIgnore
+        ))
+            .stream(String.class)
+            .toList();
+
+        if (!sequenceWarnings.isEmpty())
+        {
+            LOG.warn("{} a value:", StringUtilsLabKey.pluralize(sequenceWarnings.size(), "sequence has", "sequences have"));
+            sequenceWarnings.forEach(LOG::warn);
+        }
+    }
+
+    private record Sequence(String schemaName, String tableName, String columnName, int lastValue) {}
+
+    private static void migrateDatabase(String migrationDataSource)
+    {
+        DbScope targetScope = DbScope.getLabKeyScope();
+        DbScope sourceScope = DbScope.getDbScope(migrationDataSource);
+        if (null == sourceScope)
+            throw new ConfigurationException("Migration data source not found: " + migrationDataSource);
+        if (!sourceScope.getSqlDialect().isSqlServer())
+            throw new ConfigurationException("Migration data source is not SQL Server: " + migrationDataSource);
+
+        // Verify that all sequences in the target schema have an increment of 1, since that's an assumption below
+        Collection<String> sequencesNonOneIncrement = new SqlSelector(targetScope, new SQLFragment("SELECT schemaname || '.' 
|| sequencename || ': ' || increment_by FROM pg_sequences WHERE increment_by != 1")).getCollection(String.class); + if (!sequencesNonOneIncrement.isEmpty()) + { + throw new IllegalStateException(StringUtilsLabKey.pluralize(sequencesNonOneIncrement.size(), "sequence has", "sequences have") + " an increment other than 1: " + sequencesNonOneIncrement); + } + + // Select the SQL Server sequences with non-null last value. We'll use the results to set PostgreSQL sequences after copying data. + String sequenceQuery = """ + SELECT + OBJECT_SCHEMA_NAME(tables.object_id, db_id()) AS SchemaName, + tables.name AS TableName, + identity_columns.name AS ColumnName, + identity_columns.seed_value, + identity_columns.increment_value, + identity_columns.last_value + FROM + sys.tables tables + JOIN + sys.identity_columns identity_columns ON tables.object_id = identity_columns.object_id + WHERE last_value IS NOT NULL"""; + Map> sequences = new HashMap<>(); + new SqlSelector(sourceScope, sequenceQuery).forEach(rs -> { + Sequence sequence = new Sequence(rs.getString("SchemaName"), rs.getString("TableName"), rs.getString("ColumnName"), rs.getInt("last_value")); + Map schemaMap = sequences.computeIfAbsent(sequence.schemaName(), s -> new HashMap<>()); + schemaMap.put(sequence.tableName(), sequence); + }); + + // Get target schemas in module order, which helps with foreign key relationships + List schemas = ModuleLoader.getInstance().getModules().stream() + .flatMap(module -> module.getSchemaNames().stream().filter(name -> !module.getProvisionedSchemaNames().contains(name))) + .map(name -> targetScope.getSchema(name, DbSchemaType.Module)) + .toList(); + + for (DbSchema targetSchema : schemas) + { + if (targetSchema.getName().equals("comm")) + { + new SqlExecutor(targetSchema).execute("ALTER TABLE comm.Pages DROP CONSTRAINT FK_Pages_PageVersions"); + } + DbSchema sourceSchema = sourceScope.getSchema(targetSchema.getName(), DbSchemaType.Bare); + if (!sourceSchema.existsInDatabase()) + { + LOG.warn("{} has no schema named '{}'", migrationDataSource, targetSchema.getName()); + } + else + { + List sourceTables = getTables(sourceSchema); + List targetTables = getTables(targetSchema); + + if (sourceTables.size() == targetTables.size()) + { + LOG.debug("{} schemas have the same number of tables ({})", sourceSchema.getName(), targetTables.size()); + } + else + { + LOG.warn("{} schemas have different table counts, {} vs. {}", sourceSchema.getName(), sourceTables.size(), targetTables.size()); + Set sourceTableNames = sourceTables.stream().map(SchemaTableInfo::getName).collect(LabKeyCollectors.toCaseInsensitiveHashSet()); + Set targetTableNames = targetTables.stream().map(SchemaTableInfo::getName).collect(LabKeyCollectors.toCaseInsensitiveHashSet()); + Set sourceTableNamesCopy = new CaseInsensitiveHashSet(sourceTableNames); + sourceTableNames.removeAll(targetTableNames); + targetTableNames.removeAll(sourceTableNamesCopy); + LOG.warn("Differences: {} and {}", sourceTableNames, targetTableNames); + } + + Map schemaSequenceMap = sequences.getOrDefault(sourceSchema.getName(), Map.of()); + + Set tablesToIgnore = targetSchema.getName().equals("core") ? 
Sets.newCaseInsensitiveHashSet("modules", "sqlscripts", "upgradesteps") : Set.of(); + + List sortedTables = TableSorter.sort(targetSchema, true); + Set allTables = targetSchema.getTableNames().stream() + .map(targetSchema::getTable) + .collect(Collectors.toCollection(HashSet::new)); + allTables.removeAll(sortedTables); + + if (!allTables.isEmpty()) + { + LOG.info("These tables were removed by TableSorter: {}", allTables); + if (targetSchema.getName().equals("comm")) + { + sortedTables.add(targetSchema.getTable("Pages")); + sortedTables.add(targetSchema.getTable("PageVersions")); + } + } + + List tablesToCopy = sortedTables.stream() + // Skip all views and virtual tables (e.g., test.Containers2, which is a table on SS but a view on PG) + .filter(table -> table.getTableType() == DatabaseTableType.TABLE) + .filter(table -> !tablesToIgnore.contains(table.getName())) + .toList(); + + for (TableInfo targetTable : tablesToCopy) + { + String targetTableName = targetTable.getName(); + SchemaTableInfo sourceTable = sourceSchema.getTable(targetTableName); + + if (sourceTable == null) + { + LOG.warn("Source schema has no table named '{}'", targetTableName); + } + else if (sourceTable.getTableType() != DatabaseTableType.TABLE) // Skip the views + { + LOG.warn("{} is not a table!", sourceTable); + } + else + { + // TODO: Pull column names from target table instead?? + Set columnNames = sourceTable.getColumnNameSet().stream() + .filter(name -> !"_ts".equals(name)) // TODO: remove only if column has a default set? + .filter(name -> !"csPath".equals(name)) // search.CrawlCollections.csPath is SQL Server-only + .collect(Collectors.toSet()); + + TableSelector sourceSelector = new TableSelector(sourceTable, columnNames).setJdbcCaching(false); + try (Stream> mapStream = sourceSelector.uncachedMapStream(); Connection conn = targetScope.getConnection()) + { + Collection sourceColumns = sourceSelector.getSelectedColumns(); + // Map the source columns to the target columns so we get the right order and casing for INSERT, etc. + Collection targetColumns = sourceColumns.stream() + .map(sourceCol -> targetTable.getColumn(sourceCol.getName())) + .peek(targetColumn -> { + if (null == targetColumn) + LOG.info("null target column in: {}", sourceTable); + }) + .filter(Objects::nonNull) + .toList(); + String q = StringUtils.join(Collections.nCopies(sourceColumns.size(), "?"), ", "); + SQLFragment sql = new SQLFragment("INSERT INTO ") + .append(targetTable.getSelectName()) + .append("("); + + String sep = ""; + for (ColumnInfo targetColumn : targetColumns) + { + sql.append(sep) + .append(targetColumn.getSelectIdentifier().getSql()); + sep = ", "; + } + + sql.append(") VALUES (") + .append(q) + .append(")"); + + PreparedStatement statement = conn.prepareStatement(sql.getRawSQL()); + + mapStream.forEach(map -> { + try + { + int i = 1; + for (ColumnInfo col : sourceColumns) + { + String name = col.getName(); + Object value = col.getValue(map); + if (name.equalsIgnoreCase("Deleted") && targetTable.getColumn(name).getJdbcType() == JdbcType.INTEGER) + value = (boolean)value ? 
1 : 0; + statement.setObject(i++, value); + } + statement.execute(); + } + catch (SQLException e) + { + throw new RuntimeException("Exception while migrating data from " + sourceTable, e); + } + }); + + Sequence sequence = schemaSequenceMap.get(targetTable.getName()); + if (sequence != null) + { + ColumnInfo targetColumn = targetTable.getColumn(sequence.columnName()); + String sequenceName = new SqlSelector(targetSchema, "SELECT pg_get_serial_sequence(?, ?)", targetSchema.getName() + "." + targetTable.getName(), targetColumn.getSelectIdentifier().getId()) + .getObject(String.class); + new SqlExecutor(targetScope).execute("SELECT setval(?, ?)", sequenceName, sequence.lastValue()); + } + } + catch (Exception e) + { + LOG.error("Exception: ", e); + } + } + } + } + } + + // We can't handle provisioned tables yet, so (for now) delete all copied audit table remnants, allowing the new DB to create these tables on restart + SqlExecutor executor = new SqlExecutor(targetScope); + executor.execute("DELETE FROM exp.PropertyDomain WHERE DomainId IN (SELECT DomainID FROM exp.DomainDescriptor WHERE storageschemaname = 'audit')"); + executor.execute("DELETE FROM exp.DomainDescriptor WHERE storageschemaname = 'audit'"); + executor.execute("DELETE FROM exp.PropertyDescriptor WHERE PropertyId NOT IN (SELECT PropertyId FROM exp.PropertyDomain)"); + + System.exit(0); + } + + private static List getTables(DbSchema schema) + { + return new ArrayList<>(schema.getTableNames().stream() + .map(schema::getTable) + .filter(table -> table.getTableType() == DatabaseTableType.TABLE) + .toList()); + } +} diff --git a/api/src/org/labkey/api/module/ModuleLoader.java b/api/src/org/labkey/api/module/ModuleLoader.java index 0fcb2f43718..8a628ef7d6b 100644 --- a/api/src/org/labkey/api/module/ModuleLoader.java +++ b/api/src/org/labkey/api/module/ModuleLoader.java @@ -36,7 +36,6 @@ import org.labkey.api.collections.CopyOnWriteHashMap; import org.labkey.api.collections.LabKeyCollectors; import org.labkey.api.collections.Sets; -import org.labkey.api.data.ColumnInfo; import org.labkey.api.data.Container; import org.labkey.api.data.ConvertHelper; import org.labkey.api.data.CoreSchema; @@ -45,11 +44,9 @@ import org.labkey.api.data.DbSchemaType; import org.labkey.api.data.DbScope; import org.labkey.api.data.FileSqlScriptProvider; -import org.labkey.api.data.JdbcType; import org.labkey.api.data.RuntimeSQLException; import org.labkey.api.data.SQLFragment; import org.labkey.api.data.SchemaNameCache; -import org.labkey.api.data.SchemaTableInfo; import org.labkey.api.data.SqlExecutor; import org.labkey.api.data.SqlScriptManager; import org.labkey.api.data.SqlScriptRunner; @@ -63,7 +60,6 @@ import org.labkey.api.data.dialect.DatabaseNotSupportedException; import org.labkey.api.data.dialect.SqlDialect; import org.labkey.api.module.ModuleUpgrader.Execution; -import org.labkey.api.query.TableSorter; import org.labkey.api.resource.Resource; import org.labkey.api.security.SecurityManager; import org.labkey.api.security.UserManager; @@ -118,9 +114,6 @@ import java.lang.reflect.InvocationHandler; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.SQLException; import java.util.AbstractMap; import java.util.ArrayList; import java.util.Arrays; @@ -129,7 +122,6 @@ import java.util.Comparator; import java.util.Date; import java.util.HashMap; -import java.util.HashSet; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import 
java.util.LinkedList; @@ -144,7 +136,6 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.regex.Pattern; import java.util.stream.Collectors; -import java.util.stream.Stream; import static org.apache.commons.lang3.StringUtils.equalsIgnoreCase; import static org.apache.commons.lang3.StringUtils.isEmpty; @@ -1889,7 +1880,7 @@ private void startNonCoreUpgradeAndStartup(Execution execution, File lockFile) } } - // Final step in upgrade process: set the upgrade state to complete, perform post-upgrade tasks, and start up the modules. + // Final step in the upgrade process: set the upgrade state to complete, perform post-upgrade tasks, and start up the modules. private void afterUpgrade(File lockFile) { setUpgradeState(UpgradeState.UpgradeComplete); @@ -1902,7 +1893,7 @@ private void afterUpgrade(File lockFile) lockFile.delete(); verifyRequiredModules(); - handleDatabaseMigration(); + DatabaseMigration.migrate(shouldInsertData(), getMigrationDataSource()); } // If the "requiredModules" parameter is present in application.properties then fail startup if any specified module is missing. @@ -2005,283 +1996,6 @@ public boolean shouldInsertData() return _migrationDataSource != null && isNewInstall() && DbScope.getLabKeyScope().getSqlDialect().isPostgreSQL() ? _migrationDataSource : null; } - // If associated properties are set, migrate data from the external SQL Server data source into the just-created - // empty PostgreSQL schemas. - private void handleDatabaseMigration() - { - if (!shouldInsertData()) - { - clearSchemas(); - verifyEmptySchemas(); - - if (getMigrationDataSource() != null) - migrateDatabase(getMigrationDataSource()); - } - } - - // As part of SQL Server -> PostgreSQL migration, clear containers needed for bootstrap - private void clearSchemas() - { - TableInfo containers = CoreSchema.getInstance().getTableInfoContainers(); - Table.delete(containers); // Now that we've bootstrapped, delete root and shared containers - DbScope targetScope = DbScope.getLabKeyScope(); - new SqlExecutor(targetScope).execute("ALTER SEQUENCE core.containers_rowid_seq RESTART"); // Reset Containers sequence - } - - // Verify that no data rows were inserted and no sequences were incremented - private void verifyEmptySchemas() - { - DbScope scope = DbScope.getLabKeyScope(); - - Map> schemaMap = scope.getSchemaNames().stream() - .map(name -> scope.getSchema(name, DbSchemaType.Unknown)) - .collect(Collectors.partitioningBy(schema -> schema.isModuleSchema() && schema.getModule().getSupportedDatabasesSet().contains(SupportedDatabase.mssql))); - - List targetSchemas = schemaMap.get(true); - List tableWarnings = targetSchemas.stream() - .flatMap(schema -> schema.getTableNames().stream() - .map(schema::getTable) - .filter(table -> table.getTableType() != DatabaseTableType.NOT_IN_DB) - ) - .map(table -> { - long rowCount = new TableSelector(table).getRowCount(); - if (rowCount > 0) - return table.getSelectName() + " has " + StringUtilsLabKey.pluralize(rowCount, "row"); - else - return null; - }) - .filter(Objects::nonNull) - .toList(); - - if (!tableWarnings.isEmpty()) - { - _log.warn("{} rows", StringUtilsLabKey.pluralize(tableWarnings.size(), "table has", "tables have")); - tableWarnings.forEach(_log::warn); - } - - List schemasToIgnore = schemaMap.get(false).stream() - .map(DbSchema::getName) - .toList(); - String qs = StringUtils.join(Collections.nCopies(schemasToIgnore.size(), "?"), ", "); - List sequenceWarnings = new SqlSelector(scope, new SQLFragment( - "SELECT schemaname || '.' 
|| sequencename FROM pg_sequences WHERE last_value IS NOT NULL AND schemaname NOT IN (" + qs + ")", - schemasToIgnore - )) - .stream(String.class) - .toList(); - - if (!sequenceWarnings.isEmpty()) - { - _log.warn("{} a value:", StringUtilsLabKey.pluralize(sequenceWarnings.size(), "sequence has", "sequences have")); - sequenceWarnings.forEach(_log::warn); - } - } - - private record Sequence(String schemaName, String tableName, String columnName, int lastValue) {} - - private void migrateDatabase(String migrationDataSource) - { - DbScope targetScope = DbScope.getLabKeyScope(); - DbScope sourceScope = DbScope.getDbScope(migrationDataSource); - if (null == sourceScope) - throw new ConfigurationException("Migration data source not found: " + migrationDataSource); - if (!sourceScope.getSqlDialect().isSqlServer()) - throw new ConfigurationException("Migration data source is not SQL Server: " + migrationDataSource); - - // Verify that all sequences in the target schema have an increment of 1, since that's an assumption below - Collection sequencesNonOneIncrement = new SqlSelector(targetScope, new SQLFragment("SELECT schemaname || '.' || sequencename || ': ' || increment_by FROM pg_sequences WHERE increment_by != 1")).getCollection(String.class); - if (!sequencesNonOneIncrement.isEmpty()) - { - throw new IllegalStateException(StringUtilsLabKey.pluralize(sequencesNonOneIncrement.size(), "sequence has", "sequences have") + " an increment other than 1: " + sequencesNonOneIncrement); - } - - // Select the SQL Server sequences with non-null last value. We'll use the results to set PostgreSQL sequences after copying data. - String sequenceQuery = """ - SELECT - OBJECT_SCHEMA_NAME(tables.object_id, db_id()) AS SchemaName, - tables.name AS TableName, - identity_columns.name AS ColumnName, - identity_columns.seed_value, - identity_columns.increment_value, - identity_columns.last_value - FROM - sys.tables tables - JOIN - sys.identity_columns identity_columns ON tables.object_id = identity_columns.object_id - WHERE last_value IS NOT NULL"""; - Map> sequences = new HashMap<>(); - new SqlSelector(sourceScope, sequenceQuery).forEach(rs -> { - Sequence sequence = new Sequence(rs.getString("SchemaName"), rs.getString("TableName"), rs.getString("ColumnName"), rs.getInt("last_value")); - Map schemaMap = sequences.computeIfAbsent(sequence.schemaName(), s -> new HashMap<>()); - schemaMap.put(sequence.tableName(), sequence); - }); - - // Get target schemas in module order, which helps with foreign key relationships - List schemas = getModules().stream() - .flatMap(module -> module.getSchemaNames().stream().filter(name -> !module.getProvisionedSchemaNames().contains(name))) - .map(name -> targetScope.getSchema(name, DbSchemaType.Module)) - .toList(); - - for (DbSchema targetSchema : schemas) - { - DbSchema sourceSchema = sourceScope.getSchema(targetSchema.getName(), DbSchemaType.Bare); - if (!sourceSchema.existsInDatabase()) - { - _log.warn("{} has no schema named '{}'", migrationDataSource, targetSchema.getName()); - } - else - { - List sourceTables = getTables(sourceSchema); - List targetTables = getTables(targetSchema); - - if (sourceTables.size() == targetTables.size()) - { - _log.debug("{} schemas have the same number of tables ({})", sourceSchema.getName(), targetTables.size()); - } - else - { - _log.warn("{} schemas have different table counts, {} vs. 
{}", sourceSchema.getName(), sourceTables.size(), targetTables.size()); - Set sourceTableNames = sourceTables.stream().map(SchemaTableInfo::getName).collect(LabKeyCollectors.toCaseInsensitiveHashSet()); - Set targetTableNames = targetTables.stream().map(SchemaTableInfo::getName).collect(LabKeyCollectors.toCaseInsensitiveHashSet()); - Set sourceTableNamesCopy = new CaseInsensitiveHashSet(sourceTableNames); - sourceTableNames.removeAll(targetTableNames); - targetTableNames.removeAll(sourceTableNamesCopy); - _log.warn("Differences: {} and {}", sourceTableNames, targetTableNames); - } - - Map schemaSequenceMap = sequences.getOrDefault(sourceSchema.getName(), Map.of()); - - Set tablesToIgnore = targetSchema.getName().equals("core") ? Sets.newCaseInsensitiveHashSet("modules", "sqlscripts", "upgradesteps") : Set.of(); - - List sortedTables = TableSorter.sort(targetSchema, true); - Set allTables = targetSchema.getTableNames().stream() - .map(targetSchema::getTable) - .collect(Collectors.toCollection(HashSet::new)); - allTables.removeAll(sortedTables); - - if (!allTables.isEmpty()) - { - _log.info("These tables were removed by TableSorter: {}", allTables); - if (targetSchema.getName().equals("comm")) - { - sortedTables.add(targetSchema.getTable("Pages")); - sortedTables.add(targetSchema.getTable("PageVersions")); - } - } - - List tablesToCopy = sortedTables.stream() - // Skip all views and virtual tables (e.g., test.Containers2, which is a table on SS but a view on PG) - .filter(table -> table.getTableType() == DatabaseTableType.TABLE) - .filter(table -> !tablesToIgnore.contains(table.getName())) - .toList(); - - for (TableInfo targetTable : tablesToCopy) - { - String targetTableName = targetTable.getName(); - SchemaTableInfo sourceTable = sourceSchema.getTable(targetTableName); - - if (sourceTable == null) - { - _log.warn("Source schema has no table named '{}'", targetTableName); - } - else if (sourceTable.getTableType() != DatabaseTableType.TABLE) // Skip the views - { - _log.warn("{} is not a table!", sourceTable); - } - else - { - Set columnNames = sourceTable.getColumnNameSet().stream() - .filter(name -> !"_ts".equals(name)) // TODO: remove only if column has a default set? - .filter(name -> !"csPath".equals(name)) // search.CrawlCollections.csPath is SQL Server-only - .collect(Collectors.toSet()); - - TableSelector sourceSelector = new TableSelector(sourceTable, columnNames).setJdbcCaching(false); - try (Stream> mapStream = sourceSelector.uncachedMapStream(); Connection conn = targetScope.getConnection()) - { - Collection sourceColumns = sourceSelector.getSelectedColumns(); - // Map the source columns to the target columns so we get the right order and casing for INSERT, etc. 
- Collection targetColumns = sourceColumns.stream() - .map(sourceCol -> targetTable.getColumn(sourceCol.getName())) - .peek(targetColumn -> { - if (null == targetColumn) - _log.info("null target column in: {}", sourceTable); - }) - .filter(Objects::nonNull) - .toList(); - String q = StringUtils.join(Collections.nCopies(sourceColumns.size(), "?"), ", "); - SQLFragment sql = new SQLFragment("INSERT INTO ") - .append(targetTable.getSelectName()) - .append("("); - - String sep = ""; - for (ColumnInfo targetColumn : targetColumns) - { - sql.append(sep) - .append(targetColumn.getSelectIdentifier().getSql()); - sep = ", "; - } - - sql.append(") VALUES (") - .append(q) - .append(")"); - - PreparedStatement statement = conn.prepareStatement(sql.getRawSQL()); - - mapStream.forEach(map -> { - try - { - int i = 1; - for (ColumnInfo col : sourceColumns) - { - String name = col.getName(); - Object value = col.getValue(map); - if (name.equalsIgnoreCase("Deleted") && targetTable.getColumn(name).getJdbcType() == JdbcType.INTEGER) - value = (boolean)value ? 1 : 0; - statement.setObject(i++, value); - } - statement.execute(); - } - catch (SQLException e) - { - throw new RuntimeException("Exception while migrating data from " + sourceTable, e); - } - }); - - Sequence sequence = schemaSequenceMap.get(targetTable.getName()); - if (sequence != null) - { - ColumnInfo targetColumn = targetTable.getColumn(sequence.columnName()); - String sequenceName = new SqlSelector(targetSchema, "SELECT pg_get_serial_sequence(?, ?)", targetSchema.getName() + "." + targetTable.getName(), targetColumn.getSelectIdentifier().getId()) - .getObject(String.class); - new SqlExecutor(targetScope).execute("SELECT setval(?, ?)", sequenceName, sequence.lastValue()); - } - } - catch (Exception e) - { - _log.error("Exception: ", e); - } - } - } - } - } - - // We can't handle provisioned tables yet, so (for now) delete all copied audit table remnants, allowing the new DB to create these tables on restart - SqlExecutor executor = new SqlExecutor(targetScope); - executor.execute("DELETE FROM exp.PropertyDomain WHERE DomainId IN (SELECT DomainID FROM exp.DomainDescriptor WHERE storageschemaname = 'audit')"); - executor.execute("DELETE FROM exp.DomainDescriptor WHERE storageschemaname = 'audit'"); - executor.execute("DELETE FROM exp.PropertyDescriptor WHERE PropertyId NOT IN (SELECT PropertyId FROM exp.PropertyDomain)"); - - System.exit(0); - } - - private List getTables(DbSchema schema) - { - return new ArrayList<>(schema.getTableNames().stream() - .map(schema::getTable) - .filter(table -> table.getTableType() == DatabaseTableType.TABLE) - .toList()); - } - public void destroy() { // In the case of a startup failure, _modules may be null. 
We want to allow a context reload to succeed in this case, From c5b858024810556a51d189e2c9f21be97cf70e5a Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Fri, 25 Jul 2025 15:39:52 -0700 Subject: [PATCH 07/17] Register handlers for schema-specific functionality --- .../labkey/api/module/DatabaseMigration.java | 93 +++++++++++++------ core/src/org/labkey/core/CoreModule.java | 20 +++- wiki/src/org/labkey/wiki/WikiModule.java | 28 ++++++ 3 files changed, 109 insertions(+), 32 deletions(-) diff --git a/api/src/org/labkey/api/module/DatabaseMigration.java b/api/src/org/labkey/api/module/DatabaseMigration.java index de28a054d2e..61b216e2485 100644 --- a/api/src/org/labkey/api/module/DatabaseMigration.java +++ b/api/src/org/labkey/api/module/DatabaseMigration.java @@ -4,6 +4,7 @@ import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.Nullable; import org.labkey.api.collections.CaseInsensitiveHashSet; +import org.labkey.api.collections.CopyOnWriteCaseInsensitiveHashMap; import org.labkey.api.collections.LabKeyCollectors; import org.labkey.api.collections.Sets; import org.labkey.api.data.ColumnInfo; @@ -164,10 +165,6 @@ private static void migrateDatabase(String migrationDataSource) for (DbSchema targetSchema : schemas) { - if (targetSchema.getName().equals("comm")) - { - new SqlExecutor(targetSchema).execute("ALTER TABLE comm.Pages DROP CONSTRAINT FK_Pages_PageVersions"); - } DbSchema sourceSchema = sourceScope.getSchema(targetSchema.getName(), DbSchemaType.Bare); if (!sourceSchema.existsInDatabase()) { @@ -175,6 +172,9 @@ private static void migrateDatabase(String migrationDataSource) } else { + MigrationHandler handler = getHandler(targetSchema); + handler.beforeSchema(targetSchema); + List sourceTables = getTables(sourceSchema); List targetTables = getTables(targetSchema); @@ -195,31 +195,7 @@ private static void migrateDatabase(String migrationDataSource) Map schemaSequenceMap = sequences.getOrDefault(sourceSchema.getName(), Map.of()); - Set tablesToIgnore = targetSchema.getName().equals("core") ? 
Sets.newCaseInsensitiveHashSet("modules", "sqlscripts", "upgradesteps") : Set.of();
-
-                List<TableInfo> sortedTables = TableSorter.sort(targetSchema, true);
-                Set<TableInfo> allTables = targetSchema.getTableNames().stream()
-                    .map(targetSchema::getTable)
-                    .collect(Collectors.toCollection(HashSet::new));
-                allTables.removeAll(sortedTables);
-
-                if (!allTables.isEmpty())
-                {
-                    LOG.info("These tables were removed by TableSorter: {}", allTables);
-                    if (targetSchema.getName().equals("comm"))
-                    {
-                        sortedTables.add(targetSchema.getTable("Pages"));
-                        sortedTables.add(targetSchema.getTable("PageVersions"));
-                    }
-                }
-
-                List<TableInfo> tablesToCopy = sortedTables.stream()
-                    // Skip all views and virtual tables (e.g., test.Containers2, which is a table on SS but a view on PG)
-                    .filter(table -> table.getTableType() == DatabaseTableType.TABLE)
-                    .filter(table -> !tablesToIgnore.contains(table.getName()))
-                    .toList();
-
-                for (TableInfo targetTable : tablesToCopy)
+                for (TableInfo targetTable : handler.getTablesToCopy(targetSchema))
                 {
                     String targetTableName = targetTable.getName();
                     SchemaTableInfo sourceTable = sourceSchema.getTable(targetTableName);
@@ -307,6 +283,8 @@ else if (sourceTable.getTableType() != DatabaseTableType.TABLE) // Skip the view
                 }
             }
         }
+
+        handler.afterSchema(targetSchema);
     }
 }
@@ -326,4 +304,61 @@ private static List<SchemaTableInfo> getTables(DbSchema schema)
             .filter(table -> table.getTableType() == DatabaseTableType.TABLE)
             .toList());
     }
+
+    public interface MigrationHandler
+    {
+        void beforeSchema(DbSchema targetSchema);
+
+        List<TableInfo> getTablesToCopy(DbSchema targetSchema);
+
+        void afterSchema(DbSchema targetSchema);
+    }
+
+    public static class DefaultMigrationHandler implements MigrationHandler
+    {
+        @Override
+        public void beforeSchema(DbSchema targetSchema)
+        {
+        }
+
+        @Override
+        public List<TableInfo> getTablesToCopy(DbSchema targetSchema)
+        {
+            List<TableInfo> sortedTables = TableSorter.sort(targetSchema, true);
+
+            Set<TableInfo> allTables = targetSchema.getTableNames().stream()
+                .map(targetSchema::getTable)
+                .collect(Collectors.toCollection(HashSet::new));
+            allTables.removeAll(sortedTables);
+
+            if (!allTables.isEmpty())
+            {
+                LOG.info("These tables were removed by TableSorter: {}", allTables);
+            }
+
+            return sortedTables.stream()
+                // Skip all views and virtual tables (e.g., test.Containers2, which is a table on SS but a view on PG)
+                .filter(table -> table.getTableType() == DatabaseTableType.TABLE)
+                .collect(Collectors.toCollection(ArrayList::new)); // Ensure mutable
+        }
+
+        @Override
+        public void afterSchema(DbSchema targetSchema)
+        {
+        }
+    }
+
+    private static final Map<String, MigrationHandler> MIGRATION_HANDLERS = new CopyOnWriteCaseInsensitiveHashMap<>();
+    private static final MigrationHandler DEFAULT_MIGRATION_HANDLER = new DefaultMigrationHandler();
+
+    public static void registerHandler(DbSchema schema, MigrationHandler handler)
+    {
+        MIGRATION_HANDLERS.put(schema.getName(), handler);
+    }
+
+    private static MigrationHandler getHandler(DbSchema schema)
+    {
+        MigrationHandler handler = MIGRATION_HANDLERS.get(schema.getName());
+        return handler != null ? 
handler : DEFAULT_MIGRATION_HANDLER; + } } diff --git a/core/src/org/labkey/core/CoreModule.java b/core/src/org/labkey/core/CoreModule.java index 6e60a28c0f0..165dc6b85a0 100644 --- a/core/src/org/labkey/core/CoreModule.java +++ b/core/src/org/labkey/core/CoreModule.java @@ -81,9 +81,7 @@ import org.labkey.api.data.dialect.SqlDialectRegistry; import org.labkey.api.data.statistics.StatsService; import org.labkey.api.dataiterator.SimpleTranslator; -import org.labkey.api.exp.OntologyManager; import org.labkey.api.exp.property.PropertyService; -import org.labkey.api.exp.property.SystemProperty; import org.labkey.api.exp.property.TestDomainKind; import org.labkey.api.external.tools.ExternalToolsViewService; import org.labkey.api.files.FileBrowserConfigImporter; @@ -91,6 +89,8 @@ import org.labkey.api.files.FileContentService; import org.labkey.api.markdown.MarkdownService; import org.labkey.api.message.settings.MessageConfigService; +import org.labkey.api.module.DatabaseMigration; +import org.labkey.api.module.DatabaseMigration.DefaultMigrationHandler; import org.labkey.api.module.FolderType; import org.labkey.api.module.FolderTypeManager; import org.labkey.api.module.Module; @@ -1267,9 +1267,23 @@ public void moduleStartupComplete(ServletContext servletContext) MessageConfigService.setInstance(new EmailPreferenceConfigServiceImpl()); ContainerManager.addContainerListener(new EmailPreferenceContainerListener()); UserManager.addUserListener(new EmailPreferenceUserListener()); + + DatabaseMigration.registerHandler(CoreSchema.getInstance().getSchema(), new DefaultMigrationHandler() + { + @Override + public List getTablesToCopy(DbSchema targetSchema) + { + List tablesToCopy = super.getTablesToCopy(targetSchema); + tablesToCopy.remove(targetSchema.getTable("Modules")); + tablesToCopy.remove(targetSchema.getTable("SqlScripts")); + tablesToCopy.remove(targetSchema.getTable("UpgradeSteps")); + + return tablesToCopy; + } + }); } - // Issue 7527: Auto-detect missing sql views and attempt to recreate + // Issue 7527: Auto-detect missing SQL views and attempt to recreate private void checkForMissingDbViews() { ModuleLoader.getInstance().getModules().stream() diff --git a/wiki/src/org/labkey/wiki/WikiModule.java b/wiki/src/org/labkey/wiki/WikiModule.java index 01000e6dc7d..322e160d3a5 100644 --- a/wiki/src/org/labkey/wiki/WikiModule.java +++ b/wiki/src/org/labkey/wiki/WikiModule.java @@ -25,8 +25,12 @@ import org.labkey.api.attachments.AttachmentService; import org.labkey.api.data.Container; import org.labkey.api.data.ContainerManager; +import org.labkey.api.data.DbSchema; import org.labkey.api.data.SqlExecutor; +import org.labkey.api.data.TableInfo; import org.labkey.api.module.CodeOnlyModule; +import org.labkey.api.module.DatabaseMigration; +import org.labkey.api.module.DatabaseMigration.DefaultMigrationHandler; import org.labkey.api.module.ModuleContext; import org.labkey.api.module.ModuleLoader; import org.labkey.api.search.SearchService; @@ -120,6 +124,30 @@ public void doStartup(ModuleContext moduleContext) WikiSchema.register(this); WikiController.registerAdminConsoleLinks(); + DatabaseMigration.registerHandler(CommSchema.getInstance().getSchema(), new DefaultMigrationHandler() + { + @Override + public void beforeSchema(DbSchema targetSchema) + { + new SqlExecutor(targetSchema).execute("ALTER TABLE comm.Pages DROP CONSTRAINT FK_Pages_PageVersions"); + } + + @Override + public List getTablesToCopy(DbSchema targetSchema) + { + List tablesToCopy = super.getTablesToCopy(targetSchema); + 
tablesToCopy.add(targetSchema.getTable("Pages")); + tablesToCopy.add(targetSchema.getTable("PageVersions")); + + return tablesToCopy; + } + + @Override + public void afterSchema(DbSchema targetSchema) + { + new SqlExecutor(targetSchema).execute("ALTER TABLE comm.Pages ADD CONSTRAINT FK_Pages_PageVersions FOREIGN KEY (PageVersionId) REFERENCES comm.PageVersions (RowId)"); + } + }); } private void bootstrap(ModuleContext moduleContext) From 4bcb9d59a4337441219193aa3e0dc261613d4593 Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Sat, 26 Jul 2025 09:47:45 -0700 Subject: [PATCH 08/17] Inspect target table to determine columns to select from source table --- .../labkey/api/module/DatabaseMigration.java | 41 ++++++++----------- 1 file changed, 17 insertions(+), 24 deletions(-) diff --git a/api/src/org/labkey/api/module/DatabaseMigration.java b/api/src/org/labkey/api/module/DatabaseMigration.java index 61b216e2485..a369b586aaf 100644 --- a/api/src/org/labkey/api/module/DatabaseMigration.java +++ b/api/src/org/labkey/api/module/DatabaseMigration.java @@ -6,7 +6,6 @@ import org.labkey.api.collections.CaseInsensitiveHashSet; import org.labkey.api.collections.CopyOnWriteCaseInsensitiveHashMap; import org.labkey.api.collections.LabKeyCollectors; -import org.labkey.api.collections.Sets; import org.labkey.api.data.ColumnInfo; import org.labkey.api.data.CoreSchema; import org.labkey.api.data.DatabaseTableType; @@ -34,6 +33,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Objects; @@ -122,6 +122,8 @@ private record Sequence(String schemaName, String tableName, String columnName, private static void migrateDatabase(String migrationDataSource) { + LOG.info("Starting database migration"); + DbScope targetScope = DbScope.getLabKeyScope(); DbScope sourceScope = DbScope.getDbScope(migrationDataSource); if (null == sourceScope) @@ -178,11 +180,7 @@ private static void migrateDatabase(String migrationDataSource) List sourceTables = getTables(sourceSchema); List targetTables = getTables(targetSchema); - if (sourceTables.size() == targetTables.size()) - { - LOG.debug("{} schemas have the same number of tables ({})", sourceSchema.getName(), targetTables.size()); - } - else + if (sourceTables.size() != targetTables.size()) { LOG.warn("{} schemas have different table counts, {} vs. {}", sourceSchema.getName(), sourceTables.size(), targetTables.size()); Set sourceTableNames = sourceTables.stream().map(SchemaTableInfo::getName).collect(LabKeyCollectors.toCaseInsensitiveHashSet()); @@ -190,7 +188,7 @@ private static void migrateDatabase(String migrationDataSource) Set sourceTableNamesCopy = new CaseInsensitiveHashSet(sourceTableNames); sourceTableNames.removeAll(targetTableNames); targetTableNames.removeAll(sourceTableNamesCopy); - LOG.warn("Differences: {} and {}", sourceTableNames, targetTableNames); + LOG.warn("Table differences: {} and {}", sourceTableNames, targetTableNames); } Map schemaSequenceMap = sequences.getOrDefault(sourceSchema.getName(), Map.of()); @@ -204,30 +202,23 @@ private static void migrateDatabase(String migrationDataSource) { LOG.warn("Source schema has no table named '{}'", targetTableName); } - else if (sourceTable.getTableType() != DatabaseTableType.TABLE) // Skip the views - { - LOG.warn("{} is not a table!", sourceTable); - } else { - // TODO: Pull column names from target table instead?? 
- Set columnNames = sourceTable.getColumnNameSet().stream() - .filter(name -> !"_ts".equals(name)) // TODO: remove only if column has a default set? - .filter(name -> !"csPath".equals(name)) // search.CrawlCollections.csPath is SQL Server-only + // Inspect target table to determine column names to select from source table + Set selectColumnNames = targetTable.getColumns().stream() + .filter(column -> column.getWrappedColumnName() == null) // Ignore wrapped columns + .map(ColumnInfo::getName) + .filter(name -> !name.equals("_ts")) .collect(Collectors.toSet()); - TableSelector sourceSelector = new TableSelector(sourceTable, columnNames).setJdbcCaching(false); + TableSelector sourceSelector = new TableSelector(sourceTable, selectColumnNames).setJdbcCaching(false); + try (Stream> mapStream = sourceSelector.uncachedMapStream(); Connection conn = targetScope.getConnection()) { Collection sourceColumns = sourceSelector.getSelectedColumns(); - // Map the source columns to the target columns so we get the right order and casing for INSERT, etc. + // Map the selected source columns to the target columns so we get the right order and casing for INSERT, etc. Collection targetColumns = sourceColumns.stream() .map(sourceCol -> targetTable.getColumn(sourceCol.getName())) - .peek(targetColumn -> { - if (null == targetColumn) - LOG.info("null target column in: {}", sourceTable); - }) - .filter(Objects::nonNull) .toList(); String q = StringUtils.join(Collections.nCopies(sourceColumns.size(), "?"), ", "); SQLFragment sql = new SQLFragment("INSERT INTO ") @@ -273,7 +264,7 @@ else if (sourceTable.getTableType() != DatabaseTableType.TABLE) // Skip the view { ColumnInfo targetColumn = targetTable.getColumn(sequence.columnName()); String sequenceName = new SqlSelector(targetSchema, "SELECT pg_get_serial_sequence(?, ?)", targetSchema.getName() + "." 
+ targetTable.getName(), targetColumn.getSelectIdentifier().getId()) - .getObject(String.class); + .getObject(String.class); new SqlExecutor(targetScope).execute("SELECT setval(?, ?)", sequenceName, sequence.lastValue()); } } @@ -294,6 +285,8 @@ else if (sourceTable.getTableType() != DatabaseTableType.TABLE) // Skip the view executor.execute("DELETE FROM exp.DomainDescriptor WHERE storageschemaname = 'audit'"); executor.execute("DELETE FROM exp.PropertyDescriptor WHERE PropertyId NOT IN (SELECT PropertyId FROM exp.PropertyDomain)"); + LOG.info("Database migration is complete"); + System.exit(0); } @@ -324,7 +317,7 @@ public void beforeSchema(DbSchema targetSchema) @Override public List getTablesToCopy(DbSchema targetSchema) { - List sortedTables = TableSorter.sort(targetSchema, true); + Set sortedTables = new LinkedHashSet<>(TableSorter.sort(targetSchema, true)); Set allTables = targetSchema.getTableNames().stream() .map(targetSchema::getTable) From 7d200d77a67ef34cb9d1f1d5260be79b798b3cd9 Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Sat, 26 Jul 2025 15:42:09 -0700 Subject: [PATCH 09/17] Support skip annotations in comments for convenience --- .../labkey/api/data/SqlScriptExecutor.java | 56 ++++++++++++++++--- .../data/dialect/BasePostgreSqlDialect.java | 7 --- .../postgresql/assay-24.001-24.002.sql | 4 +- .../postgresql/core-0.000-24.000.sql | 4 +- .../core/dialect/PostgreSql92Dialect.java | 11 ++-- 5 files changed, 57 insertions(+), 25 deletions(-) diff --git a/api/src/org/labkey/api/data/SqlScriptExecutor.java b/api/src/org/labkey/api/data/SqlScriptExecutor.java index c63e46594f6..9c14a9b8540 100644 --- a/api/src/org/labkey/api/data/SqlScriptExecutor.java +++ b/api/src/org/labkey/api/data/SqlScriptExecutor.java @@ -15,6 +15,7 @@ */ package org.labkey.api.data; +import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.NotNull; @@ -31,6 +32,7 @@ import java.util.Collections; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; /** * Executes a single module upgrade SQL script, including finding calls into Java code that are embedded using @@ -51,13 +53,14 @@ public class SqlScriptExecutor /** * Splits a SQL string into blocks and executes each block, one at a time. Blocks are determined in a dialect-specific * way, using splitPattern and procPattern. - * @param scriptName Name of the SQL script for logging purposes - * @param sql The SQL string to split and execute - * @param splitPattern Dialect-specific regex pattern for splitting normal SQL statements into blocks. Null means no need to split. - * @param procPattern Dialect-specific regex pattern for finding executeJavaCode procedure calls in the SQL. See SqlDialect.getSqlScriptProcPattern() for details. - * @param schema Current schema. Null is allowed for testing purposes. + * + * @param scriptName Name of the SQL script for logging purposes + * @param sql The SQL string to split and execute + * @param splitPattern Dialect-specific regex pattern for splitting normal SQL statements into blocks. Null means no need to split. + * @param procPattern Dialect-specific regex pattern for finding executeJavaCode procedure calls in the SQL. See SqlDialect.getSqlScriptProcPattern() for details. + * @param schema Current schema. Null is allowed for testing purposes. 
     * @param moduleContext Current ModuleContext
-     * @param conn Connection to use, if non-null
+     * @param conn          Connection to use, if non-null
      */
     public SqlScriptExecutor(String scriptName, String sql, @Nullable Pattern splitPattern, @NotNull Pattern procPattern, @Nullable DbSchema schema, ModuleContext moduleContext, @Nullable Connection conn)
     {
@@ -79,10 +82,49 @@ public void execute()
         }
     }
 
+    private static final String START_ANNOTATION = "@SkipOnEmptySchemasBegin";
+    private static final String END_ANNOTATION = "@SkipOnEmptySchemasEnd";
+
     private Collection<Block> getBlocks()
     {
+        String sql;
+
+        if (ModuleLoader.getInstance().shouldInsertData())
+        {
+            sql = _sql;
+        }
+        else
+        {
+            // Strip all statements (typically inserts) between the above START and END annotations
+            MutableBoolean skipping = new MutableBoolean(false);
+            sql = _sql.lines()
+                .filter(line -> {
+                    if (line.contains(START_ANNOTATION))
+                    {
+                        if (skipping.booleanValue())
+                            throw new IllegalStateException("Unexpected " + START_ANNOTATION);
+
+                        skipping.setValue(true);
+                    }
+
+                    boolean ret = !skipping.booleanValue();
+
+                    if (line.contains(END_ANNOTATION))
+                    {
+                        if (!skipping.booleanValue())
+                            throw new IllegalStateException("Unexpected " + END_ANNOTATION);
+
+                        skipping.setValue(false);
+                    }
+
+                    return ret;
+                }
+                )
+                .collect(Collectors.joining("\n"));
+        }
+
         // Strip all comments from the script -- PostgreSQL JDBC driver goes berserk if it sees ; or ? inside a comment
-        StringBuilder stripped = new SqlScanner(_sql).stripComments();
+        StringBuilder stripped = new SqlScanner(sql).stripComments();
 
         Collection<Block> sqlBlocks;
diff --git a/api/src/org/labkey/api/data/dialect/BasePostgreSqlDialect.java b/api/src/org/labkey/api/data/dialect/BasePostgreSqlDialect.java
index f341051b1e8..245fd01920b 100644
--- a/api/src/org/labkey/api/data/dialect/BasePostgreSqlDialect.java
+++ b/api/src/org/labkey/api/data/dialect/BasePostgreSqlDialect.java
@@ -931,13 +931,6 @@ public DatabaseIdentifier makeDatabaseIdentifier(String alias)
 
     private static final Pattern PROC_PATTERN = Pattern.compile("^\\s*SELECT\\s+core\\.(executeJava(?:Upgrade|Initialization)Code\\s*\\(\\s*'(.+)'\\s*\\))\\s*;\\s*$", Pattern.CASE_INSENSITIVE | Pattern.MULTILINE);
 
-    @Override
-    // No need to split up PostgreSQL scripts; execute all statements in a single block (unless we have a special stored proc call). 
- protected Pattern getSQLScriptSplitPattern() - { - return null; - } - @NotNull @Override protected Pattern getSQLScriptProcPattern() diff --git a/assay/resources/schemas/dbscripts/postgresql/assay-24.001-24.002.sql b/assay/resources/schemas/dbscripts/postgresql/assay-24.001-24.002.sql index 8beda105fa1..d86ed7e0ad6 100644 --- a/assay/resources/schemas/dbscripts/postgresql/assay-24.001-24.002.sql +++ b/assay/resources/schemas/dbscripts/postgresql/assay-24.001-24.002.sql @@ -10,7 +10,7 @@ CREATE TABLE assay.PlateType CONSTRAINT UQ_PlateType_Rows_Cols UNIQUE (Rows, Columns) ); -@SkipOnEmptySchemasBegin +-- @SkipOnEmptySchemasBegin INSERT INTO assay.PlateType (Rows, Columns, Description) VALUES (3, 4, '12 well (3x4)'); INSERT INTO assay.PlateType (Rows, Columns, Description) VALUES (4, 6, '24 well (4x6)'); INSERT INTO assay.PlateType (Rows, Columns, Description) VALUES (6, 8, '48 well (6x8)'); @@ -18,7 +18,7 @@ INSERT INTO assay.PlateType (Rows, Columns, Description) VALUES (8, 12, '96 well INSERT INTO assay.PlateType (Rows, Columns, Description) VALUES (16, 24, '384 well (16x24)'); INSERT INTO assay.PlateType (Rows, Columns, Description, Archived) VALUES (32, 48, '1536 well (32x48)', TRUE); INSERT INTO assay.PlateType (Rows, Columns, Description, Archived) VALUES (0, 0, 'Invalid Plate Type (Plates which were created with non-valid row & column combinations)', TRUE); -@SkipOnEmptySchemasEnd +-- @SkipOnEmptySchemasEnd -- Rename type column to assayType ALTER TABLE assay.Plate RENAME COLUMN Type TO AssayType; diff --git a/core/resources/schemas/dbscripts/postgresql/core-0.000-24.000.sql b/core/resources/schemas/dbscripts/postgresql/core-0.000-24.000.sql index 241367f599b..b359bb2d40a 100644 --- a/core/resources/schemas/dbscripts/postgresql/core-0.000-24.000.sql +++ b/core/resources/schemas/dbscripts/postgresql/core-0.000-24.000.sql @@ -472,7 +472,7 @@ CREATE TABLE core.EmailOptions CONSTRAINT PK_EmailOptions PRIMARY KEY (EmailOptionId) ); -@SkipOnEmptySchemasBegin +-- @SkipOnEmptySchemasBegin INSERT INTO core.EmailOptions (EmailOptionId, EmailOption) VALUES (0, 'No Email'); INSERT INTO core.EmailOptions (EmailOptionId, EmailOption) VALUES (1, 'All conversations'); INSERT INTO core.EmailOptions (EmailOptionId, EmailOption) VALUES (2, 'My conversations'); @@ -492,7 +492,7 @@ INSERT INTO core.emailOptions (EmailOptionId, EmailOption, Type) VALUES (702, 'A -- labbook email notification options INSERT INTO core.emailOptions (EmailOptionId, EmailOption, Type) VALUES (801, 'No Email', 'labbook'); INSERT INTO core.emailOptions (EmailOptionId, EmailOption, Type) VALUES (802, 'All emails', 'labbook'); -@SkipOnEmptySchemasEnd +-- @SkipOnEmptySchemasEnd CREATE TABLE core.EmailPrefs ( diff --git a/core/src/org/labkey/core/dialect/PostgreSql92Dialect.java b/core/src/org/labkey/core/dialect/PostgreSql92Dialect.java index 9e6e7b931bb..5595fda49ce 100644 --- a/core/src/org/labkey/core/dialect/PostgreSql92Dialect.java +++ b/core/src/org/labkey/core/dialect/PostgreSql92Dialect.java @@ -20,11 +20,10 @@ import org.labkey.api.data.DbScope; import org.labkey.api.data.ParameterMarkerInClauseGenerator; import org.labkey.api.data.SqlSelector; +import org.labkey.api.data.dialect.BasePostgreSqlDialect; import org.labkey.api.data.dialect.DialectStringHandler; import org.labkey.api.data.dialect.JdbcHelper; -import org.labkey.api.data.dialect.BasePostgreSqlDialect; import org.labkey.api.data.dialect.StandardJdbcHelper; -import org.labkey.api.module.ModuleLoader; import org.labkey.api.util.HtmlString; import 
org.labkey.api.util.StringUtilsLabKey; import org.labkey.api.view.template.Warnings; @@ -206,12 +205,10 @@ private static String truncateBytes(String str, int maxBytes) return str; } - private static final Pattern SKIP_INSERTS_PATTERN = Pattern.compile("^\\s*@SkipOnEmptySchemasBegin$(.+?)^\\s*@SkipOnEmptySchemasEnd\\s*$", Pattern.MULTILINE + Pattern.DOTALL); - private static final Pattern STRIP_ANNOTATIONS_PATTERN = Pattern.compile("^\\s*@SkipOnEmptySchemasBegin$|^\\s*@SkipOnEmptySchemasEnd\\s*$", Pattern.MULTILINE + Pattern.DOTALL); - - @Override // Temporary override to help with SQL Server migration + @Override + // No need to split up PostgreSQL scripts; execute all statements in a single block (unless we have a special stored proc call). protected Pattern getSQLScriptSplitPattern() { - return ModuleLoader.getInstance().shouldInsertData() ? STRIP_ANNOTATIONS_PATTERN : SKIP_INSERTS_PATTERN; + return null; } } From 4e619bd594abe9c40035bffb57f3d2c66d9fb59c Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Sat, 26 Jul 2025 17:34:11 -0700 Subject: [PATCH 10/17] Provide line number for unexpected annotation --- api/src/org/labkey/api/data/SqlScriptExecutor.java | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/api/src/org/labkey/api/data/SqlScriptExecutor.java b/api/src/org/labkey/api/data/SqlScriptExecutor.java index 9c14a9b8540..9cca1dbd625 100644 --- a/api/src/org/labkey/api/data/SqlScriptExecutor.java +++ b/api/src/org/labkey/api/data/SqlScriptExecutor.java @@ -16,6 +16,7 @@ package org.labkey.api.data; import org.apache.commons.lang3.mutable.MutableBoolean; +import org.apache.commons.lang3.mutable.MutableInt; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.NotNull; @@ -95,14 +96,16 @@ private Collection getBlocks() } else { - // Strip all statements (typically inserts) between the above START and END annotations + // Strip all statements (typically inserts) between the empty-schema START and END annotations, inclusive MutableBoolean skipping = new MutableBoolean(false); + MutableInt lineCount = new MutableInt(0); sql = _sql.lines() .filter(line -> { + lineCount.increment(); if (line.contains(START_ANNOTATION)) { if (skipping.booleanValue()) - throw new IllegalStateException("Unexpected " + START_ANNOTATION); + throw new IllegalStateException("Unexpected " + START_ANNOTATION + " at line " + lineCount.intValue()); skipping.setValue(true); } @@ -112,7 +115,7 @@ private Collection getBlocks() if (line.contains(END_ANNOTATION)) { if (!skipping.booleanValue()) - throw new IllegalStateException("Unexpected " + END_ANNOTATION); + throw new IllegalStateException("Unexpected " + END_ANNOTATION + " at line " + lineCount.intValue()); skipping.setValue(false); } From f2971a9a696ca7d1a00335c65a8095871883deaf Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Sat, 26 Jul 2025 18:13:33 -0700 Subject: [PATCH 11/17] Remove special handling for Deleted columns --- api/src/org/labkey/api/module/DatabaseMigration.java | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/api/src/org/labkey/api/module/DatabaseMigration.java b/api/src/org/labkey/api/module/DatabaseMigration.java index a369b586aaf..d5732ab1214 100644 --- a/api/src/org/labkey/api/module/DatabaseMigration.java +++ b/api/src/org/labkey/api/module/DatabaseMigration.java @@ -12,7 +12,6 @@ import org.labkey.api.data.DbSchema; import org.labkey.api.data.DbSchemaType; import org.labkey.api.data.DbScope; -import org.labkey.api.data.JdbcType; 
From f2971a9a696ca7d1a00335c65a8095871883deaf Mon Sep 17 00:00:00 2001
From: Adam Rauch
Date: Sat, 26 Jul 2025 18:13:33 -0700
Subject: [PATCH 11/17] Remove special handling for Deleted columns

---
 api/src/org/labkey/api/module/DatabaseMigration.java | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/api/src/org/labkey/api/module/DatabaseMigration.java b/api/src/org/labkey/api/module/DatabaseMigration.java
index a369b586aaf..d5732ab1214 100644
--- a/api/src/org/labkey/api/module/DatabaseMigration.java
+++ b/api/src/org/labkey/api/module/DatabaseMigration.java
@@ -12,7 +12,6 @@
 import org.labkey.api.data.DbSchema;
 import org.labkey.api.data.DbSchemaType;
 import org.labkey.api.data.DbScope;
-import org.labkey.api.data.JdbcType;
 import org.labkey.api.data.SQLFragment;
 import org.labkey.api.data.SchemaTableInfo;
 import org.labkey.api.data.SqlExecutor;
@@ -243,14 +242,10 @@ private static void migrateDatabase(String migrationDataSource)
             try
             {
                 int i = 1;
+
                 for (ColumnInfo col : sourceColumns)
-                {
-                    String name = col.getName();
-                    Object value = col.getValue(map);
-                    if (name.equalsIgnoreCase("Deleted") && targetTable.getColumn(name).getJdbcType() == JdbcType.INTEGER)
-                        value = (boolean)value ? 1 : 0;
-                    statement.setObject(i++, value);
-                }
+                    statement.setObject(i++, col.getValue(map));
+
                 statement.execute();
             }
             catch (SQLException e)

From c1b2c92e3ab30ee3c3f26414728864ad97b02311 Mon Sep 17 00:00:00 2001
From: Adam Rauch
Date: Sun, 27 Jul 2025 16:32:29 -0700
Subject: [PATCH 12/17] Exit in the empty schemas case as well

---
 api/src/org/labkey/api/module/DatabaseMigration.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/api/src/org/labkey/api/module/DatabaseMigration.java b/api/src/org/labkey/api/module/DatabaseMigration.java
index d5732ab1214..ff5e403cd8f 100644
--- a/api/src/org/labkey/api/module/DatabaseMigration.java
+++ b/api/src/org/labkey/api/module/DatabaseMigration.java
@@ -56,6 +56,8 @@ public static void migrate(boolean shouldInsertData, @Nullable String migrationD
         if (migrationDataSource != null)
             migrateDatabase(migrationDataSource);
+
+        System.exit(0);
     }
 }
@@ -281,8 +283,6 @@ private static void migrateDatabase(String migrationDataSource)
         executor.execute("DELETE FROM exp.PropertyDescriptor WHERE PropertyId NOT IN (SELECT PropertyId FROM exp.PropertyDomain)");

         LOG.info("Database migration is complete");
-
-        System.exit(0);
     }

From 63a1877156a8f9b443562b722ab1c3a444e77963 Mon Sep 17 00:00:00 2001
From: Adam Rauch
Date: Mon, 28 Jul 2025 16:30:30 -0700
Subject: [PATCH 13/17] Don't redirect to a string

---
 core/src/org/labkey/core/login/LoginController.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/src/org/labkey/core/login/LoginController.java b/core/src/org/labkey/core/login/LoginController.java
index 4114d563504..ecf726d5144 100644
--- a/core/src/org/labkey/core/login/LoginController.java
+++ b/core/src/org/labkey/core/login/LoginController.java
@@ -1550,7 +1550,7 @@ public ModelAndView getView(SsoRedirectForm form, BindException errors)

             url = configuration.getUrl(csrf, getViewContext());

-            return HttpView.redirect(url.getURIString());
+            return HttpView.redirect(url, true);
         }

         @Override

From e349fa070a3e4a30277a6414d01f178938dd0e7b Mon Sep 17 00:00:00 2001
From: Adam Rauch
Date: Tue, 29 Jul 2025 15:06:06 -0700
Subject: [PATCH 14/17] Migrate provisioned schemas as well!

---
 .../api/exp/api/ProvisionedDbSchema.java      |  1 -
 .../api/exp/api/StorageProvisioner.java       |  6 ++
 .../labkey/api/module/DatabaseMigration.java  | 69 +++++++++++++------
 .../api/property/StorageProvisionerImpl.java  | 42 ++++++++---
 4 files changed, 87 insertions(+), 31 deletions(-)

diff --git a/api/src/org/labkey/api/exp/api/ProvisionedDbSchema.java b/api/src/org/labkey/api/exp/api/ProvisionedDbSchema.java
index ecd47f2b4f3..090180242d7 100644
--- a/api/src/org/labkey/api/exp/api/ProvisionedDbSchema.java
+++ b/api/src/org/labkey/api/exp/api/ProvisionedDbSchema.java
@@ -31,7 +31,6 @@
 /**
  * A schema in the underlying database that is populated by tables created (provisioned) dynamically
  * based on administrator or other input into what columns/fields should be tracked.
- * Created by klum on 2/23/14.
  */
 public class ProvisionedDbSchema extends DbSchema
 {
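PATCH 11 above reduces the copy loop to one setObject() call per value, dropping the hand-rolled boolean-to-integer conversion for Deleted columns (presumably the driver's coercion, or corrected target column types, makes it unnecessary). A sketch of that row-copy shape in plain JDBC; the method, its parameters, and the caller-supplied SQL are assumptions for illustration, not LabKey API:

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;
    import java.util.List;
    import java.util.Map;

    public class RowCopyDemo
    {
        // Bind one source row's values in target-column order, then insert it. The column
        // order here must match the placeholder order of insertSql, just as sourceColumns
        // must match the generated INSERT in the patch.
        static void copyRow(Connection target, String insertSql, List<String> columnNames, Map<String, Object> row) throws SQLException
        {
            try (PreparedStatement stmt = target.prepareStatement(insertSql))
            {
                int i = 1;
                for (String name : columnNames)
                    stmt.setObject(i++, row.get(name)); // driver coerces compatible types
                stmt.execute();
            }
        }
    }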
diff --git a/api/src/org/labkey/api/exp/api/StorageProvisioner.java b/api/src/org/labkey/api/exp/api/StorageProvisioner.java
index 8412f240440..52a05ca79a0 100644
--- a/api/src/org/labkey/api/exp/api/StorageProvisioner.java
+++ b/api/src/org/labkey/api/exp/api/StorageProvisioner.java
@@ -77,6 +77,12 @@ static TableInfo createTableInfo(@NotNull Domain domain)
      */
     String ensureStorageTable(Domain domain, DomainKind kind, DbScope scope);

+    /**
+     * Used by DatabaseMigration only. Creates the storage table associated with this domain, using the storage table
+     * name provided by the domain.
+     */
+    void createStorageTable(Domain domain, DomainKind kind, DbScope scope);
+
     void dropNotRequiredIndices(Domain domain);
     void addMissingRequiredIndices(Domain domain);
     void addTableIndices(Domain domain, Set indices, TableChange.IndexSizeMode sizeMode);

diff --git a/api/src/org/labkey/api/module/DatabaseMigration.java b/api/src/org/labkey/api/module/DatabaseMigration.java
index ff5e403cd8f..c99038f1310 100644
--- a/api/src/org/labkey/api/module/DatabaseMigration.java
+++ b/api/src/org/labkey/api/module/DatabaseMigration.java
@@ -7,6 +7,8 @@
 import org.labkey.api.collections.CopyOnWriteCaseInsensitiveHashMap;
 import org.labkey.api.collections.LabKeyCollectors;
 import org.labkey.api.data.ColumnInfo;
+import org.labkey.api.data.Container;
+import org.labkey.api.data.ContainerManager;
 import org.labkey.api.data.CoreSchema;
 import org.labkey.api.data.DatabaseTableType;
 import org.labkey.api.data.DbSchema;
@@ -19,6 +21,9 @@
 import org.labkey.api.data.Table;
 import org.labkey.api.data.TableInfo;
 import org.labkey.api.data.TableSelector;
+import org.labkey.api.exp.api.StorageProvisioner;
+import org.labkey.api.exp.property.Domain;
+import org.labkey.api.exp.property.PropertyService;
 import org.labkey.api.query.TableSorter;
 import org.labkey.api.util.ConfigurationException;
 import org.labkey.api.util.StringUtilsLabKey;
@@ -37,6 +42,7 @@
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
+import java.util.function.BiFunction;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;

@@ -153,20 +159,51 @@ private static void migrateDatabase(String migrationDataSource)
                 JOIN sys.identity_columns identity_columns ON tables.object_id = identity_columns.object_id
                 WHERE last_value IS NOT NULL""";

-        Map<String, Map<String, Sequence>> sequences = new HashMap<>();
+        Map<String, Map<String, Sequence>> sequenceMap = new HashMap<>();
         new SqlSelector(sourceScope, sequenceQuery).forEach(rs -> {
             Sequence sequence = new Sequence(rs.getString("SchemaName"), rs.getString("TableName"), rs.getString("ColumnName"), rs.getInt("last_value"));
-            Map<String, Sequence> schemaMap = sequences.computeIfAbsent(sequence.schemaName(), s -> new HashMap<>());
+            Map<String, Sequence> schemaMap = sequenceMap.computeIfAbsent(sequence.schemaName(), s -> new HashMap<>());
             schemaMap.put(sequence.tableName(), sequence);
         });
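The sequenceQuery above harvests each SQL Server identity column's last_value; after the rows are copied, the matching PostgreSQL sequences have to be fast-forwarded past those values or new inserts would collide with migrated keys. A hypothetical sketch of that replay step in plain JDBC (the patch routes its SQL through LabKey's dialect layer instead; pg_get_serial_sequence and setval are standard PostgreSQL functions):

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    public class SequenceSyncDemo
    {
        // Bump the sequence backing a SERIAL column past the highest identity value
        // observed on SQL Server, so future inserts start above the migrated keys
        static void syncSequence(Connection pg, String schema, String table, String column, long lastValue) throws SQLException
        {
            String sql = "SELECT setval(pg_get_serial_sequence(?, ?), ?)";
            try (PreparedStatement stmt = pg.prepareStatement(sql))
            {
                stmt.setString(1, schema + "." + table);
                stmt.setString(2, column.toLowerCase()); // unquoted identifiers are stored lower-cased
                stmt.setLong(3, lastValue);
                stmt.execute();
            }
        }
    }

The bootstrap script itself uses the same primitive for core.Principals, as the SELECT SETVAL('core.Principals_UserId_Seq', 1000) line added in PATCH 15 below shows.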
-        // Get target schemas in module order, which helps with foreign key relationships
-        List<DbSchema> schemas = ModuleLoader.getInstance().getModules().stream()
+        // Get the target module schemas in module order, which helps with foreign key relationships
+        List<DbSchema> targetModuleSchemas = ModuleLoader.getInstance().getModules().stream()
             .flatMap(module -> module.getSchemaNames().stream().filter(name -> !module.getProvisionedSchemaNames().contains(name)))
             .map(name -> targetScope.getSchema(name, DbSchemaType.Module))
             .toList();

-        for (DbSchema targetSchema : schemas)
+        // Migrate all data in the module schemas
+        migrateSchemas(migrationDataSource, sourceScope, targetScope, targetModuleSchemas, (schema, handler) -> handler.getTablesToCopy(schema), sequenceMap);
+
+        // Create all provisioned tables listed in exp.DomainDescriptor
+        PropertyService svc = PropertyService.get();
+        StorageProvisioner provisioner = StorageProvisioner.get();
+        new SqlSelector(targetScope, "SELECT Container, DomainURI, Name FROM exp.DomainDescriptor WHERE StorageSchemaName IS NOT NULL").forEach(rs -> {
+            Container c = ContainerManager.getForId(rs.getString("Container"));
+            if (c != null)
+            {
+                String domainURI = rs.getString("DomainURI");
+                String name = rs.getString("Name");
+                Domain d = svc.ensureDomain(c, null, domainURI, name);
+                provisioner.createStorageTable(d, d.getDomainKind(), targetScope);
+            }
+        });
+
+        // Get the target provisioned schemas
+        List<DbSchema> targetProvisionedSchemas = ModuleLoader.getInstance().getModules().stream()
+            .flatMap(module -> module.getSchemaNames().stream().filter(name -> module.getProvisionedSchemaNames().contains(name)))
+            .map(name -> targetScope.getSchema(name, DbSchemaType.Bare))
+            .toList();
+
+        // Migrate all data in the provisioned schemas
+        migrateSchemas(migrationDataSource, sourceScope, targetScope, targetProvisionedSchemas, (schema, handler) -> getTables(schema), sequenceMap);
+
+        LOG.info("Database migration is complete");
+    }
+
+    private static void migrateSchemas(String migrationDataSource, DbScope sourceScope, DbScope targetScope, List<DbSchema> targetSchemas, BiFunction<DbSchema, MigrationHandler, List<TableInfo>> tableProducer, Map<String, Map<String, Sequence>> sequenceMap)
+    {
         for (DbSchema targetSchema : targetSchemas)
         {
             DbSchema sourceSchema = sourceScope.getSchema(targetSchema.getName(), DbSchemaType.Bare);
             if (!sourceSchema.existsInDatabase())
@@ -178,23 +215,23 @@ private static void migrateDatabase(String migrationDataSource)
             MigrationHandler handler = getHandler(targetSchema);
             handler.beforeSchema(targetSchema);

-            List<SchemaTableInfo> sourceTables = getTables(sourceSchema);
-            List<SchemaTableInfo> targetTables = getTables(targetSchema);
+            List<TableInfo> sourceTables = getTables(sourceSchema);
+            List<TableInfo> targetTables = getTables(targetSchema);

             if (sourceTables.size() != targetTables.size())
             {
                 LOG.warn("{} schemas have different table counts, {} vs. {}", sourceSchema.getName(), sourceTables.size(), targetTables.size());
{}", sourceSchema.getName(), sourceTables.size(), targetTables.size()); - Set sourceTableNames = sourceTables.stream().map(SchemaTableInfo::getName).collect(LabKeyCollectors.toCaseInsensitiveHashSet()); - Set targetTableNames = targetTables.stream().map(SchemaTableInfo::getName).collect(LabKeyCollectors.toCaseInsensitiveHashSet()); + Set sourceTableNames = sourceTables.stream().map(TableInfo::getName).collect(LabKeyCollectors.toCaseInsensitiveHashSet()); + Set targetTableNames = targetTables.stream().map(TableInfo::getName).collect(LabKeyCollectors.toCaseInsensitiveHashSet()); Set sourceTableNamesCopy = new CaseInsensitiveHashSet(sourceTableNames); sourceTableNames.removeAll(targetTableNames); targetTableNames.removeAll(sourceTableNamesCopy); LOG.warn("Table differences: {} and {}", sourceTableNames, targetTableNames); } - Map schemaSequenceMap = sequences.getOrDefault(sourceSchema.getName(), Map.of()); + Map schemaSequenceMap = sequenceMap.getOrDefault(sourceSchema.getName(), Map.of()); - for (TableInfo targetTable : handler.getTablesToCopy(targetSchema)) + for (TableInfo targetTable : tableProducer.apply(targetSchema, handler)) { String targetTableName = targetTable.getName(); SchemaTableInfo sourceTable = sourceSchema.getTable(targetTableName); @@ -275,17 +312,9 @@ private static void migrateDatabase(String migrationDataSource) handler.afterSchema(targetSchema); } } - - // We can't handle provisioned tables yet, so (for now) delete all copied audit table remnants, allowing the new DB to create these tables on restart - SqlExecutor executor = new SqlExecutor(targetScope); - executor.execute("DELETE FROM exp.PropertyDomain WHERE DomainId IN (SELECT DomainID FROM exp.DomainDescriptor WHERE storageschemaname = 'audit')"); - executor.execute("DELETE FROM exp.DomainDescriptor WHERE storageschemaname = 'audit'"); - executor.execute("DELETE FROM exp.PropertyDescriptor WHERE PropertyId NOT IN (SELECT PropertyId FROM exp.PropertyDomain)"); - - LOG.info("Database migration is complete"); } - private static List getTables(DbSchema schema) + private static List getTables(DbSchema schema) { return new ArrayList<>(schema.getTableNames().stream() .map(schema::getTable) diff --git a/experiment/src/org/labkey/experiment/api/property/StorageProvisionerImpl.java b/experiment/src/org/labkey/experiment/api/property/StorageProvisionerImpl.java index 04ddb0813c5..110529fda40 100644 --- a/experiment/src/org/labkey/experiment/api/property/StorageProvisionerImpl.java +++ b/experiment/src/org/labkey/experiment/api/property/StorageProvisionerImpl.java @@ -136,7 +136,7 @@ private StorageProvisionerImpl() .expireAfterWrite(1, TimeUnit.DAYS) .build(); - private String _create(DbScope scope, DomainKind kind, Domain domain) + private String _create(DbScope scope, DomainKind kind, Domain domain, boolean useProvidedStorageName) { //noinspection AssertWithSideEffects assert create.start(); @@ -148,18 +148,31 @@ private String _create(DbScope scope, DomainKind kind, Domain domain) DomainDescriptor dd = OntologyManager.getDomainDescriptor(domain.getTypeId()); if (null == dd) { - log.warn("Can't find domain descriptor: " + domain.getTypeId() + " " + domain.getTypeURI()); + log.warn("Can't find domain descriptor: {} {}", domain.getTypeId(), domain.getTypeURI()); transaction.commit(); return null; } String tableName = dd.getStorageTableName(); - if (null != tableName) + + if (useProvidedStorageName) { - transaction.commit(); - return tableName; + if (null == tableName) + { + log.warn("Storage table name was null: {} {}", 
diff --git a/experiment/src/org/labkey/experiment/api/property/StorageProvisionerImpl.java b/experiment/src/org/labkey/experiment/api/property/StorageProvisionerImpl.java
index 04ddb0813c5..110529fda40 100644
--- a/experiment/src/org/labkey/experiment/api/property/StorageProvisionerImpl.java
+++ b/experiment/src/org/labkey/experiment/api/property/StorageProvisionerImpl.java
@@ -136,7 +136,7 @@ private StorageProvisionerImpl()
         .expireAfterWrite(1, TimeUnit.DAYS)
         .build();

-    private String _create(DbScope scope, DomainKind kind, Domain domain)
+    private String _create(DbScope scope, DomainKind kind, Domain domain, boolean useProvidedStorageName)
     {
         //noinspection AssertWithSideEffects
         assert create.start();
@@ -148,18 +148,31 @@ private String _create(DbScope scope, DomainKind kind, Domain domain)
             DomainDescriptor dd = OntologyManager.getDomainDescriptor(domain.getTypeId());
             if (null == dd)
             {
-                log.warn("Can't find domain descriptor: " + domain.getTypeId() + " " + domain.getTypeURI());
+                log.warn("Can't find domain descriptor: {} {}", domain.getTypeId(), domain.getTypeURI());
                 transaction.commit();
                 return null;
             }

             String tableName = dd.getStorageTableName();
-            if (null != tableName)
+
+            if (useProvidedStorageName)
             {
-                transaction.commit();
-                return tableName;
+                if (null == tableName)
+                {
+                    log.warn("Storage table name was null: {} {}", domain.getTypeId(), domain.getTypeURI());
+                    transaction.commit();
+                    return null;
+                }
             }
+            else
+            {
+                if (null != tableName)
+                {
+                    transaction.commit();
+                    return tableName;
+                }

-            tableName = makeTableName(kind, domain);
+                tableName = makeTableName(kind, domain);
+            }

             TableChange change = new TableChange(domain, ChangeType.CreateTable, tableName);
             Set<String> base = Sets.newCaseInsensitiveHashSet();
@@ -215,12 +228,15 @@
                 throw re;
             }

-            DomainDescriptor editDD = dd.edit()
+            if (!useProvidedStorageName)
+            {
+                DomainDescriptor editDD = dd.edit()
                     .setStorageTableName(tableName)
                     .setStorageSchemaName(kind.getStorageSchemaName())
                     .build();

-            OntologyManager.ensureDomainDescriptor(editDD);
+                OntologyManager.ensureDomainDescriptor(editDD);
+            }

             kind.invalidate(domain);

@@ -346,7 +362,7 @@ public void addProperties(Domain domain, Collection properties,
         if (null == domain.getStorageTableName())
         {
-            _create(scope, kind, domain);
+            _create(scope, kind, domain, false);
             return;
         }

@@ -826,13 +842,19 @@ public String ensureStorageTable(Domain domain, DomainKind kind, DbScope scop
         {
             try (var ignored = SpringActionController.ignoreSqlUpdates())
             {
-                tableName = _create(scope, kind, domain);
+                tableName = _create(scope, kind, domain, false);
             }
         }

         return tableName;
     }

+    @Override
+    public void createStorageTable(Domain domain, DomainKind kind, DbScope scope)
+    {
+        _create(scope, kind, domain, true);
+    }
+
     enum RequiredIndicesAction
     {
         Drop

From 8a4fbf530e7b75b1a2c6e1b4850f28b86865f76a Mon Sep 17 00:00:00 2001
From: Adam Rauch
Date: Thu, 31 Jul 2025 12:36:30 -0700
Subject: [PATCH 15/17] Code review feedback

---
 .../labkey/api/module/DatabaseMigration.java  | 26 +++++++------------
 audit/src/org/labkey/audit/AuditLogImpl.java  |  1 +
 .../postgresql/core-0.000-24.000.sql          |  4 ++-
 .../api/property/StorageProvisionerImpl.java  |  4 +--
 4 files changed, 15 insertions(+), 20 deletions(-)

diff --git a/api/src/org/labkey/api/module/DatabaseMigration.java b/api/src/org/labkey/api/module/DatabaseMigration.java
index c99038f1310..0f08cd2bc11 100644
--- a/api/src/org/labkey/api/module/DatabaseMigration.java
+++ b/api/src/org/labkey/api/module/DatabaseMigration.java
@@ -83,7 +83,7 @@ private static void verifyEmptySchemas()

         Map<Boolean, List<DbSchema>> schemaMap = scope.getSchemaNames().stream()
             .map(name -> scope.getSchema(name, DbSchemaType.Unknown))
-            .collect(Collectors.partitioningBy(schema -> schema.isModuleSchema() && schema.getModule().getSupportedDatabasesSet().contains(SupportedDatabase.mssql)));
+            .collect(Collectors.partitioningBy(schema -> schema.getModule() != null && schema.getModule().getSupportedDatabasesSet().contains(SupportedDatabase.mssql)));

         List<DbSchema> targetSchemas = schemaMap.get(true);
         List<String> tableWarnings = targetSchemas.stream()
@@ -215,19 +215,13 @@ private static void migrateSchemas(String migrationDataSource, DbScope sourceSco
             MigrationHandler handler = getHandler(targetSchema);
             handler.beforeSchema(targetSchema);

-            List<TableInfo> sourceTables = getTables(sourceSchema);
-            List<TableInfo> targetTables = getTables(targetSchema);
-
-            if (sourceTables.size() != targetTables.size())
-            {
-                LOG.warn("{} schemas have different table counts, {} vs. {}", sourceSchema.getName(), sourceTables.size(), targetTables.size());
{}", sourceSchema.getName(), sourceTables.size(), targetTables.size()); - Set sourceTableNames = sourceTables.stream().map(TableInfo::getName).collect(LabKeyCollectors.toCaseInsensitiveHashSet()); - Set targetTableNames = targetTables.stream().map(TableInfo::getName).collect(LabKeyCollectors.toCaseInsensitiveHashSet()); - Set sourceTableNamesCopy = new CaseInsensitiveHashSet(sourceTableNames); - sourceTableNames.removeAll(targetTableNames); - targetTableNames.removeAll(sourceTableNamesCopy); - LOG.warn("Table differences: {} and {}", sourceTableNames, targetTableNames); - } + Set sourceTableNames = getTables(sourceSchema).stream().map(TableInfo::getName).collect(LabKeyCollectors.toCaseInsensitiveHashSet()); + Set targetTableNames = getTables(targetSchema).stream().map(TableInfo::getName).collect(LabKeyCollectors.toCaseInsensitiveHashSet()); + Set sourceTableNamesCopy = new CaseInsensitiveHashSet(sourceTableNames); + sourceTableNames.removeAll(targetTableNames); + targetTableNames.removeAll(sourceTableNamesCopy); + if (!sourceTableNames.isEmpty() || !targetTableNames.isEmpty()) + LOG.warn("Table differences in {} schema: {} and {}", sourceSchema.getName(), sourceTableNames, targetTableNames); Map schemaSequenceMap = sequenceMap.getOrDefault(sourceSchema.getName(), Map.of()); @@ -260,14 +254,14 @@ private static void migrateSchemas(String migrationDataSource, DbScope sourceSco .toList(); String q = StringUtils.join(Collections.nCopies(sourceColumns.size(), "?"), ", "); SQLFragment sql = new SQLFragment("INSERT INTO ") - .append(targetTable.getSelectName()) + .append(targetTable) .append("("); String sep = ""; for (ColumnInfo targetColumn : targetColumns) { sql.append(sep) - .append(targetColumn.getSelectIdentifier().getSql()); + .appendIdentifier(targetColumn.getSelectIdentifier()); sep = ", "; } diff --git a/audit/src/org/labkey/audit/AuditLogImpl.java b/audit/src/org/labkey/audit/AuditLogImpl.java index d93a35c3c31..48b89f72463 100644 --- a/audit/src/org/labkey/audit/AuditLogImpl.java +++ b/audit/src/org/labkey/audit/AuditLogImpl.java @@ -93,6 +93,7 @@ public static AuditLogImpl get() private AuditLogImpl() { + // If we're migrating, avoid creating all the audit log tables and inserting the queued events if (ModuleLoader.getInstance().shouldInsertData()) ContextListener.addStartupListener(this); } diff --git a/core/resources/schemas/dbscripts/postgresql/core-0.000-24.000.sql b/core/resources/schemas/dbscripts/postgresql/core-0.000-24.000.sql index b359bb2d40a..095d3c08490 100644 --- a/core/resources/schemas/dbscripts/postgresql/core-0.000-24.000.sql +++ b/core/resources/schemas/dbscripts/postgresql/core-0.000-24.000.sql @@ -46,7 +46,7 @@ CREATE TABLE core.Logins CREATE TABLE core.Principals ( - UserId INT GENERATED BY DEFAULT AS IDENTITY ( MINVALUE 1000 ), -- user or group + UserId SERIAL, -- user or group Container ENTITYID, -- NULL for all users, NOT NULL for _ALL_ groups OwnerId ENTITYID NULL, Name VARCHAR(64), -- email (must contain @ and .), group name (no punctuation), or hidden (no @) @@ -57,6 +57,8 @@ CREATE TABLE core.Principals CONSTRAINT UQ_Principals_Container_Name_OwnerId UNIQUE (Container, Name, OwnerId) ); +SELECT SETVAL('core.Principals_UserId_Seq', 1000); + -- maps users to groups CREATE TABLE core.Members ( diff --git a/experiment/src/org/labkey/experiment/api/property/StorageProvisionerImpl.java b/experiment/src/org/labkey/experiment/api/property/StorageProvisionerImpl.java index 110529fda40..5e451c144da 100644 --- 
diff --git a/experiment/src/org/labkey/experiment/api/property/StorageProvisionerImpl.java b/experiment/src/org/labkey/experiment/api/property/StorageProvisionerImpl.java
index 110529fda40..5e451c144da 100644
--- a/experiment/src/org/labkey/experiment/api/property/StorageProvisionerImpl.java
+++ b/experiment/src/org/labkey/experiment/api/property/StorageProvisionerImpl.java
@@ -158,9 +158,7 @@ private String _create(DbScope scope, DomainKind kind, Domain domain, boolean
             {
                 if (null == tableName)
                 {
-                    log.warn("Storage table name was null: {} {}", domain.getTypeId(), domain.getTypeURI());
-                    transaction.commit();
-                    return null;
+                    throw new RuntimeException("Storage table name was null: " + domain.getTypeId() + " " + domain.getTypeURI());
                 }
             }
             else

From eae01091b025d97d85052d243f5c1870ae6253cd Mon Sep 17 00:00:00 2001
From: Adam Rauch
Date: Fri, 1 Aug 2025 10:57:43 -0700
Subject: [PATCH 16/17] Work around exp FK cycle

---
 .../labkey/api/module/DatabaseMigration.java  |  1 +
 api/src/org/labkey/api/query/TableSorter.java |  8 ++++---
 .../labkey/experiment/ExperimentModule.java   | 23 ++++++++++++++++++-
 3 files changed, 28 insertions(+), 4 deletions(-)

diff --git a/api/src/org/labkey/api/module/DatabaseMigration.java b/api/src/org/labkey/api/module/DatabaseMigration.java
index 0f08cd2bc11..f3132d5bc92 100644
--- a/api/src/org/labkey/api/module/DatabaseMigration.java
+++ b/api/src/org/labkey/api/module/DatabaseMigration.java
@@ -236,6 +236,7 @@ private static void migrateSchemas(String migrationDataSource, DbScope sourceSco
                 }
                 else
                 {
+                    LOG.info("Migrating '{}'", targetSchema.getName() + "." + targetTableName);
                     // Inspect target table to determine column names to select from source table
                     Set<String> selectColumnNames = targetTable.getColumns().stream()
                         .filter(column -> column.getWrappedColumnName() == null) // Ignore wrapped columns

diff --git a/api/src/org/labkey/api/query/TableSorter.java b/api/src/org/labkey/api/query/TableSorter.java
index b42f54a12e0..5af06601395 100644
--- a/api/src/org/labkey/api/query/TableSorter.java
+++ b/api/src/org/labkey/api/query/TableSorter.java
@@ -159,14 +159,16 @@ private static List sort(String schemaName, Map ta
     private static void depthFirstWalk(String schemaName, Map tables, TableInfo table, Set visited, LinkedList> visitingPath, List sorted, boolean tolerateLoops)
     {
-        if (!tolerateLoops && hasLoop(visitingPath, table))
+        if (hasLoop(visitingPath, table))
         {
             String msg = "Loop detected in schema '" + schemaName + "':\n" + formatPath(visitingPath);

-            if (anyHaveContainerColumn(visitingPath))
+            if (!tolerateLoops && anyHaveContainerColumn(visitingPath))
                 throw new IllegalStateException(msg);

             LOG.warn(msg);
-            return;
+
+            if (!tolerateLoops)
+                return;
         }

         if (visited.contains(table))
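The TableSorter change lets the depth-first foreign-key walk log a cycle and keep ordering tables when the caller opts in, rather than always bailing out. A toy version of that walk; it is not the real TableSorter (for one thing, it returns at the loop instead of continuing through it), and the deps map stands in for FK metadata:

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.Deque;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    public class FkSortDemo
    {
        // Post-order DFS: each table lands in 'sorted' after everything it references.
        // On a cycle, either fail fast or warn and accept a partial order.
        static void walk(Map<String, List<String>> deps, String table, Set<String> visited,
                         Deque<String> path, List<String> sorted, boolean tolerateLoops)
        {
            if (path.contains(table))
            {
                String msg = "Loop detected: " + path + " -> " + table;
                if (!tolerateLoops)
                    throw new IllegalStateException(msg);
                System.err.println(msg);
                return; // tolerate: break the cycle here and keep going
            }
            if (!visited.add(table))
                return;
            path.addLast(table);
            for (String dep : deps.getOrDefault(table, List.of()))
                walk(deps, dep, visited, path, sorted, tolerateLoops);
            path.removeLast();
            sorted.add(table); // dependencies first
        }

        public static void main(String[] args)
        {
            Map<String, List<String>> deps = Map.of(
                "ExperimentRun", List.of("ProtocolApplication"),
                "ProtocolApplication", List.of("ExperimentRun")); // the exp cycle PATCH 16 works around
            List<String> sorted = new ArrayList<>();
            walk(deps, "ExperimentRun", new HashSet<>(), new ArrayDeque<>(), sorted, true);
            System.out.println(sorted); // [ProtocolApplication, ExperimentRun]
        }
    }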
diff --git a/experiment/src/org/labkey/experiment/ExperimentModule.java b/experiment/src/org/labkey/experiment/ExperimentModule.java
index b9ed0c2dc6c..cc805c6acff 100644
--- a/experiment/src/org/labkey/experiment/ExperimentModule.java
+++ b/experiment/src/org/labkey/experiment/ExperimentModule.java
@@ -32,6 +32,7 @@
 import org.labkey.api.data.JdbcType;
 import org.labkey.api.data.NameGenerator;
 import org.labkey.api.data.SQLFragment;
+import org.labkey.api.data.SqlExecutor;
 import org.labkey.api.data.SqlSelector;
 import org.labkey.api.data.TableInfo;
 import org.labkey.api.data.TableSelector;
@@ -70,6 +71,8 @@
 import org.labkey.api.exp.xar.LsidUtils;
 import org.labkey.api.files.FileContentService;
 import org.labkey.api.files.TableUpdaterFileListener;
+import org.labkey.api.module.DatabaseMigration;
+import org.labkey.api.module.DatabaseMigration.DefaultMigrationHandler;
 import org.labkey.api.module.ModuleContext;
 import org.labkey.api.module.ModuleLoader;
 import org.labkey.api.module.SpringModule;
@@ -87,6 +90,7 @@
 import org.labkey.api.usageMetrics.UsageMetricsService;
 import org.labkey.api.util.JspTestCase;
 import org.labkey.api.util.PageFlowUtil;
+import org.labkey.api.util.StringUtilsLabKey;
 import org.labkey.api.util.SystemMaintenance;
 import org.labkey.api.view.AlwaysAvailableWebPartFactory;
 import org.labkey.api.view.BaseWebPartFactory;
@@ -850,6 +854,23 @@ HAVING COUNT(*) > 1
                 return results;
             });
         }
+
+        // Work around foreign key cycle between ExperimentRun <-> ProtocolApplication by temporarily dropping FK_Run_WorfklowTask
+        DatabaseMigration.registerHandler(OntologyManager.getExpSchema(), new DefaultMigrationHandler()
+        {
+            @Override
+            public void beforeSchema(DbSchema targetSchema)
+            {
+                // Yes, the FK name is misspelled
+                new SqlExecutor(targetSchema).execute("ALTER TABLE exp.ExperimentRun DROP CONSTRAINT FK_Run_WorfklowTask");
+            }
+
+            @Override
+            public void afterSchema(DbSchema targetSchema)
+            {
+                new SqlExecutor(targetSchema).execute("ALTER TABLE exp.ExperimentRun ADD CONSTRAINT FK_Run_WorfklowTask FOREIGN KEY (WorkflowTask) REFERENCES exp.ProtocolApplication (RowId) MATCH SIMPLE ON DELETE SET NULL");
+            }
+        });
     }

     @Override
@@ -859,7 +880,7 @@ public Collection<String> getSummary(Container c)
         Collection<String> list = new LinkedList<>();
         int runGroupCount = ExperimentService.get().getExperiments(c, null, false, true).size();
         if (runGroupCount > 0)
-            list.add("" + runGroupCount + " Run Group" + (runGroupCount > 1 ? "s" : ""));
+            list.add(StringUtilsLabKey.pluralize(runGroupCount, "Run Group"));

         User user = HttpView.currentContext().getUser();

From 83d84aa6905f77ed4373bee444d27d1f92c8de22 Mon Sep 17 00:00:00 2001
From: Adam Rauch
Date: Fri, 1 Aug 2025 12:47:20 -0700
Subject: [PATCH 17/17] Run in synchronous mode when migrating

---
 api/src/org/labkey/api/module/ModuleLoader.java | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/api/src/org/labkey/api/module/ModuleLoader.java b/api/src/org/labkey/api/module/ModuleLoader.java
index 8a628ef7d6b..35d7f87e908 100644
--- a/api/src/org/labkey/api/module/ModuleLoader.java
+++ b/api/src/org/labkey/api/module/ModuleLoader.java
@@ -636,7 +636,7 @@ private void doInit(Execution execution) throws ServletException
             {
                 _log.info("Check complete: all LabKey-managed modules are recent enough to upgrade");
             }
-            }
+        }

         boolean coreRequiredUpgrade = upgradeCoreModule(lockFile);
@@ -764,6 +764,10 @@ public void addStaticWarnings(@NotNull Warnings warnings, boolean showAllWarning
         if (!modulesRequiringUpgrade.isEmpty() || !additionalSchemasRequiringUpgrade.isEmpty())
             setUpgradeState(UpgradeState.UpgradeRequired);

+        // Don't accept any requests if we're bootstrapping empty schemas or migrating from SQL Server
+        if (!shouldInsertData())
+            execution = Execution.Synchronous;
+
         startNonCoreUpgradeAndStartup(execution, lockFile);

         _log.info("LabKey Server startup is complete; {}", execution.getLogMessage());
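A closing note on PATCH 16: dropping FK_Run_WorfklowTask around the copy is one way to break the ExperimentRun/ProtocolApplication cycle. PostgreSQL's deferrable constraints are another; the check moves to commit time, so both tables can load inside a single transaction. A hypothetical sketch of that alternative, which assumes the constraint had been created DEFERRABLE (the scripts here do not do that), and which, unlike dropping the FK, still pays the per-row check cost at commit:

    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class DeferredFkDemo
    {
        // Load both halves of a mutually referencing pair in one transaction.
        // Requires the FK to have been declared DEFERRABLE; bulkLoad is caller-supplied.
        static void loadCycle(Connection pg, Runnable bulkLoad) throws SQLException
        {
            pg.setAutoCommit(false);
            try (Statement stmt = pg.createStatement())
            {
                // Deferrable FK checks wait until commit instead of firing per row
                stmt.execute("SET CONSTRAINTS ALL DEFERRED");
                bulkLoad.run(); // copy exp.ProtocolApplication and exp.ExperimentRun in either order
                pg.commit();
            }
            catch (SQLException e)
            {
                pg.rollback();
                throw e;
            }
        }
    }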