diff --git a/api/src/org/labkey/api/data/RecordFactory.java b/api/src/org/labkey/api/data/RecordFactory.java index abd5470f2b0..4b4c5a201c6 100644 --- a/api/src/org/labkey/api/data/RecordFactory.java +++ b/api/src/org/labkey/api/data/RecordFactory.java @@ -52,7 +52,7 @@ public RecordFactory(Class clazz) { Object[] params = Arrays.stream(_parameters).map(p -> { Object value = m.get(p.getName()); - return ConvertUtils.convert(value, p.getType()); + return value != null ? ConvertUtils.convert(value, p.getType()) : null; }).toArray(); try diff --git a/api/src/org/labkey/api/data/SqlScriptExecutor.java b/api/src/org/labkey/api/data/SqlScriptExecutor.java index 756b29891b0..e8df99cc377 100644 --- a/api/src/org/labkey/api/data/SqlScriptExecutor.java +++ b/api/src/org/labkey/api/data/SqlScriptExecutor.java @@ -221,6 +221,7 @@ public void execute() { LOG.info("Executing {}", upgradeMethod.getDisplayName()); _moduleContext.invokeUpgradeMethod(upgradeMethod); + LOG.info("Finished executing {}", upgradeMethod.getDisplayName()); } } catch (NoSuchMethodException e) diff --git a/api/src/org/labkey/api/data/dialect/PostgreSqlService.java b/api/src/org/labkey/api/data/dialect/PostgreSqlService.java new file mode 100644 index 00000000000..fec37101cbc --- /dev/null +++ b/api/src/org/labkey/api/data/dialect/PostgreSqlService.java @@ -0,0 +1,18 @@ +package org.labkey.api.data.dialect; + +import org.labkey.api.services.ServiceRegistry; + +public interface PostgreSqlService +{ + static PostgreSqlService get() + { + return ServiceRegistry.get().getService(PostgreSqlService.class); + } + + static void setInstance(PostgreSqlService impl) + { + ServiceRegistry.get().registerService(PostgreSqlService.class, impl); + } + + BasePostgreSqlDialect getDialect(); +} diff --git a/core/src/org/labkey/core/CoreModule.java b/core/src/org/labkey/core/CoreModule.java index 0698a8e0f08..4c5cc386958 100644 --- a/core/src/org/labkey/core/CoreModule.java +++ b/core/src/org/labkey/core/CoreModule.java @@ 
-77,6 +77,7 @@ import org.labkey.api.data.TestSchema; import org.labkey.api.data.WorkbookContainerType; import org.labkey.api.data.dialect.BasePostgreSqlDialect; +import org.labkey.api.data.dialect.PostgreSqlService; import org.labkey.api.data.dialect.SqlDialect; import org.labkey.api.data.dialect.SqlDialectManager; import org.labkey.api.data.dialect.SqlDialectRegistry; @@ -88,6 +89,7 @@ import org.labkey.api.files.FileBrowserConfigWriter; import org.labkey.api.files.FileContentService; import org.labkey.api.markdown.MarkdownService; +import org.labkey.api.mcp.McpService; import org.labkey.api.message.settings.MessageConfigService; import org.labkey.api.migration.DatabaseMigrationService; import org.labkey.api.module.FolderType; @@ -98,7 +100,6 @@ import org.labkey.api.module.SchemaUpdateType; import org.labkey.api.module.SpringModule; import org.labkey.api.module.Summary; -import org.labkey.api.mcp.McpService; import org.labkey.api.notification.EmailMessage; import org.labkey.api.notification.EmailService; import org.labkey.api.notification.NotificationMenuView; @@ -253,9 +254,9 @@ import org.labkey.core.login.DbLoginAuthenticationProvider; import org.labkey.core.login.DbLoginManager; import org.labkey.core.login.LoginController; +import org.labkey.core.mcp.McpServiceImpl; import org.labkey.core.metrics.SimpleMetricsServiceImpl; import org.labkey.core.metrics.WebSocketConnectionManager; -import org.labkey.core.mcp.McpServiceImpl; import org.labkey.core.notification.EmailPreferenceConfigServiceImpl; import org.labkey.core.notification.EmailPreferenceContainerListener; import org.labkey.core.notification.EmailPreferenceUserListener; @@ -560,11 +561,11 @@ public QuerySchema createSchema(DefaultSchema schema, Module module) ScriptEngineManagerImpl.registerEncryptionMigrationHandler(); McpService.get().register(new CoreMcp()); + PostgreSqlService.setInstance(PostgreSqlDialectFactory::getLatestSupportedDialect); deleteTempFiles(); } - private void deleteTempFiles() { 
try diff --git a/core/src/org/labkey/core/admin/sql/SqlScriptController.java b/core/src/org/labkey/core/admin/sql/SqlScriptController.java index 73f4e5678a5..a7868284993 100644 --- a/core/src/org/labkey/core/admin/sql/SqlScriptController.java +++ b/core/src/org/labkey/core/admin/sql/SqlScriptController.java @@ -1605,6 +1605,7 @@ public boolean handlePost(UpgradeCodeForm form, BindException errors) throws Exc { LOG.info("Executing {}.{}(ModuleContext moduleContext)", _method.getDeclaringClass().getSimpleName(), _method.getName()); _method.invoke(_code, _ctx); + LOG.info("Finished executing {}.{}(ModuleContext moduleContext)", _method.getDeclaringClass().getSimpleName(), _method.getName()); return true; } diff --git a/core/src/org/labkey/core/dialect/PostgreSqlDialectFactory.java b/core/src/org/labkey/core/dialect/PostgreSqlDialectFactory.java index 5e3fa1914f4..3a8a4845799 100644 --- a/core/src/org/labkey/core/dialect/PostgreSqlDialectFactory.java +++ b/core/src/org/labkey/core/dialect/PostgreSqlDialectFactory.java @@ -24,6 +24,7 @@ import org.junit.Assert; import org.junit.Test; import org.labkey.api.data.dialect.AbstractDialectRetrievalTestCase; +import org.labkey.api.data.dialect.BasePostgreSqlDialect; import org.labkey.api.data.dialect.DatabaseNotSupportedException; import org.labkey.api.data.dialect.JdbcHelperTest; import org.labkey.api.data.dialect.PostgreSqlServerType; @@ -145,6 +146,11 @@ public static PostgreSql_13_Dialect getOldestSupportedDialect() return new PostgreSql_13_Dialect(); } + public static BasePostgreSqlDialect getLatestSupportedDialect() + { + return new PostgreSql_18_Dialect(); + } + public static class DialectRetrievalTestCase extends AbstractDialectRetrievalTestCase { @Override diff --git a/experiment/resources/schemas/dbscripts/sqlserver/exp-26.004-26.005.sql b/experiment/resources/schemas/dbscripts/sqlserver/exp-26.004-26.005.sql new file mode 100644 index 00000000000..b6f3e71a4b5 --- /dev/null +++ 
b/experiment/resources/schemas/dbscripts/sqlserver/exp-26.004-26.005.sql @@ -0,0 +1,2 @@ +-- SQL Server only +EXEC core.executeJavaUpgradeCode 'shortenAllStorageNames'; diff --git a/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java b/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java index 0069378ea20..62b8bcf78d9 100644 --- a/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java +++ b/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java @@ -178,23 +178,27 @@ public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema s DbScope sourceScope = configuration.getSourceScope(); DbScope targetScope = configuration.getTargetScope(); - DbSchema biologicsSourceSchema = sourceScope.getSchema("biologics", DbSchemaType.Migration); - DbSchema biologicsTargetSchema = targetScope.getSchema("biologics", DbSchemaType.Module); - if (biologicsSourceSchema.existsInDatabase() && biologicsTargetSchema.existsInDatabase()) + if (sourceScope.getSchemaNames().contains("biologics") && targetScope.getSchemaNames().contains("biologics")) { - TableInfo sourceTable = biologicsSourceSchema.getTable("SequenceIdentity"); - TableInfo targetTable = biologicsTargetSchema.getTable("SequenceIdentity"); + DbSchema biologicsSourceSchema = sourceScope.getSchema("biologics", DbSchemaType.Migration); + DbSchema biologicsTargetSchema = targetScope.getSchema("biologics", DbSchemaType.Module); - DatabaseMigrationService.get().copySourceTableToTargetTable(configuration, sourceTable, targetTable, DbSchemaType.Module, true, null, new DefaultMigrationSchemaHandler(biologicsTargetSchema) + if (biologicsSourceSchema.existsInDatabase() && biologicsTargetSchema.existsInDatabase()) { - @Override - public FilterClause getTableFilterClause(TableInfo sourceTable, Set containers) + TableInfo sourceTable = biologicsSourceSchema.getTable("SequenceIdentity"); + TableInfo targetTable = 
biologicsTargetSchema.getTable("SequenceIdentity"); + + DatabaseMigrationService.get().copySourceTableToTargetTable(configuration, sourceTable, targetTable, DbSchemaType.Module, true, null, new DefaultMigrationSchemaHandler(biologicsTargetSchema) { - // This is a global table, so no container clause. Just query and copy the sequence IDs referenced by data class rows we copied. - return new InClause(FieldKey.fromParts("SequenceId"), SEQUENCE_IDS); - } - }); + @Override + public FilterClause getTableFilterClause(TableInfo sourceTable, Set containers) + { + // This is a global table, so no container clause. Just query and copy the sequence IDs referenced by data class rows we copied. + return new InClause(FieldKey.fromParts("SequenceId"), SEQUENCE_IDS); + } + }); + } } } diff --git a/experiment/src/org/labkey/experiment/ExperimentModule.java b/experiment/src/org/labkey/experiment/ExperimentModule.java index 52c07a06836..130e764ec8b 100644 --- a/experiment/src/org/labkey/experiment/ExperimentModule.java +++ b/experiment/src/org/labkey/experiment/ExperimentModule.java @@ -205,7 +205,7 @@ public String getName() @Override public Double getSchemaVersion() { - return 26.004; + return 26.005; } @Nullable diff --git a/experiment/src/org/labkey/experiment/ExperimentUpgradeCode.java b/experiment/src/org/labkey/experiment/ExperimentUpgradeCode.java index 8c4a644d3cb..49b50d45624 100644 --- a/experiment/src/org/labkey/experiment/ExperimentUpgradeCode.java +++ b/experiment/src/org/labkey/experiment/ExperimentUpgradeCode.java @@ -15,6 +15,8 @@ */ package org.labkey.experiment; +import org.apache.commons.collections4.MultiValuedMap; +import org.apache.commons.collections4.multimap.ArrayListValuedHashMap; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.NotNull; @@ -25,7 +27,10 @@ import org.labkey.api.audit.SampleTimelineAuditEvent; import org.labkey.api.audit.TransactionAuditProvider; import 
org.labkey.api.collections.CaseInsensitiveHashSet; +import org.labkey.api.collections.CsvSet; +import org.labkey.api.collections.LabKeyCollectors; import org.labkey.api.data.ColumnInfo; +import org.labkey.api.data.CompareType; import org.labkey.api.data.Container; import org.labkey.api.data.ContainerManager; import org.labkey.api.data.DbSchema; @@ -39,11 +44,16 @@ import org.labkey.api.data.SQLFragment; import org.labkey.api.data.SchemaTableInfo; import org.labkey.api.data.Selector; +import org.labkey.api.data.SimpleFilter; import org.labkey.api.data.SqlExecutor; import org.labkey.api.data.SqlSelector; +import org.labkey.api.data.Table; import org.labkey.api.data.TableInfo; import org.labkey.api.data.TableSelector; import org.labkey.api.data.UpgradeCode; +import org.labkey.api.data.dialect.BasePostgreSqlDialect; +import org.labkey.api.data.dialect.PostgreSqlService; +import org.labkey.api.exp.OntologyManager; import org.labkey.api.exp.PropertyDescriptor; import org.labkey.api.exp.api.ExpSampleType; import org.labkey.api.exp.api.ExperimentService; @@ -64,6 +74,8 @@ import org.labkey.api.security.User; import org.labkey.api.security.roles.SiteAdminRole; import org.labkey.api.settings.AppProps; +import org.labkey.api.util.PageFlowUtil; +import org.labkey.api.util.StringUtilsLabKey; import org.labkey.api.util.logging.LogHelper; import org.labkey.experiment.api.DataClass; import org.labkey.experiment.api.DataClassDomainKind; @@ -73,11 +85,14 @@ import org.labkey.experiment.api.MaterialSource; import org.labkey.experiment.api.property.DomainImpl; import org.labkey.experiment.api.property.DomainPropertyImpl; +import org.labkey.experiment.api.property.StorageNameGenerator; import org.labkey.experiment.api.property.StorageProvisionerImpl; +import java.nio.charset.StandardCharsets; import java.sql.Connection; import java.sql.SQLException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import 
java.util.HashSet; @@ -174,7 +189,6 @@ public static void upgradeAmountsAndUnits(ModuleContext context) } ExperimentService.get().clearCaches(); } - } private static void getAmountAndUnitUpdates(Map sampleMap, Parameter unitsCol, Set amountCols, Unit currentDisplayUnit, Map oldDataMap, Map newDataMap, Map sampleCounts, boolean aliquotFields) @@ -656,5 +670,144 @@ private static void fillRowId(ExpDataClassImpl dc, Domain domain, DbScope scope) LOG.info("DataClass '{}' ({}) populated 'rowId' column, count={}", dc.getName(), dc.getRowId(), count); } + record DomainRecord(Container container, int domainId, String name, String storageSchemaName, String storageTableName) + { + String fullName() + { + return storageSchemaName + "." + storageTableName; + } + } + + record Property(int domainId, int propertyId, String domainName, String name, String storageSchemaName, String storageTableName, String storageColumnName) + { + String fullName() + { + // Have to bracket storage column name since it could have special characters (like dots) + return storageSchemaName + "." + storageTableName + "." + bracketIt(storageColumnName); + } + + // Bracket name and escape any internal ending brackets + private String bracketIt(String name) + { + return "[" + name.replace("]", "]]") + "]"; + } + } + + /** + * Called from exp-26.004-26.005.sql, on SQL Server only + * GitHub Issue 869: Long table/column names cause SQL Server migration to fail + * Query all table & column storage names and rename the ones that are too long for PostgreSQL + * TODO: When this upgrade code is removed, get rid of the StorageProvisionerImpl.makeTableName() method it uses. 
+ @SuppressWarnings("unused") + public static void shortenAllStorageNames(ModuleContext context) + { + if (context.isNewInstall()) + return; + + // The PostgreSQL dialect knows which names are too long + BasePostgreSqlDialect dialect = PostgreSqlService.get().getDialect(); + DbScope scope = DbScope.getLabKeyScope(); + SqlExecutor executor = new SqlExecutor(scope); + + // Stream all the storage table names and rename the ones that are too long for PostgreSQL. The filtering must + // be done in code by the dialect; SQL Server has DATALENGTH(), but that function returns values that are not + // consistent with our dialect check. Also, it looks like the function's behavior changed starting in SS 2019. + TableInfo tinfoDomainDescriptor = OntologyManager.getTinfoDomainDescriptor(); + SimpleFilter filter = new SimpleFilter(FieldKey.fromString("StorageSchemaName"), null, CompareType.NONBLANK); + filter.addCondition(FieldKey.fromString("StorageTableName"), null, CompareType.NONBLANK); + + new TableSelector(tinfoDomainDescriptor, new CsvSet("Container, DomainId, Name, StorageSchemaName, StorageTableName"), filter, null) + .setJdbcCaching(false) + .stream(DomainRecord.class) + .filter(domain -> dialect.isIdentifierTooLong(domain.storageTableName())) + .forEach(domain -> { + String oldName = domain.fullName(); + String newName = StorageProvisionerImpl.get().makeTableName(dialect, domain.container(), domain.domainId(), domain.name()); + + executor.execute(new SQLFragment("EXEC sp_rename ?, ?").add(oldName).add(newName)); + Table.update(null, tinfoDomainDescriptor, PageFlowUtil.map("StorageTableName", newName), domain.domainId()); + + LOG.info(" Table \"{}\" renamed to \"{}\" ({} bytes)", oldName, newName, newName.getBytes(StandardCharsets.UTF_8).length); + }); + + List badTableNames = new TableSelector(tinfoDomainDescriptor, new CsvSet("StorageTableName"), filter, null) + .setJdbcCaching(false) + .stream(String.class) + .filter(dialect::isIdentifierTooLong) + .toList(); + 
+ if (!badTableNames.isEmpty()) + LOG.error("Some storage table names are still too long!! {}", badTableNames); + + // Collect all the domains that have one or more storage column names that are too long for PostgreSQL + TableInfo tinfoPropertyDomain = OntologyManager.getTinfoPropertyDomain(); + TableInfo tinfoPropertyDescriptor = OntologyManager.getTinfoPropertyDescriptor(); + SQLFragment sql = new SQLFragment("SELECT dd.DomainId, dd.Name AS DomainName, px.PropertyId, StorageSchemaName, StorageTableName, StorageColumnName, px.Name FROM ") + .append(tinfoDomainDescriptor, "dd") + .append(" INNER JOIN ") + .append(tinfoPropertyDomain, "pd") + .append(" ON dd.DomainId = pd.DomainId INNER JOIN ") + .append(tinfoPropertyDescriptor, "px") + .append(" ON pd.PropertyId = px.PropertyId ") + .append("WHERE StorageSchemaName IS NOT NULL AND StorageTableName IS NOT NULL AND StorageColumnName IS NOT NULL"); + + filter = new SimpleFilter(FieldKey.fromString("StorageSchemaName"), null, CompareType.NONBLANK); + filter.addCondition(FieldKey.fromString("StorageTableName"), null, CompareType.NONBLANK); + filter.addCondition(FieldKey.fromString("StorageColumnName"), null, CompareType.NONBLANK); + MultiValuedMap badDomainMap = new SqlSelector(scope, sql) + .setJdbcCaching(false) + .stream(Property.class) + .filter(property -> dialect.isIdentifierTooLong(property.storageColumnName())) + .collect(LabKeyCollectors.toMultiValuedMap( + property -> new DomainRecord(null, property.domainId(), property.domainName(), property.storageSchemaName(), property.storageTableName()), + property -> property, + ArrayListValuedHashMap::new) + ); + if (!badDomainMap.isEmpty()) + LOG.info(" Found {} with storage column names that are too long for PostgreSQL:", StringUtilsLabKey.pluralize(badDomainMap.keySet().size(), "domain")); + + // Now enumerate the bad domains and rename their bad storage columns using the PostgreSQL truncation rules + badDomainMap.keySet() + .forEach(domain -> { + Collection 
badColumns = badDomainMap.get(domain); + List badColumnNames = badColumns.stream().map(Property::storageColumnName).toList(); + + // First, populate a new StorageNameGenerator with all the "good" names in this domain so we don't + // accidentally try to re-use one of them + StorageNameGenerator nameGenerator = new StorageNameGenerator(dialect); + SQLFragment domainSql = new SQLFragment("SELECT StorageColumnName FROM ") + .append(tinfoPropertyDomain, "pd") + .append(" INNER JOIN ") + .append(tinfoPropertyDescriptor, "px") + .append(" ON pd.PropertyId = px.PropertyId ") + .append("WHERE DomainId = ? AND StorageColumnName NOT ") + .add(domain.domainId()) + .appendInClause(badColumnNames, scope.getSqlDialect()); + new SqlSelector(scope, domainSql).forEach(String.class, nameGenerator::claimName); + + LOG.info(" Renaming {} in table \"{}\"", StringUtilsLabKey.pluralize(badColumns.size(), "column"), domain.fullName()); + + // Now use that StorageNameGenerator to create new names. Rename the column and update the PropertyDescriptor table. 
+ badColumns.forEach(property -> { + String oldName = property.fullName(); + String newName = nameGenerator.generateColumnName(property.name()); // No need to bracket or quote or escape: JDBC parameter takes care of all special characters + + executor.execute(new SQLFragment("EXEC sp_rename ?, ?, 'COLUMN'").add(oldName).add(newName)); + Table.update(null, tinfoPropertyDescriptor, PageFlowUtil.map("StorageColumnName", newName), property.propertyId()); + + LOG.info(" Column \"{}\" renamed to \"{}\" ({} bytes)", oldName, newName, newName.getBytes(StandardCharsets.UTF_8).length); + }); + }); + + List badColumnNames = new TableSelector(tinfoPropertyDescriptor, new CsvSet("StorageColumnName"), new SimpleFilter(FieldKey.fromString("StorageColumnName"), null, CompareType.NONBLANK), null) + .setJdbcCaching(false) + .stream(String.class) + .filter(dialect::isIdentifierTooLong) + .toList(); + + if (!badColumnNames.isEmpty()) + LOG.error("Some storage column names are still too long!! {}", badColumnNames); + } } diff --git a/experiment/src/org/labkey/experiment/api/property/StorageNameGenerator.java b/experiment/src/org/labkey/experiment/api/property/StorageNameGenerator.java index 16e82fccc48..98895225466 100644 --- a/experiment/src/org/labkey/experiment/api/property/StorageNameGenerator.java +++ b/experiment/src/org/labkey/experiment/api/property/StorageNameGenerator.java @@ -6,6 +6,7 @@ import org.junit.Test; import org.labkey.api.collections.CaseInsensitiveHashSet; import org.labkey.api.data.DbScope; +import org.labkey.api.data.dialect.PostgreSqlService; import org.labkey.api.data.dialect.SqlDialect; import org.labkey.api.exp.OntologyManager; import org.labkey.api.util.StringUtilsLabKey; @@ -32,7 +33,8 @@ public class StorageNameGenerator public StorageNameGenerator(@NotNull SqlDialect dialect) { - _dialect = dialect; + // GitHub Issue 869: Create SQL Server storage names using PostgreSQL's rules to ensure all tables and columns can migrate + _dialect = 
dialect.isSqlServer() ? PostgreSqlService.get().getDialect() : dialect; } public String claimName(String name) diff --git a/experiment/src/org/labkey/experiment/api/property/StorageProvisionerImpl.java b/experiment/src/org/labkey/experiment/api/property/StorageProvisionerImpl.java index a2e110b17ae..933824eadd2 100644 --- a/experiment/src/org/labkey/experiment/api/property/StorageProvisionerImpl.java +++ b/experiment/src/org/labkey/experiment/api/property/StorageProvisionerImpl.java @@ -615,8 +615,13 @@ public void changePropertyType(Domain domain, DomainProperty prop) throws Change public String makeTableName(DomainKind kind, Domain domain) { - String rawTableName = String.format("c%sd%s_%s", domain.getContainer().getRowId(), domain.getTypeId(), domain.getName()); - SqlDialect dialect = kind.getScope().getSqlDialect(); + return makeTableName(kind.getScope().getSqlDialect(), domain.getContainer(), domain.getTypeId(), domain.getName()); + } + + // Needed by ExperimentUpgradeCode.shortenAllStorageNames(). When that code is removed, combine this with above variant. + public String makeTableName(SqlDialect dialect, Container c, int typeId, String domainName) + { + String rawTableName = String.format("c%sd%s_%s", c.getRowId(), typeId, domainName); return new StorageNameGenerator(dialect).generateTableName(rawTableName); }