Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 11 additions & 2 deletions api/src/org/labkey/api/data/DatabaseMigrationService.java
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.labkey.api.data.DatabaseMigrationConfiguration.DefaultDatabaseMigrationConfiguration;
import org.labkey.api.data.DatabaseMigrationService.MigrationSchemaHandler.Sequence;
import org.labkey.api.data.SimpleFilter.AndClause;
import org.labkey.api.data.SimpleFilter.FilterClause;
import org.labkey.api.data.SimpleFilter.InClause;
Expand All @@ -21,6 +22,7 @@
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

Expand Down Expand Up @@ -62,11 +64,18 @@ default void registerMigrationFilter(MigrationFilter filter) {}
return null;
}

/**
 * Hook for copying the rows of {@code sourceTable} into {@code targetTable} during a migration,
 * consulting {@code schemaHandler} for container/row filtering and {@code schemaSequenceMap} for
 * per-table sequence bookkeeping. The default implementation does nothing; the service
 * implementation overrides this to perform the actual copy.
 */
default void copySourceTableToTargetTable(DatabaseMigrationConfiguration configuration, TableInfo sourceTable, TableInfo targetTable, DbSchemaType schemaType, Map<String, Sequence> schemaSequenceMap, MigrationSchemaHandler schemaHandler) {}

interface MigrationSchemaHandler
{
record Sequence(String schemaName, String tableName, String columnName, long lastValue) {}

// Marker for tables to declare themselves as site-wide (no container filtering)
FieldKey SITE_WIDE_TABLE = FieldKey.fromParts("site-wide");

// Dummy value returned from getContainerFieldKey() to ensure that custom getContainerClause() method is called
FieldKey DUMMY_FIELD_KEY = FieldKey.fromParts("DUMMY");

DbSchema getSchema();

void beforeVerification();
Expand All @@ -86,7 +95,7 @@ interface MigrationSchemaHandler
// and/or domain data filtering.)
void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilter notCopiedFilter);

void afterSchema();
void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema, Map<String, Map<String, Sequence>> sequenceMap);
}

class DefaultMigrationSchemaHandler implements MigrationSchemaHandler
Expand Down Expand Up @@ -248,7 +257,7 @@ public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilte
}

@Override
public void afterSchema()
public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema, Map<String, Map<String, Sequence>> sequenceMap)
{
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -214,7 +214,7 @@ void init()
_currentBinding = _bindings[0];

if (_batchSize < 1 && null == _rowIdIndex && null == _objectIdIndex && null == _objectUriIndex)
_batchSize = Math.max(10, 10000/Math.max(2,_bindings.length));
_batchSize = Math.max(10, 10000/Math.max(2, _currentBinding.length));

Integer contextTxSize = null;
if (_context.getConfigParameters() != null)
Expand Down
4 changes: 3 additions & 1 deletion core/src/org/labkey/core/CoreMigrationSchemaHandler.java
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import org.jetbrains.annotations.Nullable;
import org.labkey.api.data.CompareType;
import org.labkey.api.data.CoreSchema;
import org.labkey.api.data.DatabaseMigrationConfiguration;
import org.labkey.api.data.DatabaseMigrationService;
import org.labkey.api.data.DbSchema;
import org.labkey.api.data.DbSchemaType;
Expand All @@ -20,6 +21,7 @@
import org.labkey.api.util.GUID;

import java.util.List;
import java.util.Map;
import java.util.Set;

class CoreMigrationSchemaHandler extends DatabaseMigrationService.DefaultMigrationSchemaHandler implements DatabaseMigrationService.MigrationFilter
Expand Down Expand Up @@ -158,7 +160,7 @@ public SimpleFilter.FilterClause getContainerClause(TableInfo sourceTable, Field
}

@Override
public void afterSchema()
public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema, Map<String, Map<String, Sequence>> sequenceMap)
{
new SqlExecutor(getSchema()).execute("ALTER TABLE core.Containers ADD CONSTRAINT FK_Containers_Containers FOREIGN KEY (Parent) REFERENCES core.Containers(EntityId)");
new SqlExecutor(getSchema()).execute("ALTER TABLE core.ViewCategory ADD CONSTRAINT FK_ViewCategory_Parent FOREIGN KEY (Parent) REFERENCES core.ViewCategory(RowId)");
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,139 @@
package org.labkey.experiment;

import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;
import org.labkey.api.collections.Sets;
import org.labkey.api.data.DatabaseMigrationConfiguration;
import org.labkey.api.data.DatabaseMigrationService;
import org.labkey.api.data.DatabaseMigrationService.DefaultMigrationSchemaHandler;
import org.labkey.api.data.DatabaseMigrationService.DomainFilter;
import org.labkey.api.data.DbSchema;
import org.labkey.api.data.DbSchemaType;
import org.labkey.api.data.DbScope;
import org.labkey.api.data.SQLFragment;
import org.labkey.api.data.SimpleFilter;
import org.labkey.api.data.SimpleFilter.FilterClause;
import org.labkey.api.data.SimpleFilter.InClause;
import org.labkey.api.data.SimpleFilter.OrClause;
import org.labkey.api.data.SimpleFilter.SQLClause;
import org.labkey.api.data.TableInfo;
import org.labkey.api.data.TableSelector;
import org.labkey.api.query.FieldKey;
import org.labkey.api.util.Formats;
import org.labkey.api.util.GUID;
import org.labkey.api.util.logging.LogHelper;
import org.labkey.experiment.api.DataClassDomainKind;

import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/**
 * Migration handler for the data class provisioned-table schema. Beyond the default handling it:
 * (1) supplies an explicit container filter for a few legacy provisioned tables that lack an FK to exp.Data,
 * (2) while copying the protsequence/nucsequence/molecule tables, records every SequenceId seen, and
 * (3) after the schema finishes, uses that recorded set to copy the matching biologics.SequenceIdentity rows.
 */
class DataClassMigrationSchemaHandler extends DefaultMigrationSchemaHandler
{
    private static final Logger LOG = LogHelper.getLogger(DataClassMigrationSchemaHandler.class, "Data class migration status");

    public DataClassMigrationSchemaHandler()
    {
        super(DataClassDomainKind.getSchema());
    }

    /**
     * Returns the container FieldKey for {@code table}, or DUMMY_FIELD_KEY when the superclass reports
     * none. Returning the dummy key (instead of null) forces the framework to call the custom
     * getContainerClause() override below.
     */
    @Override
    public @Nullable FieldKey getContainerFieldKey(TableInfo table)
    {
        FieldKey fieldKey = super.getContainerFieldKey(table);
        return fieldKey != null ? fieldKey : DUMMY_FIELD_KEY; // "DUMMY" case is a data class that lacks an FK
    }

    /**
     * Builds the container-scoping clause for {@code sourceTable}. Tables with a real container
     * FieldKey defer to the default handling; tables flagged with DUMMY_FIELD_KEY get a hand-built
     * subquery against exp.Data instead.
     */
    @Override
    public FilterClause getContainerClause(TableInfo sourceTable, FieldKey containerFieldKey, Set<GUID> containers)
    {
        final FilterClause clause;

        if (containerFieldKey != DUMMY_FIELD_KEY)
        {
            clause = super.getContainerClause(sourceTable, containerFieldKey, containers);
        }
        else
        {
            // There are a couple bad data class provisioned tables that lack an FK to exp.Data. In that case, craft the
            // FilterClause explicitly.
            clause = new SQLClause(
                new SQLFragment("LSID IN (SELECT LSID FROM exp.Data WHERE Container")
                    .appendInClause(containers, sourceTable.getSqlDialect())
                    .append(")")
            );
        }

        return clause;
    }

    @Override
    public void addDomainDataFilter(OrClause orClause, DomainFilter filter, TableInfo sourceTable, FieldKey fKey, Set<String> selectColumnNames)
    {
        // Data classes have a built-in Flag field
        addDomainDataFlagFilter(orClause, filter, sourceTable, fKey, selectColumnNames);
    }

    // Provisioned tables (matched by suffix, case-insensitively) whose Ident values carry the
    // SequenceIds that afterSchema() later needs to copy biologics.SequenceIdentity rows.
    private static final Set<String> SEQUENCE_TABLES = Sets.newCaseInsensitiveHashSet("protsequence", "nucsequence", "molecule");
    // Accumulates the SequenceIds harvested by afterTable() across all sequence tables.
    // NOTE(review): static mutable state — assumes a single migration runs at a time; confirm.
    private static final Set<Long> SEQUENCE_IDS = new HashSet<>();

    /**
     * Per-table post-copy step: logs how many rows were skipped, and for the sequence tables,
     * harvests SequenceIds from the target table's Ident column into SEQUENCE_IDS.
     */
    @Override
    public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilter notCopiedFilter)
    {
        // TODO: delete orphaned rows in exp.Data and exp.Object
        Collection<String> notCopiedLsids = new TableSelector(sourceTable, Collections.singleton("LSID"), notCopiedFilter, null).getCollection(String.class);
        if (!notCopiedLsids.isEmpty())
            LOG.info("  {} rows not copied", Formats.commaf0.format(notCopiedLsids.size()));

        // Strip everything up to and including the first '_' — presumably the provisioned-table
        // prefix — leaving the bare table name. (If there is no '_', indexOf returns -1 and
        // substring(0) leaves the name unchanged.)
        String name = sourceTable.getName();
        int idx = name.indexOf('_');
        name = name.substring(idx + 1);

        if (SEQUENCE_TABLES.contains(name))
        {
            int startSize = SEQUENCE_IDS.size();
            // Ident values are expected to be "<prefix>:<id>"; parse the numeric part after the ':'.
            // (If no ':' is present, indexOf returns -1 and the whole string is parsed.)
            new TableSelector(targetTable, Collections.singleton("Ident")).stream(String.class)
                .map(ident -> {
                    int i = ident.indexOf(':');
                    try
                    {
                        return Long.parseLong(ident.substring(i + 1));
                    }
                    catch (Exception e)
                    {
                        // Fail the migration loudly with the offending value rather than skipping it silently
                        throw new RuntimeException("Exception trying to split ident on ':' (" + ident + ")", e);
                    }
                })
                .forEach(SEQUENCE_IDS::add);
            LOG.info("  {} unique SequenceIds were added to the SequenceIdentity set", SEQUENCE_IDS.size() - startSize);
        }
    }

    /**
     * Schema-level post-copy step: if the biologics schema exists on both source and target,
     * copies biologics.SequenceIdentity rows, restricted (via the anonymous handler's container
     * clause) to the SequenceIds collected in afterTable().
     */
    @Override
    public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema, Map<String, Map<String, Sequence>> sequenceMap)
    {
        // Experiment shouldn't mess with Biologics tables, but it gets the job done

        DbScope sourceScope = configuration.getSourceScope();
        DbScope targetScope = configuration.getTargetScope();
        DbSchema biologicsSourceSchema = sourceScope.getSchema("biologics", DbSchemaType.Migration);
        DbSchema biologicsTargetSchema = targetScope.getSchema("biologics", DbSchemaType.Module);

        if (biologicsSourceSchema.existsInDatabase() && biologicsTargetSchema.existsInDatabase())
        {
            TableInfo sourceTable = biologicsSourceSchema.getTable("SequenceIdentity");
            TableInfo targetTable = biologicsTargetSchema.getTable("SequenceIdentity");

            // The anonymous handler repurposes getContainerClause() to filter by SequenceId rather
            // than by container, limiting the copy to rows referenced by the migrated data classes.
            DatabaseMigrationService.get().copySourceTableToTargetTable(configuration, sourceTable, targetTable, DbSchemaType.Module, sequenceMap.get("biologics"), new DefaultMigrationSchemaHandler(biologicsTargetSchema)
            {
                @Override
                public FilterClause getContainerClause(TableInfo sourceTable, FieldKey containerFieldKey, Set<GUID> containers)
                {
                    return new InClause(FieldKey.fromParts("SequenceId"), SEQUENCE_IDS);
                }
            });
        }
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,9 @@
import org.jetbrains.annotations.Nullable;
import org.labkey.api.data.CompareType;
import org.labkey.api.data.CompareType.CompareClause;
import org.labkey.api.data.DatabaseMigrationService;
import org.labkey.api.data.DatabaseMigrationConfiguration;
import org.labkey.api.data.DatabaseMigrationService.DefaultMigrationSchemaHandler;
import org.labkey.api.data.DbSchema;
import org.labkey.api.data.SQLFragment;
import org.labkey.api.data.SimpleFilter.AndClause;
import org.labkey.api.data.SimpleFilter.FilterClause;
Expand All @@ -15,10 +17,13 @@
import org.labkey.api.exp.OntologyManager;
import org.labkey.api.query.FieldKey;
import org.labkey.api.util.GUID;
import org.labkey.experiment.api.ExperimentServiceImpl;

import java.util.List;
import java.util.Map;
import java.util.Set;

class ExperimentMigrationSchemaHandler extends DatabaseMigrationService.DefaultMigrationSchemaHandler
class ExperimentMigrationSchemaHandler extends DefaultMigrationSchemaHandler
{
public ExperimentMigrationSchemaHandler()
{
Expand All @@ -42,14 +47,23 @@ public void beforeSchema()
{
return switch (table.getName())
{
case "Alias", "ObjectLegacyNames" -> FieldKey.fromParts("DUMMY"); // Unused dummy value -- see override below
case "Alias", "ObjectLegacyNames" -> DUMMY_FIELD_KEY; // Unused dummy value -- see override below
case "DataTypeExclusion" -> FieldKey.fromParts("ExcludedContainer");
case "PropertyDomain" -> FieldKey.fromParts("DomainId", "Container");
case "ProtocolApplication" -> FieldKey.fromParts("RunId", "Container");
default -> super.getContainerFieldKey(table);
};
}

/**
 * Returns the standard set of tables to copy, minus MaterialIndexed — its contents are derived
 * search-index state that is rebuilt on the new server after migration.
 */
@Override
public List<TableInfo> getTablesToCopy()
{
    // No need to populate the MaterialIndexed table -- new server should be completely re-indexed after migration
    List<TableInfo> tables = super.getTablesToCopy();
    // NOTE(review): assumes super.getTablesToCopy() returns a mutable list — remove() would throw
    // UnsupportedOperationException on an unmodifiable one; confirm, or copy defensively.
    tables.remove(ExperimentServiceImpl.get().getTinfoMaterialIndexed());
    return tables;
}

@Override
public FilterClause getContainerClause(TableInfo sourceTable, FieldKey containerFieldKey, Set<GUID> containers)
{
Expand Down Expand Up @@ -104,7 +118,7 @@ public FilterClause getContainerClause(TableInfo sourceTable, FieldKey container
}

@Override
public void afterSchema()
public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema, Map<String, Map<String, Sequence>> sequenceMap)
{
new SqlExecutor(getSchema()).execute("ALTER TABLE exp.ExperimentRun ADD CONSTRAINT FK_Run_WorfklowTask FOREIGN KEY (WorkflowTask) REFERENCES exp.ProtocolApplication (RowId) MATCH SIMPLE ON DELETE SET NULL");
new SqlExecutor(getSchema()).execute("ALTER TABLE exp.Object ADD CONSTRAINT FK_Object_Object FOREIGN KEY (OwnerObjectId) REFERENCES exp.Object (ObjectId)");
Expand Down
Loading