diff --git a/morf-core/src/main/java/org/alfasoftware/morf/dataset/WithMetaDataAdapter.java b/morf-core/src/main/java/org/alfasoftware/morf/dataset/WithMetaDataAdapter.java index 61b964393..6ccd67c2d 100755 --- a/morf-core/src/main/java/org/alfasoftware/morf/dataset/WithMetaDataAdapter.java +++ b/morf-core/src/main/java/org/alfasoftware/morf/dataset/WithMetaDataAdapter.java @@ -99,6 +99,7 @@ public Collection tableNames() { return sourceSchema.tableNames(); } + @Override public Collection tables() { Set
tables = new HashSet<>(); diff --git a/morf-core/src/main/java/org/alfasoftware/morf/jdbc/DatabaseMetaDataProvider.java b/morf-core/src/main/java/org/alfasoftware/morf/jdbc/DatabaseMetaDataProvider.java index 1da493303..5752a12b5 100755 --- a/morf-core/src/main/java/org/alfasoftware/morf/jdbc/DatabaseMetaDataProvider.java +++ b/morf-core/src/main/java/org/alfasoftware/morf/jdbc/DatabaseMetaDataProvider.java @@ -28,16 +28,20 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.stream.Collectors; import java.util.stream.IntStream; import org.alfasoftware.morf.metadata.Column; import org.alfasoftware.morf.metadata.DataType; import org.alfasoftware.morf.metadata.Index; +import org.alfasoftware.morf.metadata.PartitioningRule; +import org.alfasoftware.morf.metadata.Partitions; import org.alfasoftware.morf.metadata.Schema; import org.alfasoftware.morf.metadata.SchemaUtils; import org.alfasoftware.morf.metadata.SchemaUtils.ColumnBuilder; @@ -116,6 +120,8 @@ public abstract class DatabaseMetaDataProvider implements Schema { private final LoadingCache sequenceCache = CacheBuilder.newBuilder().build(CacheLoader.from(this::loadSequence)); private final Supplier> databaseInformation = Suppliers.memoize(this::loadDatabaseInformation); + protected Supplier> ignoredTables = Suppliers.memoize(this::getIgnoredTables); + protected Supplier> partitionedTables = Suppliers.memoize(this::getPartitionedTables); /** * @param connection The database connection from which meta data should be provided. 
@@ -148,6 +154,9 @@ private Map loadDatabaseInformation() { } } + protected Set getIgnoredTables() { return new HashSet<>(); } + + protected Set getPartitionedTables() { return new HashSet<>(); } /** * @see org.alfasoftware.morf.metadata.Schema#isEmptyDatabase() */ @@ -306,6 +315,11 @@ protected Map loadAllTableNames() { throw new RuntimeSqlException("Error reading metadata for table ["+tableName+"]", e); } } + // add partitioned tables to list + partitionedTables.get().forEach(table -> { + RealName partitionedTableName = createRealName(table, table); + tableNameMappings.put(partitionedTableName, partitionedTableName); + }); long end = System.currentTimeMillis(); Map tableNameMap = tableNameMappings.build(); @@ -587,7 +601,6 @@ protected ColumnBuilder setColumnAutonumbered(RealName tableName, ColumnBuilder /** * Sets column default value. - * * Note: Uses an empty string for any column other than version. * Database-schema level default values are not supported by ALFA's domain model * hence we don't want to include a default value in the definition of tables. @@ -677,6 +690,7 @@ protected Table loadTable(AName tableName) { final Map primaryKey = loadTablePrimaryKey(realTableName); final Supplier> columns = Suppliers.memoize(() -> loadTableColumns(realTableName, primaryKey)); final Supplier> indexes = Suppliers.memoize(() -> loadTableIndexes(realTableName, false)); + final Supplier partitions = Suppliers.memoize(() -> loadTablePartitions(realTableName)); return new Table() { @Override @@ -698,6 +712,19 @@ public List indexes() { public boolean isTemporary() { return false; } + + @Override + public boolean isPartitioned() { return partitions.get() != null; } + + @Override + public PartitioningRule partitioningRule() { + return partitions.get() == null ? 
null : partitions.get().partitioningRule(); + } + + @Override + public Partitions partitions() { + return partitions.get(); + } }; } @@ -857,6 +884,11 @@ protected List loadTableIndexes(RealName tableName, boolean returnIgnored } + protected Partitions loadTablePartitions(RealName tableName) { + return null; + } + + /** * Retrieves index name from a result set. * @@ -1112,8 +1144,8 @@ protected static AName named(String name) { /** * Build the SQL to return sequence information from the metadata. - * @param schemaName - * @return + * @param schemaName the schema name + * @return the sql for the sequence */ protected abstract String buildSequenceSql(String schemaName); @@ -1378,6 +1410,11 @@ public int getAutoNumberStart() { throw new UnexpectedDataTypeException(this.toString()); } + @Override + public boolean isPartitioned() { + return false; + } + @Override public String getDefaultValue() { throw new UnexpectedDataTypeException(this.toString()); @@ -1424,5 +1461,10 @@ public ColumnBuilder autoNumbered(int from) { public ColumnBuilder dataType(DataType dataType) { return this; } + + @Override + public ColumnBuilder partitioned() { + return this; + } } } diff --git a/morf-core/src/main/java/org/alfasoftware/morf/jdbc/SqlDialect.java b/morf-core/src/main/java/org/alfasoftware/morf/jdbc/SqlDialect.java index 7da3de0e4..c6c2b755b 100755 --- a/morf-core/src/main/java/org/alfasoftware/morf/jdbc/SqlDialect.java +++ b/morf-core/src/main/java/org/alfasoftware/morf/jdbc/SqlDialect.java @@ -47,6 +47,8 @@ import org.alfasoftware.morf.metadata.DataType; import org.alfasoftware.morf.metadata.DataValueLookup; import org.alfasoftware.morf.metadata.Index; +import org.alfasoftware.morf.metadata.PartitioningRule; +import org.alfasoftware.morf.metadata.Partitions; import org.alfasoftware.morf.metadata.Schema; import org.alfasoftware.morf.metadata.SchemaResource; import org.alfasoftware.morf.metadata.SchemaUtils; @@ -4621,5 +4623,18 @@ public List columns() { public boolean 
isTemporary() { return isTemporary; } + + @Override + public boolean isPartitioned() { return false; } + + @Override + public PartitioningRule partitioningRule() { + return null; + } + + @Override + public Partitions partitions() { + return null; + } } } diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/AdditionalMetadata.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/AdditionalMetadata.java index 916d13f42..a6e297470 100644 --- a/morf-core/src/main/java/org/alfasoftware/morf/metadata/AdditionalMetadata.java +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/AdditionalMetadata.java @@ -1,6 +1,7 @@ package org.alfasoftware.morf.metadata; import java.util.List; +import java.util.Collection; import java.util.Map; import org.apache.commons.lang3.NotImplementedException; @@ -22,4 +23,23 @@ default Map> ignoredIndexes() { return Map.of(); } + /** + * Provides the names of all partition tables in the database. This applies for now for postgres. Note that the order of + * the tables in the result is not specified. The case of the + * table names may be preserved when logging progress, but should not be relied on for schema + * processing. + * + * @return A collection of all partitioned table names available in the database. + */ + default Collection partitionedTableNames() { throw new NotImplementedException("Not implemented yet."); } + + /** + * Provides the names of all partition tables in the database. This applies for now for postgres. Note that the order of + * the tables in the result is not specified. The case of the + * table names may be preserved when logging progress, but should not be relied on for schema + * processing. + * + * @return A collection of all partition table names available in the database. 
+ */ + default Collection partitionTableNames() { throw new NotImplementedException("Not implemented yet."); } } diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/Column.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/Column.java index 70490e875..930a0a1e7 100755 --- a/morf-core/src/main/java/org/alfasoftware/morf/metadata/Column.java +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/Column.java @@ -78,6 +78,12 @@ public default String getUpperCaseName() { public int getAutoNumberStart(); + /** + * @return True if the column is the partition by source + */ + boolean isPartitioned(); + + /** * Helper for {@link Object#toString()} implementations. * diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/ColumnBean.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/ColumnBean.java index c6df4bd83..5152245da 100755 --- a/morf-core/src/main/java/org/alfasoftware/morf/metadata/ColumnBean.java +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/ColumnBean.java @@ -34,7 +34,8 @@ class ColumnBean extends ColumnTypeBean implements Column { private final String defaultValue; private final boolean autoNumber; private final int autoNumberStart; - + //TODO change to private with appropriate constructors. + protected boolean partitioned; /** * Creates a column with zero precision. 
@@ -218,4 +219,11 @@ public int getAutoNumberStart() { public String toString() { return this.toStringHelper(); } + + @Override + public boolean isPartitioned() { + return partitioned; + } + + } diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/DatePartitionedByPeriodRule.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/DatePartitionedByPeriodRule.java new file mode 100644 index 000000000..3e57859dc --- /dev/null +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/DatePartitionedByPeriodRule.java @@ -0,0 +1,41 @@ +package org.alfasoftware.morf.metadata; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.lang3.tuple.Pair; +import org.joda.time.LocalDate; +import org.joda.time.Period; +import org.joda.time.ReadablePeriod; + +public class DatePartitionedByPeriodRule extends PartitioningByRangeRule { + + public DatePartitionedByPeriodRule(String column, LocalDate startValue, Period period, int count) { + super(column, DataType.DATE, startValue, period, count); + } + + public DatePartitionedByPeriodRule(String column, List> ranges) { + super(column, DataType.DATE, ranges); + } + + @Override + public List> getRanges() { + List> ranges = new ArrayList>(); + + if (startValue != null) { + + ReadablePeriod readablePeriod = increment.toPeriod(); + startValue.plus(readablePeriod); + + int i = count; + for (LocalDate current = startValue; i > 0; i--) { + ranges.add(Pair.of(current, current.plus(readablePeriod))); + current = current.plus(readablePeriod); + } + } else { + ranges.addAll(this.partitions); + } + + return ranges; + } +} diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/Index.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/Index.java index 99c798361..6cfe37e39 100755 --- a/morf-core/src/main/java/org/alfasoftware/morf/metadata/Index.java +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/Index.java @@ -41,6 +41,15 @@ public interface Index { */ public 
boolean isUnique(); + /** + * @return True if the index is globally partitioned + */ + boolean isGlobalPartitioned(); + + /** + * @return True if the index is locally partitioned + */ + boolean isLocalPartitioned(); /** * Helper for {@link Object#toString()} implementations. diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/IndexBean.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/IndexBean.java index a9a1d7fe0..9db115966 100755 --- a/morf-core/src/main/java/org/alfasoftware/morf/metadata/IndexBean.java +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/IndexBean.java @@ -42,6 +42,17 @@ class IndexBean implements Index { private final boolean unique; + /** + * Flags if index is partitioned and global. + */ + //TODO: change this protected properties from protected to private and add the appropriate constructors to consider them + protected boolean isGlobalPartitioned; + + /** + * Flags if index is partitioned and local. + */ + protected boolean isLocalPartitioned; + /** * Creates an index bean. * @@ -110,6 +121,13 @@ public boolean isUnique() { } + @Override + public boolean isGlobalPartitioned() { return isGlobalPartitioned; } + + @Override + public boolean isLocalPartitioned() { return isLocalPartitioned; } + + @Override public String toString() { return this.toStringHelper(); diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/Partition.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/Partition.java new file mode 100644 index 000000000..df40daa01 --- /dev/null +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/Partition.java @@ -0,0 +1,10 @@ +package org.alfasoftware.morf.metadata; + +/** + * Defines a partition on a table. 
+ * + * @author Copyright (c) Alfa Financial Software 2025 + */ +public interface Partition { + String name(); +} diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitionBean.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitionBean.java new file mode 100644 index 000000000..60f3cffe1 --- /dev/null +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitionBean.java @@ -0,0 +1,20 @@ +package org.alfasoftware.morf.metadata; + +/** + * Defines the bean for one partition on a table. {@link Partition} + * + * @author Copyright (c) Alfa Financial Software 2025 + */ +public abstract class PartitionBean implements Partition { + + protected String name; + + public PartitionBean(String name) { + this.name = name; + } + + @Override + public String name() { + return name; + } +} diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitionByHash.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitionByHash.java new file mode 100644 index 000000000..e5d3edc12 --- /dev/null +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitionByHash.java @@ -0,0 +1,12 @@ +package org.alfasoftware.morf.metadata; + +/** + * Defines a partition by hash on a table. + * + * @author Copyright (c) Alfa Financial Software 2025 + */ +public interface PartitionByHash extends Partition { + String hashFunction(); + String divider(); + String remainder(); +} diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitionByHashBean.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitionByHashBean.java new file mode 100644 index 000000000..cd00d5b32 --- /dev/null +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitionByHashBean.java @@ -0,0 +1,32 @@ +package org.alfasoftware.morf.metadata; + +/** + * Defines the bean for one partition hash on a table. 
{@link PartitionByHash} + * + * @author Copyright (c) Alfa Financial Software 2025 + */ +public class PartitionByHashBean extends PartitionBean implements PartitionByHash { + protected String divider; + protected String remainder; + + public PartitionByHashBean(String name, String divider, String remainder) { + super(name); + this.divider = divider; + this.remainder = remainder; + } + + @Override + public String hashFunction() { + return "MOD"; + } + + @Override + public String divider() { + return divider; + } + + @Override + public String remainder() { + return remainder; + } +} diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitionByRange.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitionByRange.java new file mode 100644 index 000000000..b57b1c684 --- /dev/null +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitionByRange.java @@ -0,0 +1,11 @@ +package org.alfasoftware.morf.metadata; + +/** + * Defines a partition by range on a table. + * + * @author Copyright (c) Alfa Financial Software 2025 + */ +public interface PartitionByRange extends Partition { + String start(); + String end(); +} diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitionByRangeBean.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitionByRangeBean.java new file mode 100644 index 000000000..b9d5a4d6e --- /dev/null +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitionByRangeBean.java @@ -0,0 +1,27 @@ +package org.alfasoftware.morf.metadata; + +/** + * Defines the bean for one partition range on a table. 
{@link PartitionByRange} + * + * @author Copyright (c) Alfa Financial Software 2025 + */ +public class PartitionByRangeBean extends PartitionBean implements PartitionByRange { + protected String start; + protected String end; + + public PartitionByRangeBean(String name, String start, String end) { + super(name); + this.start = start; + this.end = end; + } + + @Override + public String start() { + return start; + } + + @Override + public String end() { + return end; + } +} diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitioningByHashRule.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitioningByHashRule.java new file mode 100644 index 000000000..c3fb090d5 --- /dev/null +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitioningByHashRule.java @@ -0,0 +1,56 @@ +package org.alfasoftware.morf.metadata; + +import java.util.ArrayList; +import java.util.List; + + +public class PartitioningByHashRule implements PartitioningRule { + private String columnName; + private int hashDivider; + private List hashRemainders; + private int count; + + + + public PartitioningByHashRule(String columnName, int hashDivider) { + this.columnName = columnName; + this.hashDivider = hashDivider; + this.count = hashDivider; + this.hashRemainders = new ArrayList<>(count); + + for (int i = 0; i < count; i++) { + this.hashRemainders.add(i); + } + } + + + @Override + public String getColumn() { + return columnName; + } + + @Override + public DataType getColumnType() { + return DataType.STRING; + } + + @Override + public PartitioningRuleType getPartitioningType() { + return PartitioningRuleType.hashPartitioning; + } + + + public int getHashDivider() { + return hashDivider; + } + + + public List getHashRemainders() { + return hashRemainders; + } + + + public int getCount() { + return count; + } +} diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitioningByRangeRule.java 
b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitioningByRangeRule.java new file mode 100644 index 000000000..c5d08d248 --- /dev/null +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitioningByRangeRule.java @@ -0,0 +1,54 @@ +package org.alfasoftware.morf.metadata; + +import java.util.List; + +import org.apache.commons.lang3.tuple.Pair; + +public abstract class PartitioningByRangeRule implements PartitioningRule { + protected final String column; + protected final T startValue; + protected final R increment; + protected final int count; + protected List> partitions; + protected DataType columnType; + + protected abstract List> getRanges(); + + public PartitioningByRangeRule(String column, DataType columnType, T startValue, R increment, int count) { + if (column == null || column.isEmpty()) { + throw new IllegalArgumentException("Column name cannot be null or empty"); + } + this.column = column; + this.startValue = startValue; + this.increment = increment; + this.count = count; + this.partitions = getRanges(); + this.columnType = columnType; + } + + + public PartitioningByRangeRule(String column, DataType columnType, List> ranges) { + if (column == null || column.isEmpty()) { + throw new IllegalArgumentException("Column name cannot be null or empty"); + } + this.column = column; + this.startValue = null; + this.increment = null; + this.count = ranges.size(); + this.partitions = ranges; + this.columnType = columnType; + } + + + @Override + public DataType getColumnType() { + return columnType; + } + + + @Override + public String getColumn() { return column; } + + @Override + public PartitioningRuleType getPartitioningType() { return PartitioningRuleType.rangePartitioning; } +} diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitioningRule.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitioningRule.java new file mode 100644 index 000000000..f368f6149 --- /dev/null +++ 
b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitioningRule.java @@ -0,0 +1,10 @@ +package org.alfasoftware.morf.metadata; + +/** + * Represents a partitioning rule. + */ +public interface PartitioningRule { + String getColumn(); + DataType getColumnType(); + PartitioningRuleType getPartitioningType(); +} diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitioningRuleType.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitioningRuleType.java new file mode 100644 index 000000000..a200257d8 --- /dev/null +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitioningRuleType.java @@ -0,0 +1,7 @@ +package org.alfasoftware.morf.metadata; + +public enum PartitioningRuleType { + hashPartitioning, + rangePartitioning, + listPartitioning; +} diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/Partitions.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/Partitions.java new file mode 100644 index 000000000..376672fba --- /dev/null +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/Partitions.java @@ -0,0 +1,15 @@ +package org.alfasoftware.morf.metadata; + +import java.util.List; + +/** + * Defines the partition collection on a table. + * + * @author Copyright (c) Alfa Financial Software 2025 + */ +public interface Partitions { + Column column(); + PartitioningRuleType partitioningType(); + PartitioningRule partitioningRule(); + List getPartitions(); +} diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitionsBean.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitionsBean.java new file mode 100644 index 000000000..cae8fe3d4 --- /dev/null +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/PartitionsBean.java @@ -0,0 +1,57 @@ +package org.alfasoftware.morf.metadata; + +import java.util.ArrayList; +import java.util.List; + +/** + * Defines the bean for the partitions collection on a table. 
{@link Partitions} + * + * @author Copyright (c) Alfa Financial Software 2025 + */ +public class PartitionsBean implements Partitions { + Column column; + PartitioningRuleType partitioningType; + PartitioningRule partitioningRule; + List partitions; + + public PartitionsBean() { + this.column = null; + this.partitions = new ArrayList<>(); + } + + public PartitionsBean(Column column, PartitioningRuleType partitioningType) { + this(column, partitioningType, null, null); + } + + public PartitionsBean(Column column, PartitioningRuleType partitioningType, PartitioningRule partitioningRule) { + this(column, partitioningType, partitioningRule, null); + } + + public PartitionsBean(Column column, PartitioningRuleType partitioningType, PartitioningRule partitioningRule, List partitions) { + this.column = column; + this.partitioningType = partitioningType; + this.partitioningRule = partitioningRule; + this.partitions = partitions; + } + + + @Override + public Column column() { + return column; + } + + @Override + public PartitioningRuleType partitioningType() { + return partitioningType; + } + + @Override + public PartitioningRule partitioningRule() { + return partitioningRule; + } + + @Override + public List getPartitions() { + return partitions; + } +} diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/SchemaUtils.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/SchemaUtils.java index 8aaa9744a..aecdef68a 100755 --- a/morf-core/src/main/java/org/alfasoftware/morf/metadata/SchemaUtils.java +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/SchemaUtils.java @@ -15,12 +15,14 @@ package org.alfasoftware.morf.metadata; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.stream.Collectors; import org.alfasoftware.morf.sql.SelectStatement; +import org.apache.commons.lang3.StringUtils; import com.google.common.base.Function; import com.google.common.collect.FluentIterable; 
@@ -350,6 +352,29 @@ public static IndexBuilder index(String name) { return new IndexBuilderImpl(name); } + /** + * Build a partition list. + * @return A {@link PartitionsBuilder} for the partitions. + */ + public static PartitionsBuilder partitions() { + return new PartitionsBuilderImpl(); + } + + /** + * Build a range partition + * @return A {@link PartitionByRangeBuilder} for the range partitions. + */ + public static PartitionByRangeBuilder partitionByRange(String name) { + return new PartitionByRangeBuilderImpl(name); + } + + /** + * Build a range partition + * @return A {@link PartitionByHashBuilder} for the hash partitions. + */ + public static PartitionByHashBuilder partitionByHash(String name) { + return new PartitionByHashBuilderImpl(name); + } /** * Create a view. @@ -523,8 +548,18 @@ public interface TableBuilder extends Table { * @return this table builder, for method chaining. */ public TableBuilder temporary(); + + + /** + * The partitioning rule for the table is defined here. + * @param rule The rule applied on the column to define partitions on the table + * @return this table builder, for method chaining. + */ + public TableBuilder partitionBy(PartitioningRule rule); + } + /** * Builds {@link Column} implementations. */ @@ -584,6 +619,13 @@ public interface ColumnBuilder extends Column { * @return this, for method chaining. */ public ColumnBuilder dataType(DataType dataType); + + + /** + * Marks the column as the partition source value. + * @return this, for method chaining. + */ + public ColumnBuilder partitioned(); } /** @@ -615,8 +657,55 @@ public interface IndexBuilder extends Index { * @return this, for method chaining. */ public IndexBuilder unique(); + + /** + * Mark this index as isGlobalPartitioned. + * + * @return this, for method chaining. + */ + IndexBuilder globalPartitioned(); + + /** + * Mark this index as isLocalPartitioned. + * + * @return this, for method chaining. 
+ */ + IndexBuilder localPartitioned(); + } + + /** + * Builds {@link Partitions} implementations. + */ + public interface PartitionsBuilder extends Partitions { + PartitionsBuilder column(Column column); + + PartitionsBuilder ruleType(PartitioningRuleType ruleType); + + PartitionsBuilder partitions(Iterable partitions); + } + + /** + * Builds {@link Partition} implementations. + */ + /*public interface PartitionBuilder extends Partition { + PartitionsBuilder name(String name); + }*/ + + /** + * Builds {@link PartitionByRange} implementations. + */ + public interface PartitionByRangeBuilder extends PartitionByRange { + PartitionByRangeBuilder start(String start); + PartitionByRangeBuilder end(String end); } + /** + * Builds {@link PartitionByHash} implementations. + */ + public interface PartitionByHashBuilder extends PartitionByHash { + PartitionByHashBuilder divider(String start); + PartitionByHashBuilder remainder(String end); + } /** * Private implementation of {@link SequenceBuilder}. 
@@ -701,6 +790,20 @@ public TableBuilder indexes(Iterable indexes) { public TableBuilder temporary() { return new TableBuilderImpl(getName(), columns(), indexes(), true); } + + + /** + * @see org.alfasoftware.morf.metadata.SchemaUtils.TableBuilder#partitionBy(PartitioningRule) + */ + @Override + public TableBuilder partitionBy(PartitioningRule rule) { + this.partitionColumn = rule.getColumn(); + this.partitioningRule = rule; + return this; + } + + @Override + public boolean isPartitioned() { return !StringUtils.isEmpty(this.partitionColumn); }; } /** @@ -768,6 +871,12 @@ public ColumnBuilder dataType(DataType dataType) { ColumnBuilderImpl column = new ColumnBuilderImpl(getName(), dataType, getWidth(), getScale()); return new ColumnBuilderImpl(column, isNullable(), getDefaultValue(), isPrimaryKey(), isAutoNumbered(), getAutoNumberStart()); } + + @Override + public ColumnBuilder partitioned() { + this.partitioned = true; + return this; + } } /** @@ -816,8 +925,114 @@ public IndexBuilder unique() { public String toString() { return this.toStringHelper(); } + + /** + * @see org.alfasoftware.morf.metadata.SchemaUtils.IndexBuilder#globalPartitioned() + */ + @Override + public IndexBuilder globalPartitioned() { + this.isGlobalPartitioned = true; + return this; + } + + /** + * @see org.alfasoftware.morf.metadata.SchemaUtils.IndexBuilder#localPartitioned() + */ + @Override + public IndexBuilder localPartitioned() { + this.isLocalPartitioned = true; + return this; + } } + /** + * private implementation of {@link PartitionsBuilder} + */ + private static final class PartitionsBuilderImpl extends PartitionsBean implements PartitionsBuilder { + + private PartitionsBuilderImpl() { + super(); + } + + private PartitionsBuilderImpl(Column column, PartitioningRuleType ruleType) { + super(column, ruleType); + } + + private PartitionsBuilderImpl(Column column, PartitioningRuleType ruleType, PartitioningRule partitioningRule, Iterable partitions) { + this.column = column; + 
this.partitioningType = ruleType; + this.partitioningRule = partitioningRule; + + this.partitions = new ArrayList<>(); + for (Partition partition : partitions) { + this.partitions.add(partition); + } + } + + @Override + public PartitionsBuilder column(Column column) { + return new PartitionsBuilderImpl(column, partitioningType, partitioningRule, partitions); + } + + @Override + public PartitionsBuilder ruleType(PartitioningRuleType ruleType) { + return new PartitionsBuilderImpl(column, ruleType, partitioningRule, partitions); + } + + @Override + public PartitionsBuilder partitions(Iterable partitions) { + return new PartitionsBuilderImpl(column, partitioningType, partitioningRule, partitions); + } + } + + + /** + * private implementation of {@link PartitionByRangeBuilder} + */ + public static final class PartitionByRangeBuilderImpl extends PartitionByRangeBean implements PartitionByRangeBuilder { + + private PartitionByRangeBuilderImpl(String name) { + super(name, null, null); + } + + private PartitionByRangeBuilderImpl(String name, String start, String end) { + super(name, start, end); + } + + @Override + public PartitionByRangeBuilder start(String start) { + return new PartitionByRangeBuilderImpl(name, start, end); + } + + @Override + public PartitionByRangeBuilder end(String end) { + return new PartitionByRangeBuilderImpl(name, start, end); + } + } + + /** + * private implementation of {@link PartitionByHashBuilder} + */ + private static final class PartitionByHashBuilderImpl extends PartitionByHashBean implements PartitionByHashBuilder { + + private PartitionByHashBuilderImpl(String name) { + super(name, null, null); + } + + private PartitionByHashBuilderImpl(String name, String divider, String remainder) { + super(name, divider, remainder); + } + + @Override + public PartitionByHashBuilder divider(String divider) { + return new PartitionByHashBuilderImpl(name, divider, remainder); + } + + @Override + public PartitionByHashBuilder remainder(String remainder) { 
+ return new PartitionByHashBuilderImpl(name, divider, remainder); + } + } /** * List the primary key columns for a given table. diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/Table.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/Table.java index 99d80d462..70b740ac7 100755 --- a/morf-core/src/main/java/org/alfasoftware/morf/metadata/Table.java +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/Table.java @@ -66,4 +66,16 @@ public default List primaryKey() { */ public boolean isTemporary(); + /** + * @return Indicates whether the table is partitioned + */ + boolean isPartitioned(); + + + /** + * @return the partitioning rule if it exists. + */ + Partitions partitions(); + + PartitioningRule partitioningRule(); } diff --git a/morf-core/src/main/java/org/alfasoftware/morf/metadata/TableBean.java b/morf-core/src/main/java/org/alfasoftware/morf/metadata/TableBean.java index c417c3674..89d73f718 100755 --- a/morf-core/src/main/java/org/alfasoftware/morf/metadata/TableBean.java +++ b/morf-core/src/main/java/org/alfasoftware/morf/metadata/TableBean.java @@ -18,6 +18,8 @@ import java.util.ArrayList; import java.util.List; +import org.apache.commons.lang3.StringUtils; + import com.google.common.collect.Iterables; @@ -48,6 +50,26 @@ class TableBean implements Table { */ private final boolean isTemporary; + /** + * The column used to partition by. + */ + protected String partitionColumn; + + /** + * The rule to use to partition by on the table. + */ + protected PartitioningRule partitioningRule; + + /** + * The table partitions collection on the table. + */ + protected Partitions partitions; + + /** + * The table to use as an example scheme for partitioning this one. + */ + protected String partitionedLikeTable; + /** * Creates a table bean. 
* @@ -164,6 +186,17 @@ public boolean isTemporary() { return isTemporary; } + @Override + public boolean isPartitioned() { + return !StringUtils.isEmpty(partitionColumn); + } + + + @Override + public PartitioningRule partitioningRule() { return partitioningRule; } + + @Override + public Partitions partitions() { return partitions; } @Override public String toString() { diff --git a/morf-core/src/main/java/org/alfasoftware/morf/upgrade/RenameTable.java b/morf-core/src/main/java/org/alfasoftware/morf/upgrade/RenameTable.java index 459690c12..6c2eeccc6 100755 --- a/morf-core/src/main/java/org/alfasoftware/morf/upgrade/RenameTable.java +++ b/morf-core/src/main/java/org/alfasoftware/morf/upgrade/RenameTable.java @@ -21,9 +21,12 @@ import org.alfasoftware.morf.jdbc.ConnectionResources; import org.alfasoftware.morf.metadata.Column; import org.alfasoftware.morf.metadata.Index; +import org.alfasoftware.morf.metadata.PartitioningRule; +import org.alfasoftware.morf.metadata.Partitions; import org.alfasoftware.morf.metadata.Schema; import org.alfasoftware.morf.metadata.SchemaUtils; import org.alfasoftware.morf.metadata.Table; + import com.google.common.collect.Maps; /** @@ -185,5 +188,18 @@ public List indexes() { public boolean isTemporary() { return baseTable.isTemporary(); } + + @Override + public boolean isPartitioned() { return false; } + + @Override + public PartitioningRule partitioningRule() { + return null; + } + + @Override + public Partitions partitions() { + return null; + } } } \ No newline at end of file diff --git a/morf-core/src/main/java/org/alfasoftware/morf/upgrade/adapt/AlteredTable.java b/morf-core/src/main/java/org/alfasoftware/morf/upgrade/adapt/AlteredTable.java index deeecd8bc..07a59cb23 100755 --- a/morf-core/src/main/java/org/alfasoftware/morf/upgrade/adapt/AlteredTable.java +++ b/morf-core/src/main/java/org/alfasoftware/morf/upgrade/adapt/AlteredTable.java @@ -23,6 +23,8 @@ import org.alfasoftware.morf.metadata.Column; import 
org.alfasoftware.morf.metadata.Index; +import org.alfasoftware.morf.metadata.PartitioningRule; +import org.alfasoftware.morf.metadata.Partitions; import org.alfasoftware.morf.metadata.Table; /** @@ -160,4 +162,18 @@ public List indexes() { public boolean isTemporary() { return baseTable.isTemporary(); } + + @Override + public boolean isPartitioned() { return false; } + + @Override + public PartitioningRule partitioningRule() { + return null; + } + + @Override + public Partitions partitions() { + return null; + } + ; } diff --git a/morf-core/src/main/java/org/alfasoftware/morf/upgrade/adapt/IndexNameDecorator.java b/morf-core/src/main/java/org/alfasoftware/morf/upgrade/adapt/IndexNameDecorator.java index 660fac89c..708e769da 100755 --- a/morf-core/src/main/java/org/alfasoftware/morf/upgrade/adapt/IndexNameDecorator.java +++ b/morf-core/src/main/java/org/alfasoftware/morf/upgrade/adapt/IndexNameDecorator.java @@ -58,6 +58,16 @@ public boolean isUnique() { return index.isUnique(); } + @Override + public boolean isGlobalPartitioned() { + return false; + } + + @Override + public boolean isLocalPartitioned() { + return false; + } + /** * @see org.alfasoftware.morf.metadata.Index#getName() */ diff --git a/morf-core/src/main/java/org/alfasoftware/morf/upgrade/adapt/TableNameDecorator.java b/morf-core/src/main/java/org/alfasoftware/morf/upgrade/adapt/TableNameDecorator.java index 889724720..0392a2f8e 100755 --- a/morf-core/src/main/java/org/alfasoftware/morf/upgrade/adapt/TableNameDecorator.java +++ b/morf-core/src/main/java/org/alfasoftware/morf/upgrade/adapt/TableNameDecorator.java @@ -18,11 +18,12 @@ import java.util.ArrayList; import java.util.List; -import org.apache.commons.lang3.StringUtils; - import org.alfasoftware.morf.metadata.Column; import org.alfasoftware.morf.metadata.Index; +import org.alfasoftware.morf.metadata.PartitioningRule; +import org.alfasoftware.morf.metadata.Partitions; import org.alfasoftware.morf.metadata.Table; +import 
org.apache.commons.lang3.StringUtils; /** * Decorator that changes a table name for deploying transitional tables. @@ -92,4 +93,19 @@ public List indexes() { public boolean isTemporary() { return false; } + + @Override + public boolean isPartitioned() { return false; } + + @Override + public PartitioningRule partitioningRule() { + return null; + } + + @Override + public Partitions partitions() { + return null; + } + + ; } diff --git a/morf-core/src/main/java/org/alfasoftware/morf/xml/XmlDataSetConsumer.java b/morf-core/src/main/java/org/alfasoftware/morf/xml/XmlDataSetConsumer.java index df594cccb..c889cf530 100755 --- a/morf-core/src/main/java/org/alfasoftware/morf/xml/XmlDataSetConsumer.java +++ b/morf-core/src/main/java/org/alfasoftware/morf/xml/XmlDataSetConsumer.java @@ -29,6 +29,11 @@ import org.alfasoftware.morf.metadata.Column; import org.alfasoftware.morf.metadata.DataType; import org.alfasoftware.morf.metadata.Index; +import org.alfasoftware.morf.metadata.Partition; +import org.alfasoftware.morf.metadata.PartitionByHash; +import org.alfasoftware.morf.metadata.PartitionByRange; +import org.alfasoftware.morf.metadata.PartitioningRuleType; +import org.alfasoftware.morf.metadata.Partitions; import org.alfasoftware.morf.metadata.Table; import org.alfasoftware.morf.xml.XmlStreamProvider.XmlOutputStreamProvider; import org.apache.commons.lang3.StringUtils; @@ -298,9 +303,73 @@ public int compare(Index o1, Index o2) { emptyElement(contentHandler, XmlDataSetNode.INDEX_NODE, buildIndexAttributes(index)); } + if (table.isPartitioned()) { + contentHandler.startElement(XmlDataSetNode.URI, XmlDataSetNode.PARTITIONS_NODE, XmlDataSetNode.PARTITIONS_NODE, buildPartitionsAttribute(table.partitions())); + for (Partition partition : table.partitions().getPartitions()) { + emptyElement(contentHandler, XmlDataSetNode.PARTITION_NODE, buildPartitionAttribute(table.partitions(), partition)); + } + contentHandler.endElement(XmlDataSetNode.URI, XmlDataSetNode.PARTITIONS_NODE, 
XmlDataSetNode.PARTITIONS_NODE); + } + contentHandler.endElement(XmlDataSetNode.URI, XmlDataSetNode.METADATA_NODE, XmlDataSetNode.METADATA_NODE); } + private Attributes buildPartitionAttribute(Partitions partitions, Partition partition) { + AttributesImpl partitionAttributes = new AttributesImpl(); + + partitionAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.NAME_ATTRIBUTE, XmlDataSetNode.NAME_ATTRIBUTE, + XmlDataSetNode.STRING_TYPE, partition.name()); + + switch (partitions.partitioningType()) { + case rangePartitioning: + PartitionByRange partitionByRange = (PartitionByRange) partition; + partitionAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.START_ATTRIBUTE, XmlDataSetNode.START_ATTRIBUTE, + XmlDataSetNode.STRING_TYPE, partitionByRange.start()); + partitionAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.END_ATTRIBUTE, XmlDataSetNode.END_ATTRIBUTE, + XmlDataSetNode.STRING_TYPE, partitionByRange.end()); + break; + case hashPartitioning: + PartitionByHash partitionByHash = (PartitionByHash) partition; + partitionAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.DIVIDER_ATTRIBUTE, XmlDataSetNode.DIVIDER_ATTRIBUTE, + XmlDataSetNode.STRING_TYPE, partitionByHash.divider()); + partitionAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.REMAINDER_ATTRIBUTE, XmlDataSetNode.REMAINDER_ATTRIBUTE, + XmlDataSetNode.STRING_TYPE, partitionByHash.remainder()); + break; + default: + break; + } + + return partitionAttributes; + } + + private Attributes buildPartitionsAttribute(Partitions partitions) { + AttributesImpl partitionsAttributes = new AttributesImpl(); + + partitionsAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.COLUMN_NODE, XmlDataSetNode.COLUMN_NODE, + XmlDataSetNode.STRING_TYPE, partitions.column().getName()); + + String partitioningType = ""; + switch (partitions.partitioningType()) { + case rangePartitioning: + partitioningType = "range"; + break; + case hashPartitioning: + partitioningType = "hash"; + 
break; + default: + break; + } + + partitionsAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.TYPE_ATTRIBUTE, XmlDataSetNode.TYPE_ATTRIBUTE, + XmlDataSetNode.STRING_TYPE, partitioningType); + if (partitions.partitioningType().equals(PartitioningRuleType.hashPartitioning)) { + partitionsAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.HASH_FUNCTION_ATTRIBUTE, XmlDataSetNode.HASH_FUNCTION_ATTRIBUTE, + XmlDataSetNode.STRING_TYPE, "MOD"); + } + + return partitionsAttributes; + } + /** * Build the attributes for a database index * diff --git a/morf-core/src/main/java/org/alfasoftware/morf/xml/XmlDataSetNode.java b/morf-core/src/main/java/org/alfasoftware/morf/xml/XmlDataSetNode.java index e86c890e7..7c848ab6c 100755 --- a/morf-core/src/main/java/org/alfasoftware/morf/xml/XmlDataSetNode.java +++ b/morf-core/src/main/java/org/alfasoftware/morf/xml/XmlDataSetNode.java @@ -118,4 +118,44 @@ public class XmlDataSetNode { */ public static final String AUTONUMBER_ATTRIBUTE = "autoNum"; + + /** + * Node name for partitions + */ + public static final String PARTITIONS_NODE = "partitions"; + + /** + * Node name for partition + */ + public static final String PARTITION_NODE = "partition"; + + /** + * Node name for hashFunction + */ + public static final String HASH_FUNCTION_ATTRIBUTE = "hashFunction"; + + /** + * Node name for start + */ + public static final String START_ATTRIBUTE = "start"; + + /** + * Node name for end + */ + public static final String END_ATTRIBUTE = "end"; + + /** + * Attribute name for the rule type property. + */ + public static final String RULE_TYPE_ATTRIBUTE = "ruleType"; + + /** + * Attribute name for the divider property. + */ + public static final String DIVIDER_ATTRIBUTE = "divider"; + + /** + * Attribute name for the remainder property. 
+ */ + public static final String REMAINDER_ATTRIBUTE = "remainder"; } diff --git a/morf-core/src/main/java/org/alfasoftware/morf/xml/XmlDataSetProducer.java b/morf-core/src/main/java/org/alfasoftware/morf/xml/XmlDataSetProducer.java index 6752b9179..67375ea08 100755 --- a/morf-core/src/main/java/org/alfasoftware/morf/xml/XmlDataSetProducer.java +++ b/morf-core/src/main/java/org/alfasoftware/morf/xml/XmlDataSetProducer.java @@ -43,13 +43,17 @@ import org.alfasoftware.morf.dataset.Record; import org.alfasoftware.morf.metadata.Column; import org.alfasoftware.morf.metadata.DataSetUtils; +import org.alfasoftware.morf.metadata.DataSetUtils.RecordBuilder; import org.alfasoftware.morf.metadata.DataType; import org.alfasoftware.morf.metadata.Index; +import org.alfasoftware.morf.metadata.Partition; +import org.alfasoftware.morf.metadata.PartitioningRule; +import org.alfasoftware.morf.metadata.PartitioningRuleType; +import org.alfasoftware.morf.metadata.Partitions; import org.alfasoftware.morf.metadata.Schema; import org.alfasoftware.morf.metadata.SchemaUtils; import org.alfasoftware.morf.metadata.Sequence; import org.alfasoftware.morf.metadata.Table; -import org.alfasoftware.morf.metadata.DataSetUtils.RecordBuilder; import org.alfasoftware.morf.metadata.View; import org.alfasoftware.morf.xml.XmlStreamProvider.XmlInputStreamProvider; import org.apache.commons.lang3.StringUtils; @@ -423,12 +427,28 @@ private static final class PullProcessorTableMetaData extends XmlPullProcessor i */ private final List indexes = new LinkedList<>(); + private Partitions partitions; + /** * Holds the table name. 
*/ private final String tableName; + @Override + public boolean isPartitioned() { return partitions != null; } + + @Override + public PartitioningRule partitioningRule() { + return partitions.partitioningRule(); + } + + @Override + public Partitions partitions() { + return partitions; + } + + /** * @param xmlStreamReader pull parser that provides the xml data * @param xmlFormatVersion The format version. @@ -456,6 +476,10 @@ public PullProcessorTableMetaData(XMLStreamReader xmlStreamReader, int xmlFormat if (XmlDataSetNode.INDEX_NODE.equals(nextTag)) { indexes.add(new PullProcessorIndex()); } + + if (XmlDataSetNode.PARTITIONS_NODE.equals(nextTag)) { + partitions = new PullProcessorPartitions(); + } } } catch (RuntimeException e) { @@ -683,6 +707,12 @@ public int getAutoNumberStart() { return autonumberStart == null ? 0 : autonumberStart; } + @Override + public boolean isPartitioned() { + //TODO: implement this method to read whether the table is partitioned from XML + return false; + } + @Override public String toString() { @@ -741,6 +771,18 @@ public boolean isUnique() { return isUnique; } + @Override + public boolean isGlobalPartitioned() { + //TODO: support global index part spec reading from XML + return false; + } + + @Override + public boolean isLocalPartitioned() { + //TODO: support local index part spec reading from XML + return false; + } + /** * @see org.alfasoftware.morf.metadata.Index#columnNames() @@ -757,6 +799,84 @@ public String toString() { } } + /** + * Implementation of {@link Partitions} that can read from the pull processor. 
+ * + * @author Copyright (c) Alfa Financial Software 2010 + */ + private class PullProcessorPartitions implements Partitions { + private String columnName; + private Column column; + private PartitioningRuleType partitioningRuleType; + private List partitions = new ArrayList<>(); + + public PullProcessorPartitions() { + super(); + columnName = xmlStreamReader.getAttributeValue(XmlDataSetNode.URI, XmlDataSetNode.COLUMN_NODE); + column = columns.stream().filter(c -> c.getName().equals(columnName)).findFirst().orElse(null); + + if (column != null) { + int colIndex = columns.indexOf(column); + columns.remove(column); + columns.add(colIndex, SchemaUtils.column(column).partitioned()); + } + + String partitioningType = xmlStreamReader.getAttributeValue(XmlDataSetNode.URI, XmlDataSetNode.TYPE_ATTRIBUTE); + switch (partitioningType) { + case "hash": + partitioningRuleType = PartitioningRuleType.hashPartitioning; + break; + case "range": + partitioningRuleType = PartitioningRuleType.rangePartitioning; + break; + default: + break; + } + + for (String nextTag = readNextTagInsideParent(XmlDataSetNode.PARTITIONS_NODE); nextTag != null; nextTag = readNextTagInsideParent(XmlDataSetNode.PARTITIONS_NODE)) { + if (nextTag.equals(XmlDataSetNode.PARTITION_NODE)) { + String partitionName = xmlStreamReader.getAttributeValue(XmlDataSetNode.URI, XmlDataSetNode.NAME_ATTRIBUTE); + Partition partition = null; + if (partitioningRuleType == PartitioningRuleType.hashPartitioning) { + SchemaUtils.PartitionByHashBuilder partitionByHash = SchemaUtils.partitionByHash(partitionName); + String divider = xmlStreamReader.getAttributeValue(XmlDataSetNode.URI, XmlDataSetNode.DIVIDER_ATTRIBUTE); + String remainder = xmlStreamReader.getAttributeValue(XmlDataSetNode.URI, XmlDataSetNode.REMAINDER_ATTRIBUTE); + partition = partitionByHash.divider(divider).remainder(remainder); + } else if (partitioningRuleType == PartitioningRuleType.rangePartitioning) { + SchemaUtils.PartitionByRangeBuilder partitionByRange 
= SchemaUtils.partitionByRange(partitionName); + String start = xmlStreamReader.getAttributeValue(XmlDataSetNode.URI, XmlDataSetNode.START_ATTRIBUTE); + String end = xmlStreamReader.getAttributeValue(XmlDataSetNode.URI, XmlDataSetNode.END_ATTRIBUTE); + partition = partitionByRange.start(start).end(end); + } + if (partition != null) { + partitions.add(partition); + } + readNextTagInsideParent(XmlDataSetNode.PARTITION_NODE); + } + } + } + + @Override + public Column column() { + return column; + } + + @Override + public PartitioningRuleType partitioningType() { + return partitioningRuleType; + } + + @Override + public PartitioningRule partitioningRule() { + return null; + } + + @Override + public List getPartitions() { + return partitions; + } + } + /** * {@inheritDoc} @@ -770,6 +890,7 @@ public boolean isTemporary() { } + /** * Provides on demand XML reading as a record iterator. * diff --git a/morf-core/src/test/java/org/alfasoftware/morf/dataset/TestWithMetaDataAdapter.java b/morf-core/src/test/java/org/alfasoftware/morf/dataset/TestWithMetaDataAdapter.java index 244f29854..88d4b15c5 100755 --- a/morf-core/src/test/java/org/alfasoftware/morf/dataset/TestWithMetaDataAdapter.java +++ b/morf-core/src/test/java/org/alfasoftware/morf/dataset/TestWithMetaDataAdapter.java @@ -25,7 +25,15 @@ import java.util.HashSet; import java.util.Set; -import org.alfasoftware.morf.metadata.*; +import org.alfasoftware.morf.metadata.Column; +import org.alfasoftware.morf.metadata.DataSetUtils; +import org.alfasoftware.morf.metadata.Index; +import org.alfasoftware.morf.metadata.PartitioningRule; +import org.alfasoftware.morf.metadata.Partitions; +import org.alfasoftware.morf.metadata.Schema; +import org.alfasoftware.morf.metadata.Sequence; +import org.alfasoftware.morf.metadata.Table; +import org.alfasoftware.morf.metadata.View; import org.junit.Test; /** @@ -160,6 +168,23 @@ public String getName() { public java.util.List indexes() { return null; } + + @Override + public boolean 
isPartitioned() { return false; } + + @Override + public PartitioningRule partitioningRule() { + //TODO: support metadata reading on whether the table is partitioned. + return null; + } + + @Override + public Partitions partitions() { + //TODO: support metadata reading on whether the table is partitioned. + return null; + } + + ; }; } diff --git a/morf-core/src/test/java/org/alfasoftware/morf/jdbc/TestResultSetIterator.java b/morf-core/src/test/java/org/alfasoftware/morf/jdbc/TestResultSetIterator.java index c43e92e32..bbcc49740 100644 --- a/morf-core/src/test/java/org/alfasoftware/morf/jdbc/TestResultSetIterator.java +++ b/morf-core/src/test/java/org/alfasoftware/morf/jdbc/TestResultSetIterator.java @@ -2,8 +2,8 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import static org.mockito.BDDMockito.given; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.BDDMockito.given; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -18,6 +18,8 @@ import org.alfasoftware.morf.metadata.Column; import org.alfasoftware.morf.metadata.DataType; import org.alfasoftware.morf.metadata.Index; +import org.alfasoftware.morf.metadata.PartitioningRule; +import org.alfasoftware.morf.metadata.Partitions; import org.alfasoftware.morf.metadata.SchemaUtils; import org.alfasoftware.morf.metadata.Table; import org.alfasoftware.morf.sql.SelectStatement; @@ -292,6 +294,23 @@ public String getName() { public List columns() { return Lists.newArrayList(SchemaUtils.column("Column", DataType.STRING, 20).nullable()); } + + @Override + public boolean isPartitioned() { return false; } + + @Override + public PartitioningRule partitioningRule() { + //TODO: implement table building with partitioning + return null; + } + + @Override + public Partitions partitions() { + //TODO: implement table building with partitioning + return null; + } + + ; }; } } diff --git 
a/morf-core/src/test/java/org/alfasoftware/morf/jdbc/TestSqlQueryDataSetProducer.java b/morf-core/src/test/java/org/alfasoftware/morf/jdbc/TestSqlQueryDataSetProducer.java index 6ab361500..abd4e3187 100644 --- a/morf-core/src/test/java/org/alfasoftware/morf/jdbc/TestSqlQueryDataSetProducer.java +++ b/morf-core/src/test/java/org/alfasoftware/morf/jdbc/TestSqlQueryDataSetProducer.java @@ -16,6 +16,8 @@ import org.alfasoftware.morf.metadata.Column; import org.alfasoftware.morf.metadata.DataType; import org.alfasoftware.morf.metadata.Index; +import org.alfasoftware.morf.metadata.PartitioningRule; +import org.alfasoftware.morf.metadata.Partitions; import org.alfasoftware.morf.metadata.SchemaUtils; import org.alfasoftware.morf.metadata.Table; import org.junit.Before; @@ -148,6 +150,21 @@ public String getName() { public List columns() { return Lists.newArrayList(SchemaUtils.column("Column", DataType.STRING, 20).nullable()); } + + @Override + public boolean isPartitioned() { return false; } + + @Override + public PartitioningRule partitioningRule() { + return null; + } + + @Override + public Partitions partitions() { + return null; + } + + ; }; } } diff --git a/morf-core/src/test/java/org/alfasoftware/morf/metadata/TestCompositeSchema.java b/morf-core/src/test/java/org/alfasoftware/morf/metadata/TestCompositeSchema.java index d6a5fb727..2e2765ad8 100755 --- a/morf-core/src/test/java/org/alfasoftware/morf/metadata/TestCompositeSchema.java +++ b/morf-core/src/test/java/org/alfasoftware/morf/metadata/TestCompositeSchema.java @@ -15,7 +15,10 @@ package org.alfasoftware.morf.metadata; -import static org.alfasoftware.morf.metadata.SchemaUtils.*; +import static org.alfasoftware.morf.metadata.SchemaUtils.schema; +import static org.alfasoftware.morf.metadata.SchemaUtils.sequence; +import static org.alfasoftware.morf.metadata.SchemaUtils.table; +import static org.alfasoftware.morf.metadata.SchemaUtils.view; import static org.alfasoftware.morf.sql.SqlUtils.select; import static 
org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; diff --git a/morf-core/src/test/java/org/alfasoftware/morf/metadata/TestSchemaBean.java b/morf-core/src/test/java/org/alfasoftware/morf/metadata/TestSchemaBean.java index d111b2b0c..f64e11e82 100755 --- a/morf-core/src/test/java/org/alfasoftware/morf/metadata/TestSchemaBean.java +++ b/morf-core/src/test/java/org/alfasoftware/morf/metadata/TestSchemaBean.java @@ -28,10 +28,10 @@ import java.util.Collection; import java.util.List; -import org.junit.Test; - import org.alfasoftware.morf.sql.SelectStatement; import org.alfasoftware.morf.sql.element.TableReference; +import org.junit.Test; + import com.google.common.collect.ImmutableList; /** @@ -136,6 +136,21 @@ public List columns() { public boolean isTemporary() { return false; } + + @Override + public boolean isPartitioned() { return false; } + + @Override + public PartitioningRule partitioningRule() { + return null; + } + + @Override + public Partitions partitions() { + return null; + } + + ; }; diff --git a/morf-core/src/test/java/org/alfasoftware/morf/upgrade/adapt/TestTableSetSchema.java b/morf-core/src/test/java/org/alfasoftware/morf/upgrade/adapt/TestTableSetSchema.java index 04c1994cc..52a257e10 100755 --- a/morf-core/src/test/java/org/alfasoftware/morf/upgrade/adapt/TestTableSetSchema.java +++ b/morf-core/src/test/java/org/alfasoftware/morf/upgrade/adapt/TestTableSetSchema.java @@ -15,7 +15,9 @@ package org.alfasoftware.morf.upgrade.adapt; -import static org.alfasoftware.morf.metadata.SchemaUtils.*; +import static org.alfasoftware.morf.metadata.SchemaUtils.column; +import static org.alfasoftware.morf.metadata.SchemaUtils.sequence; +import static org.alfasoftware.morf.metadata.SchemaUtils.table; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; @@ -24,13 +26,12 @@ import java.util.HashSet; import java.util.Set; +import 
org.alfasoftware.morf.metadata.DataType; import org.alfasoftware.morf.metadata.Sequence; +import org.alfasoftware.morf.metadata.Table; import org.junit.Before; import org.junit.Test; -import org.alfasoftware.morf.metadata.DataType; -import org.alfasoftware.morf.metadata.Table; - /** * Test the functionality provided by {@link TableSetSchema} * diff --git a/morf-core/src/test/java/org/alfasoftware/morf/xml/TestXmlDataSetProducer.java b/morf-core/src/test/java/org/alfasoftware/morf/xml/TestXmlDataSetProducer.java index b3e9308ef..9be7bd10d 100755 --- a/morf-core/src/test/java/org/alfasoftware/morf/xml/TestXmlDataSetProducer.java +++ b/morf-core/src/test/java/org/alfasoftware/morf/xml/TestXmlDataSetProducer.java @@ -213,6 +213,7 @@ private void testTableNamesAgainstProducer(XmlDataSetProducer producer) { use(producer.records("eNTITYoNE")); assertFalse("Non existant table", producer.getSchema().tableNames().contains("NotExist")); + producer.close(); } diff --git a/morf-excel/src/main/java/org/alfasoftware/morf/excel/SpreadsheetDataSetProducer.java b/morf-excel/src/main/java/org/alfasoftware/morf/excel/SpreadsheetDataSetProducer.java index 6e8cff643..a7352911a 100755 --- a/morf-excel/src/main/java/org/alfasoftware/morf/excel/SpreadsheetDataSetProducer.java +++ b/morf-excel/src/main/java/org/alfasoftware/morf/excel/SpreadsheetDataSetProducer.java @@ -31,10 +31,10 @@ import org.alfasoftware.morf.dataset.DataSetProducer; import org.alfasoftware.morf.dataset.Record; import org.alfasoftware.morf.metadata.DataSetUtils; +import org.alfasoftware.morf.metadata.DataSetUtils.RecordBuilder; import org.alfasoftware.morf.metadata.Schema; import org.alfasoftware.morf.metadata.Sequence; import org.alfasoftware.morf.metadata.Table; -import org.alfasoftware.morf.metadata.DataSetUtils.RecordBuilder; import org.alfasoftware.morf.metadata.View; import org.apache.commons.lang3.StringUtils; diff --git a/morf-h2/src/test/java/org/alfasoftware/morf/jdbc/h2/TestH2Dialect.java 
b/morf-h2/src/test/java/org/alfasoftware/morf/jdbc/h2/TestH2Dialect.java index 6655b4a4e..c8c5c66af 100755 --- a/morf-h2/src/test/java/org/alfasoftware/morf/jdbc/h2/TestH2Dialect.java +++ b/morf-h2/src/test/java/org/alfasoftware/morf/jdbc/h2/TestH2Dialect.java @@ -60,7 +60,9 @@ protected List expectedCreateTableStatements() { "CREATE INDEX Alternate_1 ON "+TEST_SCHEMA+".Alternate (stringField)", "CREATE TABLE "+TEST_SCHEMA+".NonNull (id BIGINT NOT NULL, version INTEGER DEFAULT 0, stringField VARCHAR(3) NOT NULL, intField DECIMAL(8,0) NOT NULL, booleanField BIT NOT NULL, dateField DATE NOT NULL, blobField LONGVARBINARY NOT NULL, CONSTRAINT NonNull_PK PRIMARY KEY (id))", "CREATE TABLE "+TEST_SCHEMA+".CompositePrimaryKey (id BIGINT NOT NULL, version INTEGER DEFAULT 0, stringField VARCHAR(3) NOT NULL, secondPrimaryKey VARCHAR(3) NOT NULL, CONSTRAINT CompositePrimaryKey_PK PRIMARY KEY (id, secondPrimaryKey))", - "CREATE TABLE "+TEST_SCHEMA+".AutoNumber (intField BIGINT AUTO_INCREMENT(5) COMMENT 'AUTONUMSTART:[5]', CONSTRAINT AutoNumber_PK PRIMARY KEY (intField))" + "CREATE TABLE "+TEST_SCHEMA+".AutoNumber (intField BIGINT AUTO_INCREMENT(5) COMMENT 'AUTONUMSTART:[5]', CONSTRAINT AutoNumber_PK PRIMARY KEY (intField))", + "CREATE TABLE "+TEST_SCHEMA+".Measurement (intField DECIMAL(8,0) NOT NULL, dateField DATE NOT NULL, stringField VARCHAR(3) NOT NULL)", + "CREATE TABLE "+TEST_SCHEMA+".MeasurementHash (intField DECIMAL(8,0) NOT NULL, dateField DATE NOT NULL, stringField VARCHAR(3) NOT NULL)" ); } diff --git a/morf-h2/src/test/java/org/alfasoftware/morf/jdbc/h2/TestH2MetaDataProvider.java b/morf-h2/src/test/java/org/alfasoftware/morf/jdbc/h2/TestH2MetaDataProvider.java index ea8890ce8..46c584f21 100644 --- a/morf-h2/src/test/java/org/alfasoftware/morf/jdbc/h2/TestH2MetaDataProvider.java +++ b/morf-h2/src/test/java/org/alfasoftware/morf/jdbc/h2/TestH2MetaDataProvider.java @@ -15,6 +15,20 @@ package org.alfasoftware.morf.jdbc.h2; +import static org.junit.Assert.assertEquals; 
+import static org.mockito.Mockito.RETURNS_SMART_NULLS; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +import javax.sql.DataSource; + import org.alfasoftware.morf.jdbc.DatabaseType; import org.alfasoftware.morf.metadata.Schema; import org.alfasoftware.morf.metadata.Sequence; @@ -23,16 +37,6 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import javax.sql.DataSource; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; - -import static org.junit.Assert.assertEquals; -import static org.mockito.Mockito.*; -import static org.mockito.Mockito.when; - /** * Test class for {@link H2MetaDataProvider} diff --git a/morf-integration-test/src/test/java/org/alfasoftware/morf/dataset/MockDataSetProducer.java b/morf-integration-test/src/test/java/org/alfasoftware/morf/dataset/MockDataSetProducer.java index 63bdce046..621f2313d 100755 --- a/morf-integration-test/src/test/java/org/alfasoftware/morf/dataset/MockDataSetProducer.java +++ b/morf-integration-test/src/test/java/org/alfasoftware/morf/dataset/MockDataSetProducer.java @@ -25,6 +25,7 @@ import org.alfasoftware.morf.metadata.Sequence; import org.alfasoftware.morf.metadata.Table; import org.alfasoftware.morf.metadata.View; + import com.google.common.collect.Maps; /** @@ -155,6 +156,7 @@ public Collection tableNames() { return tables.keySet(); } + /** * @see org.alfasoftware.morf.metadata.Schema#tables() */ diff --git a/morf-integration-test/src/test/java/org/alfasoftware/morf/integration/TestDatabaseUpgradeIntegration.java b/morf-integration-test/src/test/java/org/alfasoftware/morf/integration/TestDatabaseUpgradeIntegration.java index bcbaac8f4..1eeac8c7b 100755 --- 
a/morf-integration-test/src/test/java/org/alfasoftware/morf/integration/TestDatabaseUpgradeIntegration.java +++ b/morf-integration-test/src/test/java/org/alfasoftware/morf/integration/TestDatabaseUpgradeIntegration.java @@ -40,10 +40,7 @@ import static org.alfasoftware.morf.upgrade.db.DatabaseUpgradeTableContribution.deployedViewsTable; import static org.alfasoftware.morf.upgrade.db.DatabaseUpgradeTableContribution.upgradeAuditTable; import static org.hamcrest.Matchers.equalToIgnoringCase; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.Assert.*; import java.math.BigDecimal; import java.sql.Connection; @@ -556,6 +553,7 @@ public void testAddPrimaryKeyColumns() { */ @Test public void testAddIndexWithExistingPRFIndex() { + boolean isOracle = connectionResources.getDatabaseType().equals("ORACLE"); Table tableWithNewAddIndex = table("BasicTableWithIndex") .columns( column("stringCol", DataType.STRING, 20).primaryKey(), @@ -571,7 +569,12 @@ public void testAddIndexWithExistingPRFIndex() { Schema reAdded = replaceTablesInSchema(tableWithNewAddIndex); - verifyUpgrade(reAdded, AddIndex.class); + if (isOracle) { + RuntimeSqlException sqlException = assertThrows(RuntimeSqlException.class, () -> verifyUpgrade(reAdded, AddIndex.class)); + assertTrue("Oracle exception ORA-01408: such column list already indexed", sqlException.getMessage().contains("[1408]")); + } else { + verifyUpgrade(reAdded, AddIndex.class); + } } @@ -1121,7 +1124,7 @@ private void doAddColumn(Class upgradeStep) throws SQLExc "(?is)(" + "NULL not allowed for column \"ANOTHERVALUE\"" + ".*)" // H2 + "|(" + "Field 'anotherValue' doesn't have a default value" + ".*)" // MySQL + "|(" + "ORA-01400: cannot insert NULL into \\(.*ANOTHERVALUE.*\\)" + ".*)" // Oracle - + "|(" + "ERROR: null value in column \"anothervalue\" violates not-null constraint" + ".*)" // PgSQL + 
+ "|(" + "ERROR: null value in column \"anothervalue\" of relation \"withdefaultvalue\" violates not-null constraint" + ".*)" // PgSQL )); } } diff --git a/morf-integration-test/src/test/java/org/alfasoftware/morf/integration/TestSqlStatements.java b/morf-integration-test/src/test/java/org/alfasoftware/morf/integration/TestSqlStatements.java index 25267ccbf..26a6b99f1 100755 --- a/morf-integration-test/src/test/java/org/alfasoftware/morf/integration/TestSqlStatements.java +++ b/morf-integration-test/src/test/java/org/alfasoftware/morf/integration/TestSqlStatements.java @@ -117,6 +117,7 @@ import java.util.List; import java.util.Random; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; import java.util.function.UnaryOperator; @@ -155,12 +156,15 @@ import org.alfasoftware.morf.sql.element.FieldLiteral; import org.alfasoftware.morf.sql.element.FieldReference; import org.alfasoftware.morf.sql.element.Function; +import org.alfasoftware.morf.sql.element.PortableSqlFunction; import org.alfasoftware.morf.sql.element.SqlParameter; import org.alfasoftware.morf.sql.element.TableReference; import org.alfasoftware.morf.testing.DatabaseSchemaManager; import org.alfasoftware.morf.testing.DatabaseSchemaManager.TruncationBehavior; import org.alfasoftware.morf.testing.TestingDataSourceModule; import org.alfasoftware.morf.upgrade.LoggingSqlScriptVisitor; +import org.apache.commons.codec.DecoderException; +import org.apache.commons.codec.binary.Hex; import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -211,6 +215,24 @@ public class TestSqlStatements { //CHECKSTYLE:OFF private static final String BLOB1_VALUE = "A Blob named One"; private static final String BLOB2_VALUE = "A Blob named Two"; + private static final byte[] BLOB3_VALUE = new byte[] { + (byte)0x00, (byte)0x01, (byte)0x02, (byte)0x03, (byte)0x04, (byte)0x05, (byte)0x06, 
(byte)0x07, (byte)0x08, (byte)0x09, (byte)0x0A, (byte)0x0B, (byte)0x0C, (byte)0x0D, (byte)0x0E, (byte)0x0F, (byte)0x10, + (byte)0x11, (byte)0x12, (byte)0x13, (byte)0x14, (byte)0x15, (byte)0x16, (byte)0x17, (byte)0x18, (byte)0x19, (byte)0x1A, (byte)0x1B, (byte)0x1C, (byte)0x1D, (byte)0x1E, (byte)0x1F, (byte)0x20, (byte)0x21, + (byte)0x22, (byte)0x23, (byte)0x24, (byte)0x25, (byte)0x26, (byte)0x27, (byte)0x28, (byte)0x29, (byte)0x2A, (byte)0x2B, (byte)0x2C, (byte)0x2D, (byte)0x2E, (byte)0x2F, (byte)0x30, (byte)0x31, (byte)0x32, + (byte)0x33, (byte)0x34, (byte)0x35, (byte)0x36, (byte)0x37, (byte)0x38, (byte)0x39, (byte)0x3A, (byte)0x3B, (byte)0x3C, (byte)0x3D, (byte)0x3E, (byte)0x3F, (byte)0x40, (byte)0x41, (byte)0x42, (byte)0x43, + (byte)0x44, (byte)0x45, (byte)0x46, (byte)0x47, (byte)0x48, (byte)0x49, (byte)0x4A, (byte)0x4B, (byte)0x4C, (byte)0x4D, (byte)0x4E, (byte)0x4F, (byte)0x50, (byte)0x51, (byte)0x52, (byte)0x53, (byte)0x54, + (byte)0x55, (byte)0x56, (byte)0x57, (byte)0x58, (byte)0x59, (byte)0x5A, (byte)0x5B, (byte)0x5C, (byte)0x5D, (byte)0x5E, (byte)0x5F, (byte)0x60, (byte)0x61, (byte)0x62, (byte)0x63, (byte)0x64, (byte)0x65, + (byte)0x66, (byte)0x67, (byte)0x68, (byte)0x69, (byte)0x6A, (byte)0x6B, (byte)0x6C, (byte)0x6D, (byte)0x6E, (byte)0x6F, (byte)0x70, (byte)0x71, (byte)0x72, (byte)0x73, (byte)0x74, (byte)0x75, (byte)0x76, + (byte)0x77, (byte)0x78, (byte)0x79, (byte)0x7A, (byte)0x7B, (byte)0x7C, (byte)0x7D, (byte)0x7E, (byte)0x7F, (byte)0x80, (byte)0x81, (byte)0x82, (byte)0x83, (byte)0x84, (byte)0x85, (byte)0x86, (byte)0x87, + (byte)0x88, (byte)0x89, (byte)0x8A, (byte)0x8B, (byte)0x8C, (byte)0x8D, (byte)0x8E, (byte)0x8F, (byte)0x90, (byte)0x91, (byte)0x92, (byte)0x93, (byte)0x94, (byte)0x95, (byte)0x96, (byte)0x97, (byte)0x98, + (byte)0x99, (byte)0x9A, (byte)0x9B, (byte)0x9C, (byte)0x9D, (byte)0x9E, (byte)0x9F, (byte)0xA0, (byte)0xA1, (byte)0xA2, (byte)0xA3, (byte)0xA4, (byte)0xA5, (byte)0xA6, (byte)0xA7, (byte)0xA8, (byte)0xA9, + (byte)0xAA, (byte)0xAB, 
(byte)0xAC, (byte)0xAD, (byte)0xAE, (byte)0xAF, (byte)0xB0, (byte)0xB1, (byte)0xB2, (byte)0xB3, (byte)0xB4, (byte)0xB5, (byte)0xB6, (byte)0xB7, (byte)0xB8, (byte)0xB9, (byte)0xBA, + (byte)0xBB, (byte)0xBC, (byte)0xBD, (byte)0xBE, (byte)0xBF, (byte)0xC0, (byte)0xC1, (byte)0xC2, (byte)0xC3, (byte)0xC4, (byte)0xC5, (byte)0xC6, (byte)0xC7, (byte)0xC8, (byte)0xC9, (byte)0xCA, (byte)0xCB, + (byte)0xCC, (byte)0xCD, (byte)0xCE, (byte)0xCF, (byte)0xD0, (byte)0xD1, (byte)0xD2, (byte)0xD3, (byte)0xD4, (byte)0xD5, (byte)0xD6, (byte)0xD7, (byte)0xD8, (byte)0xD9, (byte)0xDA, (byte)0xDB, (byte)0xDC, + (byte)0xDD, (byte)0xDE, (byte)0xDF, (byte)0xE0, (byte)0xE1, (byte)0xE2, (byte)0xE3, (byte)0xE4, (byte)0xE5, (byte)0xE6, (byte)0xE7, (byte)0xE8, (byte)0xE9, (byte)0xEA, (byte)0xEB, (byte)0xEC, (byte)0xED, + (byte)0xEE, (byte)0xEF, (byte)0xF0, (byte)0xF1, (byte)0xF2, (byte)0xF3, (byte)0xF4, (byte)0xF5, (byte)0xF6, (byte)0xF7, (byte)0xF8, (byte)0xF9, (byte)0xFA, (byte)0xFB, (byte)0xFC, (byte)0xFD, (byte)0xFE, + (byte)0xFF + }; @Rule public InjectMembersRule injectMembersRule = new InjectMembersRule(new TestingDataSourceModule()); @@ -1913,8 +1935,12 @@ public void testBlobFields() throws SQLException { field("column1").eq(blobLiteral(BLOB1_VALUE.getBytes())), field("column1").eq(blobLiteral(BLOB1_VALUE)) )); + // this update fails to work as an update without a WHERE clause - it strangely inserts a duplicate row on Postgres without a where clause UpdateStatement updateStatement = update(tableRef("BlobTable")) - .set(blobLiteral(BLOB1_VALUE + " Updated").as("column1"), blobLiteral((BLOB2_VALUE + " Updated").getBytes()).as("column2")); + .set(blobLiteral(BLOB1_VALUE + " Updated").as("column1"), blobLiteral((BLOB2_VALUE + " Updated").getBytes()).as("column2")) + .where( + field("column1").eq(blobLiteral((BLOB1_VALUE).getBytes())) + ); SelectStatement selectStatementAfterUpdate = select(field("column1"), field("column2")) .from(tableRef("BlobTable")) .where(or( @@ -1928,19 +1954,32 @@ 
public void testBlobFields() throws SQLException { // Check result - note that this is deliberately not tidy - we are making sure that results get // passed back up to this scope correctly. String sql = convertStatementToSQL(selectStatementAfterInsert); + AtomicBoolean isFirstValueHex = new AtomicBoolean(false); Integer numberOfRecords = executor.executeQuery(sql, connection, new ResultSetProcessor() { @Override public Integer process(ResultSet resultSet) throws SQLException { int result = 0; while (resultSet.next()) { result++; - assertEquals("column1 blob value not correctly set/returned after insert", BLOB1_VALUE, new String(resultSet.getBytes(1))); - assertEquals("column2 blob value not correctly set/returned after insert", BLOB2_VALUE, new String(resultSet.getBytes(2))); + byte[] bytesFromFirst = resultSet.getBytes("column1"); + + if (bytesFromFirst[1] == 32) { // if second char is a space then it isn't hex encoded + assertEquals("column1 blob value not correctly set/returned after insert", BLOB1_VALUE, new String(resultSet.getBytes(1))); + assertEquals("column2 blob value not correctly set/returned after insert", BLOB2_VALUE, new String(resultSet.getBytes(2))); + } else { + isFirstValueHex.set(true); + assertEquals("column1 blob value not correctly set/returned after insert", BLOB1_VALUE, decodeBlobHexFromBytesToText(resultSet.getBytes(1))); + assertEquals("column2 blob value not correctly set/returned after insert", BLOB2_VALUE, decodeBlobHexFromBytesToText(resultSet.getBytes(2))); + } } return result; } }); - assertEquals("Should be exactly two records", 2, numberOfRecords.intValue()); + if (isFirstValueHex.get()) { + assertEquals("Should be exactly one record", 1, numberOfRecords.intValue()); + } else { + assertEquals("Should be exactly two records", 2, numberOfRecords.intValue()); + } // Update executor.execute(ImmutableList.of(convertStatementToSQL(updateStatement)), connection); @@ -1948,21 +1987,215 @@ public Integer process(ResultSet resultSet) throws 
SQLException { // Check result- note that this is deliberately not tidy - we are making sure that results get // passed back up to this scope correctly. sql = convertStatementToSQL(selectStatementAfterUpdate); + AtomicBoolean isUpdateFirstValueHex = new AtomicBoolean(false); numberOfRecords = executor.executeQuery(sql, connection, new ResultSetProcessor() { @Override public Integer process(ResultSet resultSet) throws SQLException { int result = 0; while (resultSet.next()) { result++; + byte[] bytesFromFirst = resultSet.getBytes("column1"); + if (bytesFromFirst[1] == 32) { // if second char is a space then it isn't hex encoded assertEquals("column1 blob value not correctly set/returned after update", BLOB1_VALUE + " Updated", new String(resultSet.getBytes(1))); assertEquals("column2 blob value not correctly set/returned after update", BLOB2_VALUE + " Updated", new String(resultSet.getBytes(2))); + } else { + isUpdateFirstValueHex.set(true); + assertEquals("column1 blob value not correctly set/returned after update", BLOB1_VALUE + " Updated", decodeBlobHexFromBytesToText(resultSet.getBytes(1))); + assertEquals("column2 blob value not correctly set/returned after update", BLOB2_VALUE + " Updated", decodeBlobHexFromBytesToText(resultSet.getBytes(2))); + } } return result; } }); - assertEquals("Should be exactly two records", 2, numberOfRecords.intValue()); + if (isUpdateFirstValueHex.get()) { + assertEquals("Should be exactly one records", 1, numberOfRecords.intValue()); + } else { + assertEquals("Should be exactly two records", 2, numberOfRecords.intValue()); + } + } + + + /** + * Test the behaviour of SELECTs, INSERTs and UPDATEs of blob fields. In the process + * we test a lot of {@link SqlScriptExecutor}'s statement handling capabilities + * + * @throws SQLException if something goes wrong. 
+ */ + @Test + public void testBlobFieldsRealBinary() { // throws SQLException + SqlScriptExecutor executor = sqlScriptExecutorProvider.get(new LoggingSqlScriptVisitor()); + + // Set up queries + InsertStatement insertStatement = insert() + .into(tableRef("BlobTable")) + .fields(field("column1"), field("column2")) + .values(blobLiteral(BLOB3_VALUE).as("column1"), blobLiteral(BLOB3_VALUE).as("column2")); + SelectStatement selectStatementAfterInsert = select(field("column1"), field("column2")) + .from(tableRef("BlobTable")) + .where(or( + field("column1").eq(blobLiteral(BLOB3_VALUE)), + field("column1").eq(blobLiteral(BLOB3_VALUE)) + )); + + byte[] bytUpdated = Arrays.copyOf(BLOB3_VALUE, 256+3); + bytUpdated[256] = 1; + bytUpdated[257] = 2; + bytUpdated[258] = 3; + // this update fails to work as an update without a WHERE clause - it strangely inserts a duplicate row on Postgres without a where clause + UpdateStatement updateStatement = update(tableRef("BlobTable")) + .set(blobLiteral(bytUpdated).as("column1"), blobLiteral(bytUpdated).as("column2")) + .where( + field("column1").eq(blobLiteral(BLOB3_VALUE)) + ); + SelectStatement selectStatementAfterUpdate = select(field("column1"), field("column2")) + .from(tableRef("BlobTable")) + .where(or( + field("column1").eq(blobLiteral(bytUpdated)), + field("column1").eq(blobLiteral(bytUpdated)) + )); + + // Insert + executor.execute(convertStatementToSQL(insertStatement, schema, null), connection); + + boolean isOracle = false; + + try { + String databaseProductName = this.dataSource.getConnection().getMetaData().getDatabaseProductName(); + isOracle = databaseProductName.contains("Oracle"); + } catch (SQLException e) { + // ignore SQLException + } + + if (isOracle) { + // for Oracle need to compare BLOB's with DBMS_LOB.INSTR + AliasedField compareFunctionBlob = PortableSqlFunction.builder() + .withFunctionForDatabaseType("ORACLE", + "DBMS_LOB.INSTR", + new FieldReference("column1"), + blobLiteral(BLOB3_VALUE), + new 
FieldLiteral("1"), + new FieldLiteral("1") + ) + .build(); + + AliasedField compareFunctionUpdated = PortableSqlFunction.builder() + .withFunctionForDatabaseType("ORACLE", + "DBMS_LOB.INSTR", + new FieldReference("column1"), + blobLiteral(bytUpdated), + new FieldLiteral("1"), + new FieldLiteral("1") + ) + .build(); + + selectStatementAfterInsert = select(field("column1"), field("column2")) + .from(tableRef("BlobTable")) + .where( + compareFunctionBlob.greaterThan(0) + ); + updateStatement = update(tableRef("BlobTable")) + .set(blobLiteral(bytUpdated).as("column1"), blobLiteral(bytUpdated).as("column2")) + .where( + compareFunctionBlob.greaterThan(0) + ); + selectStatementAfterUpdate = select(field("column1"), field("column2")) + .from(tableRef("BlobTable")) + .where( + compareFunctionUpdated.greaterThan(0) + ); } + // Check result - note that this is deliberately not tidy - we are making sure that results get + // passed back up to this scope correctly. + String sql = convertStatementToSQL(selectStatementAfterInsert); + AtomicBoolean isFirstValueHex = new AtomicBoolean(false); + Integer numberOfRecords = executor.executeQuery(sql, connection, new ResultSetProcessor() { + @Override + public Integer process(ResultSet resultSet) throws SQLException { + int result = 0; + while (resultSet.next()) { + result++; + byte[] bytesFromFirst = resultSet.getBytes("column1"); + + if (bytesFromFirst[3] == 0x03) { // if 4th char is 0x03 then it isn't hex encoded like in Postgres + assertEquals("column1 blob value not correctly set/returned after insert", 0, Arrays.compare(BLOB3_VALUE, resultSet.getBytes(1))); + assertEquals("column2 blob value not correctly set/returned after insert", 0, Arrays.compare(BLOB3_VALUE, resultSet.getBytes(2))); + } else { + isFirstValueHex.set(true); + assertEquals("column1 blob value not correctly set/returned after insert", 0, Arrays.compare(BLOB3_VALUE, decodeBlobHexFromBytesToByteArray(resultSet.getBytes(1)))); + assertEquals("column2 blob value not 
correctly set/returned after insert", 0, Arrays.compare(BLOB3_VALUE, decodeBlobHexFromBytesToByteArray(resultSet.getBytes(2)))); + } + } + return result; + } + }); + + assertEquals("Should be exactly one record", 1, numberOfRecords.intValue()); + + // Update + executor.execute(ImmutableList.of(convertStatementToSQL(updateStatement)), connection); + + // Check result- note that this is deliberately not tidy - we are making sure that results get + // passed back up to this scope correctly. + sql = convertStatementToSQL(selectStatementAfterUpdate); + AtomicBoolean isUpdateFirstValueHex = new AtomicBoolean(false); + numberOfRecords = executor.executeQuery(sql, connection, new ResultSetProcessor() { + @Override + public Integer process(ResultSet resultSet) throws SQLException { + int result = 0; + while (resultSet.next()) { + result++; + byte[] bytesFromFirst = resultSet.getBytes("column1"); + if (bytesFromFirst[3] == 0x03) { // if second char is a space then it isn't hex encoded + assertEquals("column1 blob value not correctly set/returned after update", 0, Arrays.compare(bytUpdated, resultSet.getBytes(1))); + assertEquals("column2 blob value not correctly set/returned after update", 0, Arrays.compare(bytUpdated, resultSet.getBytes(2))); + } else { + isUpdateFirstValueHex.set(true); + assertEquals("column1 blob value not correctly set/returned after update", 0, Arrays.compare(bytUpdated, decodeBlobHexFromBytesToByteArray(resultSet.getBytes(1)))); + assertEquals("column2 blob value not correctly set/returned after update", 0, Arrays.compare(bytUpdated, decodeBlobHexFromBytesToByteArray(resultSet.getBytes(2)))); + } + } + return result; + } + }); + assertEquals("Should be exactly one records", 1, numberOfRecords.intValue()); + } + + private static byte[] decodeBlobHexFromBytesToByteArray(byte[] bytSrc) { + Hex hexUtil = new Hex(); + int lenSrc = bytSrc.length; + char[] charBlob = new char[lenSrc]; + byte[] bytBlob = new byte[charBlob.length >> 1]; + try { + for (int i = 
0; i < bytSrc.length; i++) { + charBlob[i] = (char) bytSrc[i]; + } + hexUtil.decodeHex(charBlob, bytBlob, 0); + } catch (DecoderException e) { + throw new RuntimeException(e); + } + return bytBlob; + } + + private static String decodeBlobHexFromBytesToText(byte[] bytSrc) { + String blobStringResult; + Hex hexUtil = new Hex(); + try { + int lenSrc = bytSrc.length; + char[] charBlob = new char[lenSrc]; + byte[] bytBlob = new byte[charBlob.length >> 1]; + for (int i = 0; i < bytSrc.length; i++) { + charBlob[i] = (char) bytSrc[i]; + } + hexUtil.decodeHex(charBlob, bytBlob, 0); + + blobStringResult = new String(bytBlob); + } catch (DecoderException e) { + throw new RuntimeException(e); + } + return blobStringResult; + } + /** * Asserts that the number of records in the table are as expected. diff --git a/morf-integration-test/src/test/java/org/alfasoftware/morf/jdbc/TestDatabaseMetaDataProvider.java b/morf-integration-test/src/test/java/org/alfasoftware/morf/jdbc/TestDatabaseMetaDataProvider.java index 8bde7df40..c3960a83a 100755 --- a/morf-integration-test/src/test/java/org/alfasoftware/morf/jdbc/TestDatabaseMetaDataProvider.java +++ b/morf-integration-test/src/test/java/org/alfasoftware/morf/jdbc/TestDatabaseMetaDataProvider.java @@ -25,7 +25,6 @@ import static org.alfasoftware.morf.sql.SqlUtils.field; import static org.alfasoftware.morf.sql.SqlUtils.select; import static org.alfasoftware.morf.sql.SqlUtils.tableRef; -import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.either; @@ -33,7 +32,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalToIgnoringCase; import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertThrows; import static 
org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -67,6 +69,7 @@ import com.google.common.base.Strings; import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterables; +import com.google.common.util.concurrent.UncheckedExecutionException; import com.google.inject.Inject; import net.jcip.annotations.NotThreadSafe; @@ -131,7 +134,12 @@ public class TestDatabaseMetaDataProvider { column("bigIntegerCol", DataType.BIG_INTEGER).defaultValue("8"), column("booleanCol", DataType.BOOLEAN).defaultValue("1"), column("integerTenCol", DataType.INTEGER).defaultValue("17"), - column("dateCol", DataType.DATE).defaultValue("2020-01-01")) + column("dateCol", DataType.DATE).defaultValue("2020-01-01")), + table("WithPartition") + .columns( + SchemaUtils.idColumn(), + column("stringCol", DataType.STRING, 20) + ) ), schema( view("ViewWithTypes", select(field("primaryStringCol"), field("id")).from("WithTypes").crossJoin(tableRef("WithDefaults"))), @@ -208,6 +216,7 @@ public void testViewsAndTables() throws SQLException { tableNameEqualTo("WithTypes"), tableNameEqualTo("WithDefaults"), tableNameEqualTo("WithLobs"), + tableNameEqualTo("WithPartition"), equalToIgnoringCase("WithTimestamp") // can read table names even if they contain unsupported columns ))); @@ -216,6 +225,7 @@ public void testViewsAndTables() throws SQLException { tableNameMatcher("WithTypes"), tableNameMatcher("WithDefaults"), tableNameMatcher("WithLobs"), + tableNameMatcher("WithPartition"), propertyMatcher(Table::getName, "name", equalToIgnoringCase("WithTimestamp")) // can read table names even if they contain unsupported columns ))); } @@ -284,7 +294,7 @@ public void testTableWithTypes() throws SQLException { )); schemaResource.getAdditionalMetadata().ifPresent(additionalMetadata -> - assertThat(additionalMetadata.ignoredIndexes().get("WithTypes".toLowerCase()), containsInAnyOrder(ImmutableList.of( + assertThat(additionalMetadata.ignoredIndexes().get("WithTypes"), 
containsInAnyOrder(ImmutableList.of( indexMatcher(index("WithTypes_PRF1").columns("decimalNineFiveCol", "bigIntegerCol")) )))); } @@ -319,6 +329,36 @@ public void testTableWithLobs() throws SQLException { } + @Test + public void testTableWithPartition() throws SQLException { + boolean isPostgres = databaseType.equals("PGSQL"); + // RE-CREATE table with two partitions on table WithPartition + try (Connection connection = database.getDataSource().getConnection()) { + if (isPostgres) { + String tableSchema = Strings.isNullOrEmpty(database.getSchemaName()) ? "" : database.getSchemaName() + "."; + connection.createStatement().executeUpdate("DROP TABLE " + tableSchema + "WithPartition"); + connection.createStatement().executeUpdate("CREATE TABLE " + tableSchema + "WithPartition(id numeric(19) NOT NULL, stringCol VARCHAR(20)) PARTITION BY RANGE (id)"); + connection.createStatement().executeUpdate("CREATE TABLE " + tableSchema + "WithPartition_p0 PARTITION OF " + tableSchema + "WithPartition FOR VALUES FROM (0) TO (10000)"); + connection.createStatement().executeUpdate("CREATE TABLE " + tableSchema + "WithPartition_p1 PARTITION OF " + tableSchema + "WithPartition FOR VALUES FROM (10000) TO (99999)"); + } + } + + try(SchemaResource schemaResource = database.openSchemaResource()) { + assertTrue(schemaResource.tableExists("WithPartition")); + + if (isPostgres) { + UncheckedExecutionException uncheckedExecutionException = assertThrows(UncheckedExecutionException.class, () -> schemaResource.getTable("WithPartition_p0")); + assertTrue("partition must not be found on getTable", uncheckedExecutionException.getMessage().contains("Table [WithPartition_p0/*] not found.")); + + Table table = schemaResource.getTable("WithPartition"); + assertEquals("table must have 2 columns", 2, table.columns().size()); + assertEquals("first column must match", "id", table.columns().get(0).getName()); + assertEquals("second column column must match", "stringcol", table.columns().get(1).getName()); + } 
+ } + } + + @Test public void testTableWithDefaults() throws SQLException { try(SchemaResource schemaResource = database.openSchemaResource()) { diff --git a/morf-integration-test/src/test/java/org/alfasoftware/morf/upgrade/TestDatabaseUpgradePathValidationService.java b/morf-integration-test/src/test/java/org/alfasoftware/morf/upgrade/TestDatabaseUpgradePathValidationService.java index d5ef681ec..a78e1e68d 100644 --- a/morf-integration-test/src/test/java/org/alfasoftware/morf/upgrade/TestDatabaseUpgradePathValidationService.java +++ b/morf-integration-test/src/test/java/org/alfasoftware/morf/upgrade/TestDatabaseUpgradePathValidationService.java @@ -81,6 +81,8 @@ public void setup() { @After public void tearDown() { schemaManager.invalidateCache(); + // to make following test on test suite run clean - org.alfasoftware.morf.upgrade.TestFullDeployment + schemaManager.dropAllTables(); } diff --git a/morf-mysql/src/main/java/org/alfasoftware/morf/jdbc/mysql/MySqlMetaDataProvider.java b/morf-mysql/src/main/java/org/alfasoftware/morf/jdbc/mysql/MySqlMetaDataProvider.java index cb471aeb0..f13db5325 100755 --- a/morf-mysql/src/main/java/org/alfasoftware/morf/jdbc/mysql/MySqlMetaDataProvider.java +++ b/morf-mysql/src/main/java/org/alfasoftware/morf/jdbc/mysql/MySqlMetaDataProvider.java @@ -84,4 +84,5 @@ protected ColumnBuilder setAdditionalColumnMetadata(RealName tableName, ColumnBu protected String buildSequenceSql(String schemaName) { return null; } + } diff --git a/morf-mysql/src/test/java/org/alfasoftware/morf/jdbc/mysql/TestMySqlDialect.java b/morf-mysql/src/test/java/org/alfasoftware/morf/jdbc/mysql/TestMySqlDialect.java index cfeb5bae7..9343df0a2 100755 --- a/morf-mysql/src/test/java/org/alfasoftware/morf/jdbc/mysql/TestMySqlDialect.java +++ b/morf-mysql/src/test/java/org/alfasoftware/morf/jdbc/mysql/TestMySqlDialect.java @@ -105,7 +105,9 @@ protected List expectedCreateTableStatements() { "ALTER TABLE `Alternate` ADD INDEX `Alternate_1` (`stringField`)", "CREATE 
TABLE `NonNull` (`id` BIGINT NOT NULL, `version` INTEGER DEFAULT 0, `stringField` VARCHAR(3) NOT NULL, `intField` DECIMAL(8,0) NOT NULL, `booleanField` TINYINT(1) NOT NULL, `dateField` DATE NOT NULL, `blobField` LONGBLOB NOT NULL, CONSTRAINT `NonNull_PK` PRIMARY KEY (`id`)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin", "CREATE TABLE `CompositePrimaryKey` (`id` BIGINT NOT NULL, `version` INTEGER DEFAULT 0, `stringField` VARCHAR(3) NOT NULL, `secondPrimaryKey` VARCHAR(3) NOT NULL, CONSTRAINT `CompositePrimaryKey_PK` PRIMARY KEY (`id`, `secondPrimaryKey`)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin", - "CREATE TABLE `AutoNumber` (`intField` BIGINT AUTO_INCREMENT COMMENT 'AUTONUMSTART:[5]', CONSTRAINT `AutoNumber_PK` PRIMARY KEY (`intField`)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin AUTO_INCREMENT=5" + "CREATE TABLE `AutoNumber` (`intField` BIGINT AUTO_INCREMENT COMMENT 'AUTONUMSTART:[5]', CONSTRAINT `AutoNumber_PK` PRIMARY KEY (`intField`)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin AUTO_INCREMENT=5", + "CREATE TABLE `Measurement` (`intField` DECIMAL(8,0) NOT NULL, `dateField` DATE NOT NULL, `stringField` VARCHAR(3) NOT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin", + "CREATE TABLE `MeasurementHash` (`intField` DECIMAL(8,0) NOT NULL, `dateField` DATE NOT NULL, `stringField` VARCHAR(3) NOT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin" ); } diff --git a/morf-oracle/src/main/java/org/alfasoftware/morf/jdbc/oracle/OracleMetaDataProvider.java b/morf-oracle/src/main/java/org/alfasoftware/morf/jdbc/oracle/OracleMetaDataProvider.java index 817145484..dd10523f8 100755 --- a/morf-oracle/src/main/java/org/alfasoftware/morf/jdbc/oracle/OracleMetaDataProvider.java +++ b/morf-oracle/src/main/java/org/alfasoftware/morf/jdbc/oracle/OracleMetaDataProvider.java @@ -382,7 +382,7 @@ public void handle(ResultSet resultSet) throws SQLException { if (DatabaseMetaDataProviderUtils.shouldIgnoreIndex(indexName)) { Index 
ignoredIndex = getAssembledIndex(unique, indexNameFinal); - String currentTableName = currentTable.getName().toUpperCase(); + String currentTableName = currentTable.getName(); if (ignoredIndexes.containsKey(currentTableName)) { ignoredIndexes.compute(currentTableName, (k, tableIgnoredIndexes) -> { List newList = tableIgnoredIndexes == null ? new ArrayList<>() : new ArrayList<>(tableIgnoredIndexes); @@ -413,6 +413,16 @@ public boolean isUnique() { return unique; } + @Override + public boolean isGlobalPartitioned() { + return false; + } + + @Override + public boolean isLocalPartitioned() { + return false; + } + @Override public String getName() { @@ -471,7 +481,7 @@ public void handle(ResultSet resultSet) throws SQLException { if (DatabaseMetaDataProviderUtils.shouldIgnoreIndex(indexName)) { Index lastIndex = null; - for (Index currentIndex : ignoredIndexes.get(currentTable.getName().toUpperCase())) { + for (Index currentIndex : ignoredIndexes.get(currentTable.getName())) { if (currentIndex.getName().equalsIgnoreCase(indexName)) { lastIndex = currentIndex; break; @@ -1118,6 +1128,11 @@ public int getAutoNumberStart() { return autoIncrementFrom; } + @Override + public boolean isPartitioned() { + return false; + } + @Override public DataType getType() { return columnType.get().getType(); diff --git a/morf-oracle/src/test/java/org/alfasoftware/morf/jdbc/oracle/TestOracleDialect.java b/morf-oracle/src/test/java/org/alfasoftware/morf/jdbc/oracle/TestOracleDialect.java index d02d3f91e..65d56ebed 100755 --- a/morf-oracle/src/test/java/org/alfasoftware/morf/jdbc/oracle/TestOracleDialect.java +++ b/morf-oracle/src/test/java/org/alfasoftware/morf/jdbc/oracle/TestOracleDialect.java @@ -225,7 +225,17 @@ protected List expectedCreateTableStatements() { "END;", "COMMENT ON TABLE TESTSCHEMA.AutoNumber IS '"+OracleDialect.REAL_NAME_COMMENT_LABEL+":[AutoNumber]'", - "COMMENT ON COLUMN TESTSCHEMA.AutoNumber.intField IS 
'"+OracleDialect.REAL_NAME_COMMENT_LABEL+":[intField]/TYPE:[BIG_INTEGER]/AUTONUMSTART:[5]'" + "COMMENT ON COLUMN TESTSCHEMA.AutoNumber.intField IS '"+OracleDialect.REAL_NAME_COMMENT_LABEL+":[intField]/TYPE:[BIG_INTEGER]/AUTONUMSTART:[5]'", + "CREATE TABLE TESTSCHEMA.Measurement (intField DECIMAL(8,0) NOT NULL, dateField DATE NOT NULL, stringField NVARCHAR2(3) NOT NULL)", + "COMMENT ON TABLE TESTSCHEMA.Measurement IS 'REALNAME:[Measurement]'", + "COMMENT ON COLUMN TESTSCHEMA.Measurement.intField IS 'REALNAME:[intField]/TYPE:[DECIMAL]'", + "COMMENT ON COLUMN TESTSCHEMA.Measurement.dateField IS 'REALNAME:[dateField]/TYPE:[DATE]'", + "COMMENT ON COLUMN TESTSCHEMA.Measurement.stringField IS 'REALNAME:[stringField]/TYPE:[STRING]'", + "CREATE TABLE TESTSCHEMA.MeasurementHash (intField DECIMAL(8,0) NOT NULL, dateField DATE NOT NULL, stringField NVARCHAR2(3) NOT NULL)", + "COMMENT ON TABLE TESTSCHEMA.MeasurementHash IS 'REALNAME:[MeasurementHash]'", + "COMMENT ON COLUMN TESTSCHEMA.MeasurementHash.intField IS 'REALNAME:[intField]/TYPE:[DECIMAL]'", + "COMMENT ON COLUMN TESTSCHEMA.MeasurementHash.dateField IS 'REALNAME:[dateField]/TYPE:[DATE]'", + "COMMENT ON COLUMN TESTSCHEMA.MeasurementHash.stringField IS 'REALNAME:[stringField]/TYPE:[STRING]'" ); } diff --git a/morf-postgresql/src/main/java/org/alfasoftware/morf/jdbc/postgresql/PostgreSQLDialect.java b/morf-postgresql/src/main/java/org/alfasoftware/morf/jdbc/postgresql/PostgreSQLDialect.java index 37c84cb63..e1faee336 100644 --- a/morf-postgresql/src/main/java/org/alfasoftware/morf/jdbc/postgresql/PostgreSQLDialect.java +++ b/morf-postgresql/src/main/java/org/alfasoftware/morf/jdbc/postgresql/PostgreSQLDialect.java @@ -15,6 +15,7 @@ import java.util.Objects; import java.util.Optional; import java.util.StringJoiner; +import java.util.concurrent.atomic.AtomicInteger; import org.alfasoftware.morf.jdbc.DatabaseMetaDataProvider; import org.alfasoftware.morf.jdbc.DatabaseType; @@ -23,7 +24,12 @@ import 
org.alfasoftware.morf.metadata.Column; import org.alfasoftware.morf.metadata.DataType; import org.alfasoftware.morf.metadata.DataValueLookup; +import org.alfasoftware.morf.metadata.DatePartitionedByPeriodRule; import org.alfasoftware.morf.metadata.Index; +import org.alfasoftware.morf.metadata.PartitionByHash; +import org.alfasoftware.morf.metadata.PartitionByRange; +import org.alfasoftware.morf.metadata.PartitioningByHashRule; +import org.alfasoftware.morf.metadata.PartitioningRuleType; import org.alfasoftware.morf.metadata.SchemaResource; import org.alfasoftware.morf.metadata.SchemaUtils; import org.alfasoftware.morf.metadata.Sequence; @@ -56,6 +62,7 @@ import org.alfasoftware.morf.sql.element.SqlParameter; import org.alfasoftware.morf.sql.element.TableReference; import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Pair; import com.google.common.base.Joiner; import com.google.common.collect.FluentIterable; @@ -275,6 +282,28 @@ private List createTableStatement(Table table) { createTableStatement.append(")"); + // add "PARTITION BY" clause and extra partition tables if table is partitioned. + if (table.isPartitioned()) { + Optional partitionColumn = table.columns().stream().filter(Column::isPartitioned).findFirst(); + if (partitionColumn.isEmpty() || table.partitioningRule() == null && !table.partitions().column().getName().equals(partitionColumn.get().getName()) + || (table.partitioningRule() != null && !table.partitioningRule().getColumn().equals(partitionColumn.get().getName()))) { + throw new IllegalArgumentException("Partitioning rule does not match partition column. Table: " + table.getName()); + } + + // add PARTITION BY clause + PartitioningRuleType partitioningRuleType = table.partitioningRule() == null ? 
table.partitions().partitioningType() + : table.partitioningRule().getPartitioningType(); + if (partitioningRuleType.equals(PartitioningRuleType.rangePartitioning)) { + createTableStatement.append(" PARTITION BY RANGE ("); + } else if (partitioningRuleType.equals(PartitioningRuleType.hashPartitioning)) { + createTableStatement.append(" PARTITION BY HASH ("); + } + createTableStatement.append(partitionColumn.get().getName()); + createTableStatement.append(')'); + // explode PARTITION TABLES + postStatements.addAll(createTablePartitions(table, partitioningRuleType)); + } + ImmutableList.Builder statements = ImmutableList.builder() .addAll(preStatements) .add(createTableStatement.toString()); @@ -285,6 +314,67 @@ private List createTableStatement(Table table) { } + private List createTablePartitions(Table table, PartitioningRuleType partitioningRuleType) { + List statements = new ArrayList<>(); + + if (table.partitioningRule() instanceof DatePartitionedByPeriodRule) { + createPartitionByDateRangeStatements(table, statements); + } else if (table.partitioningRule() instanceof PartitioningByHashRule) { + createPartitionByHashStatements(table, statements, (PartitioningByHashRule) table.partitioningRule()); + } else if (partitioningRuleType.equals(PartitioningRuleType.rangePartitioning)) { + table.partitions().getPartitions().forEach(partition -> { + PartitionByRange range = (PartitionByRange) partition; + statements.add(createTablePartitionRangeStatement(table, table.getName(), partition.name(), Pair.of(range.start(), range.end()))); + }); + } else if (partitioningRuleType.equals(PartitioningRuleType.hashPartitioning)) { + table.partitions().getPartitions().forEach(partition -> { + PartitionByHash hashRange = (PartitionByHash) partition; + statements.add(createTablePartitionHashStatement(table, table.getName(), partition.name(), Integer.parseInt(hashRange.divider()), + Integer.parseInt(hashRange.remainder()))); + }); + } else { + throw new 
IllegalArgumentException("Partition rule is not supported"); + } + + return statements; + } + + private void createPartitionByHashStatements(Table table, List statements, PartitioningByHashRule rule) { + String sourceTableName = schemaNamePrefix(table) + table.getName(); + String partitionTableNamePrefix = sourceTableName + "_p"; + AtomicInteger i = new AtomicInteger(1); + rule.getHashRemainders().forEach(remainder -> { + String tablePartitionName = partitionTableNamePrefix + i.getAndIncrement(); + statements.add(createTablePartitionHashStatement(table, sourceTableName, tablePartitionName, + rule.getHashDivider(), remainder)); + }); + } + + + private String createTablePartitionHashStatement(Table table, String sourceTableName, String tablePartitionName, int modulus, int remainder) { + return "CREATE TABLE " + tablePartitionName + " PARTITION OF " + sourceTableName + + " FOR VALUES WITH (MODULUS " + modulus + ", REMAINDER " + remainder + ")"; + } + + + private void createPartitionByDateRangeStatements(Table table, List statements) { + DatePartitionedByPeriodRule datePartitionedByPeriodRule = (DatePartitionedByPeriodRule) table.partitioningRule(); + String sourceTableName = schemaNamePrefix(table) + table.getName(); + String partitionTableNamePrefix = sourceTableName + "_p"; + AtomicInteger i = new AtomicInteger(1); + datePartitionedByPeriodRule.getRanges().forEach(pair -> { + String tablePartitionName = partitionTableNamePrefix + i.getAndIncrement(); + statements.add(createTablePartitionRangeStatement(table, sourceTableName, tablePartitionName, + Pair.of(pair.getLeft().toString("yyyy-MM-dd"), pair.getRight().toString("yyyy-MM-dd")))); + }); + } + + + private String createTablePartitionRangeStatement(Table table, String sourceTableName, String tablePartitionName, Pair range) { + return "CREATE TABLE " + tablePartitionName + " PARTITION OF " + sourceTableName + + " FOR VALUES FROM ('" + range.getLeft() + "') TO ('" + range.getRight() + "')"; + } + /** * Private method 
to form the SQL statement required to create a sequence in the schema. * @@ -522,7 +612,9 @@ protected String getSqlFrom(ConcatenatedField concatenatedField) { @Override protected String getSqlFrom(BlobFieldLiteral field) { - return String.format("E'\\x%s'", field.getValue()); + // this format doesn't work with blob fields: String.format("E'\\x%s'", field.getValue()); + // see org.alfasoftware.morf.integration.TestSqlStatements#testBlobFields + return String.format("'%s'", field.getValue()); } @Override diff --git a/morf-postgresql/src/main/java/org/alfasoftware/morf/jdbc/postgresql/PostgreSQLMetaDataProvider.java b/morf-postgresql/src/main/java/org/alfasoftware/morf/jdbc/postgresql/PostgreSQLMetaDataProvider.java index 1295800d7..60703af81 100644 --- a/morf-postgresql/src/main/java/org/alfasoftware/morf/jdbc/postgresql/PostgreSQLMetaDataProvider.java +++ b/morf-postgresql/src/main/java/org/alfasoftware/morf/jdbc/postgresql/PostgreSQLMetaDataProvider.java @@ -5,12 +5,16 @@ import static org.alfasoftware.morf.jdbc.DatabaseMetaDataProviderUtils.shouldIgnoreIndex; import java.sql.Connection; +import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.sql.Types; +import java.util.ArrayList; +import java.util.Collection; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Optional; import java.util.Set; @@ -21,9 +25,15 @@ import org.alfasoftware.morf.jdbc.DatabaseMetaDataProvider; import org.alfasoftware.morf.jdbc.RuntimeSqlException; import org.alfasoftware.morf.metadata.AdditionalMetadata; +import org.alfasoftware.morf.metadata.Column; import org.alfasoftware.morf.metadata.DataType; +import org.alfasoftware.morf.metadata.Partition; +import org.alfasoftware.morf.metadata.PartitioningRuleType; +import org.alfasoftware.morf.metadata.Partitions; +import org.alfasoftware.morf.metadata.SchemaUtils; import org.alfasoftware.morf.metadata.Index; 
import org.alfasoftware.morf.metadata.SchemaUtils.ColumnBuilder; +import org.alfasoftware.morf.metadata.Table; import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -51,6 +61,47 @@ public PostgreSQLMetaDataProvider(Connection connection, String schemaName) { } + @Override + protected Set getIgnoredTables() { + Set ignoredTables = new HashSet<>(); + try(Statement ignoredTablesStmt = connection.createStatement()) { + // distinguish partitioned tables from regular ones: relkind = 'p' (partition) or 'r' (regular) also can use boolean col relispartition + // a partition table attached has (r, true) + try (ResultSet ignoredTablesRs = ignoredTablesStmt.executeQuery("select relname from pg_class where relispartition and relkind = 'r'")) { + while (ignoredTablesRs.next()) { + ignoredTables.add(ignoredTablesRs.getString(1).toLowerCase(Locale.ROOT)); + } + } + } catch (SQLException e) { + // ignore exception, if it fails then incompatible Postgres version + } + return ignoredTables; + } + + @Override + protected Set getPartitionedTables() { + Set partitionedTables = new HashSet<>(); + try(Statement partitionedTablesStmt = connection.createStatement()) { + // distinguish partitioned tables from regular ones: relkind = 'p' (partition) or 'r' (regular) also can use boolean col relispartition + // a partition table attached has (r, true) + // a partition table has (p, false) + try (ResultSet ignoredTablesRs = partitionedTablesStmt.executeQuery("select relname from pg_class where not relispartition and relkind = 'p'")) { + while (ignoredTablesRs.next()) { + partitionedTables.add(ignoredTablesRs.getString(1).toLowerCase(Locale.ROOT)); + } + } + } catch (SQLException e) { + // ignore exception, if it fails then incompatible Postgres version + } + return partitionedTables; + } + + + @Override + protected boolean isIgnoredTable(@SuppressWarnings("unused") RealName tableName) { + return 
ignoredTables.get().contains(tableName.getDbName().toLowerCase(Locale.ROOT)); + } + @Override protected boolean isPrimaryKeyIndex(RealName indexName) { return indexName.getDbName().endsWith("_pk"); @@ -143,7 +194,7 @@ private Map> loadIgnoredIndexes() { private ImmutableMap> loadAllIgnoredIndexes() { ImmutableMap.Builder> ignoredIndexes = ImmutableMap.builder(); for (RealName realTableName : allIgnoredIndexesTables) { - ignoredIndexes.put(realTableName.getDbName().toLowerCase(), loadTableIndexes(realTableName, true)); + ignoredIndexes.put(realTableName.getRealName(), loadTableIndexes(realTableName, true)); } return ignoredIndexes.build(); } @@ -200,6 +251,142 @@ protected Map loadAllIndexNames() { } + @Override + protected Partitions loadTablePartitions(RealName realTableName) { + SchemaUtils.PartitionsBuilder partitions = null; + final ImmutableMap.Builder> indexNames = ImmutableMap.builder(); + Table table = null; + Column partitionColumn = null; + List partitionList = new ArrayList<>(); + + String schema = StringUtils.isNotBlank(schemaName) + ? " JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace AND n.nspname = '" + schemaName + "'" + : ""; + + String sql = "SELECT t.relname AS tableName," + + " c.relname AS partitionTable, pg_get_expr(c.relpartbound, i.inhrelid) as partitionClause" + + " FROM pg_catalog.pg_inherits i" + + " JOIN pg_catalog.pg_class c ON c.oid = i.inhrelid" + + " JOIN pg_catalog.pg_class t ON t.oid = i.inhparent" + + schema + + " WHERE t.relname = ?" 
+ + " ORDER BY t.relname"; + + String sqlForColumnName = "select par.relname as tableName, d.description, pt.partnatts as numColumns, pt.partstrat, col.column_name" + + " from (select partrelid, partnatts, partstrat, unnest(partattrs) column_index" + + " from pg_partitioned_table) pt" + + " join pg_class par on par.oid = pt.partrelid" + + " join pg_namespace n on n.oid = par.relnamespace" + + " JOIN pg_description d ON d.objoid = par.oid" + + " join information_schema.columns col on col.table_schema = n.nspname" + + " and col.table_name = par.relname and ordinal_position = pt.column_index" + + " WHERE par.relname = ?"; + + + try (PreparedStatement preparedStatementColumn = connection.prepareStatement(sqlForColumnName, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)) { + preparedStatementColumn.setString(1, realTableName.getDbName()); + try (ResultSet partitionsResultSet = preparedStatementColumn.executeQuery()) { + if (partitionsResultSet.next()) { + String tableName = partitionsResultSet.getString(1); + int numColumns = partitionsResultSet.getInt(3); + String partitionStrategy = partitionsResultSet.getString(4); + String column = partitionsResultSet.getString(5); + String comment = partitionsResultSet.getString(2); + String realName = matchComment(comment); + + if (log.isDebugEnabled()) { + log.debug("Found partitioned table [" + tableName + "] with remark [" + comment + "] parsed as [" + realName + "] in schema [" + schemaName + "]"); + } + + if (numColumns > 1) { + log.info("morf doesn't support multiple columns on partition yet"); + } else { + table = getTable(tableName); + partitionColumn = table.columns().stream().filter(column1 -> + column1.getName().toLowerCase().equals(column)).findFirst().orElse(null); + + if (partitionColumn != null) { + partitions = SchemaUtils.partitions().column(partitionColumn); + switch(partitionStrategy) { + case "r": + partitions = partitions.ruleType(PartitioningRuleType.rangePartitioning); + break; + case "h": 
partitions = partitions.ruleType(PartitioningRuleType.hashPartitioning); + break; + case "l": partitions = partitions.ruleType(PartitioningRuleType.listPartitioning); + break; + } + } + } + } + } + } + catch (SQLException e) { + throw new RuntimeSqlException(e); + } + + if (partitions != null) { + try (PreparedStatement preparedStatement = connection.prepareStatement(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)) { + preparedStatement.setString(1, realTableName.getDbName()); + + try (ResultSet partitionsResultSet = preparedStatement.executeQuery()) { + while (partitionsResultSet.next()) { + String tableName = partitionsResultSet.getString(1); + String partitionName = partitionsResultSet.getString(2); + String partitionClause = partitionsResultSet.getString(3); + + if (StringUtils.isNotBlank(partitionName)) { + //RealName partitionRealName = createRealName(partitionName, partitionName); + Partition partition = null; + + switch (partitions.partitioningType()) { + case rangePartitioning: + String[] asValuesRange = partitionClause.split("'"); + String start = asValuesRange[1]; + String end = asValuesRange[3]; + partition = SchemaUtils.partitionByRange(partitionName) + .start(start).end(end); + break; + case hashPartitioning: + String[] asValuesHash = partitionClause.split(Pattern.quote("(")); + if (!asValuesHash[1].startsWith("modulus")) { + log.info("morf doesn't support a function other than modulus on partition yet"); + } else { + String[] values = asValuesHash[1].replace("modulus ", "").split(","); + String divider = values[0]; + String remainder = values[1].replace(" remainder ", "").replace(")", ""); + partition = SchemaUtils.partitionByHash(partitionName) + .divider(divider).remainder(remainder); + } + break; + case listPartitioning: + break; + default: + break; + } + + if (partition != null) { + partitionList.add(partition); + } + } + } + + if (partitions.partitioningType().equals(PartitioningRuleType.listPartitioning)) { + // list partition 
implementation is incomplete + return null; + } else { + return partitions.partitions(partitionList); + } + } + } catch (SQLException e) { + throw new RuntimeSqlException(e); + } + } + + return partitions; + } + + @Override protected RealName readIndexName(ResultSet indexResultSet) throws SQLException { RealName readIndexName = super.readIndexName(indexResultSet); @@ -233,4 +420,15 @@ protected String buildSequenceSql(String schemaName) { return sequenceSqlBuilder.toString(); } + + + @Override + public Collection partitionedTableNames() { + return super.partitionedTables.get(); + } + + @Override + public Collection partitionTableNames() { + return super.ignoredTables.get(); + } } diff --git a/morf-postgresql/src/test/java/org/alfasoftware/morf/jdbc/postgresql/TestPostgreSQLDialect.java b/morf-postgresql/src/test/java/org/alfasoftware/morf/jdbc/postgresql/TestPostgreSQLDialect.java index d50c07266..3988284a0 100644 --- a/morf-postgresql/src/test/java/org/alfasoftware/morf/jdbc/postgresql/TestPostgreSQLDialect.java +++ b/morf-postgresql/src/test/java/org/alfasoftware/morf/jdbc/postgresql/TestPostgreSQLDialect.java @@ -132,7 +132,27 @@ protected List expectedCreateTableStatements() { "CREATE TABLE testschema.AutoNumber (intField NUMERIC(19) DEFAULT nextval('testschema.AutoNumber_intField_seq'), CONSTRAINT AutoNumber_PK PRIMARY KEY(intField))", "ALTER SEQUENCE testschema.AutoNumber_intField_seq OWNED BY testschema.AutoNumber.intField", "COMMENT ON TABLE testschema.AutoNumber IS '"+PostgreSQLDialect.REAL_NAME_COMMENT_LABEL+":[AutoNumber]'", - "COMMENT ON COLUMN testschema.AutoNumber.intField IS '"+PostgreSQLDialect.REAL_NAME_COMMENT_LABEL+":[intField]/TYPE:[BIG_INTEGER]/AUTONUMSTART:[5]'"); + "COMMENT ON COLUMN testschema.AutoNumber.intField IS '"+PostgreSQLDialect.REAL_NAME_COMMENT_LABEL+":[intField]/TYPE:[BIG_INTEGER]/AUTONUMSTART:[5]'", + "CREATE TABLE testschema.Measurement (intField DECIMAL(8,0) NOT NULL, dateField DATE NOT NULL, stringField VARCHAR(3) COLLATE 
\"POSIX\" NOT NULL) PARTITION BY RANGE (dateField)", + "CREATE TABLE testschema.Measurement_p1 PARTITION OF testschema.Measurement FOR VALUES FROM ('2012-03-01') TO ('2012-04-01')", + "CREATE TABLE testschema.Measurement_p2 PARTITION OF testschema.Measurement FOR VALUES FROM ('2012-04-01') TO ('2012-05-01')", + "COMMENT ON TABLE testschema.Measurement IS '"+PostgreSQLDialect.REAL_NAME_COMMENT_LABEL+":[Measurement]'", + "COMMENT ON COLUMN testschema.Measurement.intField IS '"+PostgreSQLDialect.REAL_NAME_COMMENT_LABEL+":[intField]/TYPE:[DECIMAL]'", + "COMMENT ON COLUMN testschema.Measurement.dateField IS '"+PostgreSQLDialect.REAL_NAME_COMMENT_LABEL+":[dateField]/TYPE:[DATE]'", + "COMMENT ON COLUMN testschema.Measurement.stringField IS '"+PostgreSQLDialect.REAL_NAME_COMMENT_LABEL+":[stringField]/TYPE:[STRING]'", + "CREATE TABLE testschema.MeasurementHash (intField DECIMAL(8,0) NOT NULL, dateField DATE NOT NULL, stringField VARCHAR(3) COLLATE \"POSIX\" NOT NULL) PARTITION BY HASH (intField)", + "CREATE TABLE testschema.MeasurementHash_p1 PARTITION OF testschema.MeasurementHash FOR VALUES WITH (MODULUS 8, REMAINDER 0)", + "CREATE TABLE testschema.MeasurementHash_p2 PARTITION OF testschema.MeasurementHash FOR VALUES WITH (MODULUS 8, REMAINDER 1)", + "CREATE TABLE testschema.MeasurementHash_p3 PARTITION OF testschema.MeasurementHash FOR VALUES WITH (MODULUS 8, REMAINDER 2)", + "CREATE TABLE testschema.MeasurementHash_p4 PARTITION OF testschema.MeasurementHash FOR VALUES WITH (MODULUS 8, REMAINDER 3)", + "CREATE TABLE testschema.MeasurementHash_p5 PARTITION OF testschema.MeasurementHash FOR VALUES WITH (MODULUS 8, REMAINDER 4)", + "CREATE TABLE testschema.MeasurementHash_p6 PARTITION OF testschema.MeasurementHash FOR VALUES WITH (MODULUS 8, REMAINDER 5)", + "CREATE TABLE testschema.MeasurementHash_p7 PARTITION OF testschema.MeasurementHash FOR VALUES WITH (MODULUS 8, REMAINDER 6)", + "CREATE TABLE testschema.MeasurementHash_p8 PARTITION OF testschema.MeasurementHash FOR 
VALUES WITH (MODULUS 8, REMAINDER 7)", + "COMMENT ON TABLE testschema.MeasurementHash IS '"+PostgreSQLDialect.REAL_NAME_COMMENT_LABEL+":[MeasurementHash]'", + "COMMENT ON COLUMN testschema.MeasurementHash.intField IS '"+PostgreSQLDialect.REAL_NAME_COMMENT_LABEL+":[intField]/TYPE:[DECIMAL]'", + "COMMENT ON COLUMN testschema.MeasurementHash.dateField IS '"+PostgreSQLDialect.REAL_NAME_COMMENT_LABEL+":[dateField]/TYPE:[DATE]'", + "COMMENT ON COLUMN testschema.MeasurementHash.stringField IS '"+PostgreSQLDialect.REAL_NAME_COMMENT_LABEL+":[stringField]/TYPE:[STRING]'"); } @@ -567,7 +587,7 @@ protected String expectedBooleanLiteral(boolean value) { */ @Override protected String expectedBlobLiteral(String value) { - return String.format("E'\\x%s'", value); + return String.format("'%s'", value); } diff --git a/morf-postgresql/src/test/java/org/alfasoftware/morf/jdbc/postgresql/TestPostgreSqlMetaDataProvider.java b/morf-postgresql/src/test/java/org/alfasoftware/morf/jdbc/postgresql/TestPostgreSqlMetaDataProvider.java index b98d3598b..2080e3584 100644 --- a/morf-postgresql/src/test/java/org/alfasoftware/morf/jdbc/postgresql/TestPostgreSqlMetaDataProvider.java +++ b/morf-postgresql/src/test/java/org/alfasoftware/morf/jdbc/postgresql/TestPostgreSqlMetaDataProvider.java @@ -1,218 +1,342 @@ -/* Copyright 2017 Alfa Financial Software - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.alfasoftware.morf.jdbc.postgresql; - -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.contains; -import static org.junit.Assert.assertEquals; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.RETURNS_SMART_NULLS; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.sql.Connection; -import java.sql.DatabaseMetaData; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.List; -import java.util.Map; - -import javax.sql.DataSource; - -import org.alfasoftware.morf.jdbc.DatabaseType; -import org.alfasoftware.morf.metadata.AdditionalMetadata; -import org.alfasoftware.morf.metadata.Index; -import org.alfasoftware.morf.metadata.Schema; -import org.alfasoftware.morf.metadata.Sequence; -import org.junit.Before; -import org.junit.Test; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; - - -/** - * Test class for {@link PostgreSQLMetaDataProvider} - * - * @author Copyright (c) Alfa Financial Software Ltd. 
2024 - */ -public class TestPostgreSqlMetaDataProvider { - private static final String TABLE_NAME = "AREALTABLE"; - private static final String TEST_SCHEMA = "TestSchema"; - - private final DataSource dataSource = mock(DataSource.class, RETURNS_SMART_NULLS); - private final Connection connection = mock(Connection.class, RETURNS_SMART_NULLS); - private DatabaseType postgres; - - @Before - public void setup() { - postgres = DatabaseType.Registry.findByIdentifier(PostgreSQL.IDENTIFIER); - } - - - @Before - public void before() throws SQLException { - when(dataSource.getConnection()).thenReturn(connection); - } - - - /** - * Checks the SQL run for retrieving sequences information - * - * @throws SQLException exception - */ - @Test - public void testLoadSequences() throws SQLException { - // Given - final PreparedStatement statement = mock(PreparedStatement.class, RETURNS_SMART_NULLS); - when(connection.prepareStatement("SELECT S.relname FROM pg_class S LEFT JOIN pg_depend D ON " + - "(S.oid = D.objid AND D.deptype = 'a') LEFT JOIN pg_namespace N on (N.oid = S.relnamespace) WHERE S.relkind = " + - "'S' AND D.objid IS NULL AND N.nspname=?")).thenReturn(statement); - when(statement.executeQuery()).thenAnswer(new ReturnMockResultSetWithSequence(1)); - - // When - final Schema postgresMetaDataProvider = postgres.openSchema(connection, "TestDatabase", TEST_SCHEMA); - assertEquals("Sequence names", "[Sequence1]", postgresMetaDataProvider.sequenceNames().toString()); - Sequence sequence = postgresMetaDataProvider.sequences().iterator().next(); - assertEquals("Sequence name", "Sequence1", sequence.getName()); - - verify(statement).setString(1, TEST_SCHEMA); - } - - /** - * Checks the SQL run for retrieving sequences information - * - * @throws SQLException exception - */ - @Test - public void testLoadAllIgnoredIndexes() throws SQLException { - // Given - Statement statement = mock(Statement.class, RETURNS_SMART_NULLS); - 
when(connection.createStatement(eq(ResultSet.TYPE_FORWARD_ONLY), eq(ResultSet.CONCUR_READ_ONLY))).thenReturn(statement); - when(statement.executeQuery(anyString())).thenAnswer(answer -> { - ResultSet resultSet = mock(ResultSet.class, RETURNS_SMART_NULLS); - when(resultSet.next()).thenReturn(true, true, true, false); - when(resultSet.getString(1)).thenReturn("AREALTABLE_1", "AREALTABLE_PRF1", "AREALTABLE_PRF2"); - when(resultSet.getString(2)).thenReturn("REALNAME:[AREALTABLE_1]", "REALNAME:[AREALTABLE_PRF1]", "REALNAME:[AREALTABLE_PRF2]"); - when(resultSet.getString(3)).thenReturn("AREALTABLE", "AREALTABLE", "AREALTABLE"); - when(resultSet.getString(4)).thenReturn("REALNAME:[ARealTable]", "REALNAME:[ARealTable]", "REALNAME:[ARealTable]"); - return resultSet; - }); - - DatabaseMetaData databaseMetaData = mock(DatabaseMetaData.class, RETURNS_SMART_NULLS); - when(connection.getMetaData()).thenReturn(databaseMetaData); - - // mock getTables - when(databaseMetaData.getTables(null, TEST_SCHEMA, null, new String[] { "TABLE" })) - .thenAnswer(answer -> { - ResultSet resultSet = mock(ResultSet.class, RETURNS_SMART_NULLS); - when(resultSet.next()).thenReturn(true, false); - when(resultSet.getString(3)).thenReturn(TABLE_NAME); // // 3 - TABLE_NAME - when(resultSet.getString(2)).thenReturn(TEST_SCHEMA); // 2 - TABLE_SCHEM - when(resultSet.getString(5)).thenReturn("REALNAME:[AREALTABLE]"); // 5 - TABLE_REMARKS - when(resultSet.getString(4)).thenReturn("REGULAR"); // 4 - TABLE_TYPE - - return resultSet; - }); - - // mock getColumns - when(databaseMetaData.getColumns(null, TEST_SCHEMA, null, null)) - .thenAnswer(answer -> { - ResultSet resultSet = mock(ResultSet.class, RETURNS_SMART_NULLS); - when(resultSet.next()).thenReturn(true, false); - when(resultSet.getString(3)).thenReturn(TABLE_NAME); // 3 - COLUMN_TABLE_NAME - when(resultSet.getString(4)).thenReturn("column1"); // 4 - COLUMN_NAME - when(resultSet.getString(6)).thenReturn("VARCHAR"); // 6 - COLUMN_TYPE_NAME - 
when(resultSet.getInt(5)).thenReturn(12); // 5 - COLUMN_DATA_TYPE - VARCHAR 12 - when(resultSet.getInt(7)).thenReturn(1); // 7 - COLUMN_SIZE - width - when(resultSet.getInt(9)).thenReturn(0); // 9 - COLUMN_DECIMAL_DIGITS - scale - when(resultSet.getString(12)).thenReturn("REALNAME:[column1]/TYPE:[STRING]"); // 12 - TABLE_REMARKS - when(resultSet.getString(18)).thenReturn("NO"); // 18 - COLUMN_IS_NULLABLE - when(resultSet.getString(23)).thenReturn("NO"); // 23 - COLUMN_IS_AUTOINCREMENT - when(resultSet.getString(23)).thenReturn(null); // 13 - COLUMN_DEFAULT_EXPR - - return resultSet; - }); - - // mock getIndexInfo - when(databaseMetaData.getIndexInfo(null, TEST_SCHEMA, "AREALTABLE", false, false)) - .thenAnswer(answer -> { - ResultSet resultSet = mock(ResultSet.class, RETURNS_SMART_NULLS); - when(resultSet.next()).thenReturn(true, true, true, false, true, true, true, false); - when(resultSet.getString(6)).thenReturn("AREALTABLE_1", "AREALTABLE_PRF1", "AREALTABLE_PRF2", "AREALTABLE_1", "AREALTABLE_PRF1", "AREALTABLE_PRF2"); // 6 - INDEX_NAME - when(resultSet.getString(9)).thenReturn("column1", "column1", "column1", "column1", "column1", "column1"); - when(resultSet.getBoolean(4)).thenReturn(true, true, true, true, true, true); // 4 - INDEX_NON_UNIQUE - return resultSet; - }); - - // When - final Schema postgresMetaDataProvider = postgres.openSchema(connection, "TestDatabase", TEST_SCHEMA); - Map> ignoredIndexesMap = ((AdditionalMetadata)postgresMetaDataProvider).ignoredIndexes(); - // test loading the cached version: - Map> ignoredIndexesMap1 = ((AdditionalMetadata)postgresMetaDataProvider).ignoredIndexes(); - - // Then - assertEquals("map size must match", 1, ignoredIndexesMap.size()); - assertEquals("map size must match", 1, ignoredIndexesMap1.size()); - String tableNameLowerCase = TABLE_NAME.toLowerCase(); - assertEquals("table ignored indexes size must match", 2, ignoredIndexesMap.get(tableNameLowerCase).size()); - Index indexPrf1 = 
ignoredIndexesMap.get(tableNameLowerCase).get(0); - Index indexPrf2 = ignoredIndexesMap.get(tableNameLowerCase).get(1); - assertEquals("index prf1 name", "AREALTABLE_PRF1", indexPrf1.getName()); - assertThat("index prf1 columns", indexPrf1.columnNames(), contains("column1")); - assertEquals("index prf2 name", "AREALTABLE_PRF2", indexPrf2.getName()); - assertThat("index prf2 columns", indexPrf2.columnNames(), contains("column1")); - } - - - /** - * Mockito {@link Answer} that returns a mock result set with a given number of resultRows. - */ - private static final class ReturnMockResultSetWithSequence implements Answer { - - private final int numberOfResultRows; - - - /** - * @param numberOfResultRows - */ - private ReturnMockResultSetWithSequence(int numberOfResultRows) { - super(); - this.numberOfResultRows = numberOfResultRows; - } - - @Override - public ResultSet answer(final InvocationOnMock invocation) throws Throwable { - final ResultSet resultSet = mock(ResultSet.class, RETURNS_SMART_NULLS); - when(resultSet.next()).thenAnswer(new Answer() { - private int counter; - - @Override - public Boolean answer(InvocationOnMock invocation) throws Throwable { - return counter++ < numberOfResultRows; - } - }); - - when(resultSet.getString(1)).thenReturn("Sequence1"); - - return resultSet; - } - } - -} +/* Copyright 2017 Alfa Financial Software + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.alfasoftware.morf.jdbc.postgresql; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.contains; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.RETURNS_SMART_NULLS; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.List; +import java.util.Map; + +import javax.sql.DataSource; + +import org.alfasoftware.morf.jdbc.DatabaseType; +import org.alfasoftware.morf.metadata.AdditionalMetadata; +import org.alfasoftware.morf.metadata.Index; +import org.alfasoftware.morf.metadata.Schema; +import org.alfasoftware.morf.metadata.Sequence; +import org.junit.Before; +import org.junit.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + + +/** + * Test class for {@link PostgreSQLMetaDataProvider} + * + * @author Copyright (c) Alfa Financial Software Ltd. 
2024 + */ +public class TestPostgreSqlMetaDataProvider { + private static final String TABLE_NAME = "AREALTABLE"; + private static final String TEST_SCHEMA = "TestSchema"; + + private final DataSource dataSource = mock(DataSource.class, RETURNS_SMART_NULLS); + private final Connection connection = mock(Connection.class, RETURNS_SMART_NULLS); + private DatabaseType postgres; + + @Before + public void setup() { + postgres = DatabaseType.Registry.findByIdentifier(PostgreSQL.IDENTIFIER); + } + + + @Before + public void before() throws SQLException { + when(dataSource.getConnection()).thenReturn(connection); + } + + + /** + * Checks the SQL run for retrieving sequences information + * + * @throws SQLException exception + */ + @Test + public void testLoadSequences() throws SQLException { + // Given + final PreparedStatement statement = mock(PreparedStatement.class, RETURNS_SMART_NULLS); + when(connection.prepareStatement("SELECT S.relname FROM pg_class S LEFT JOIN pg_depend D ON " + + "(S.oid = D.objid AND D.deptype = 'a') LEFT JOIN pg_namespace N on (N.oid = S.relnamespace) WHERE S.relkind = " + + "'S' AND D.objid IS NULL AND N.nspname=?")).thenReturn(statement); + when(statement.executeQuery()).thenAnswer(new ReturnMockResultSetWithSequence(1)); + + // When + final Schema postgresMetaDataProvider = postgres.openSchema(connection, "TestDatabase", TEST_SCHEMA); + assertEquals("Sequence names", "[Sequence1]", postgresMetaDataProvider.sequenceNames().toString()); + Sequence sequence = postgresMetaDataProvider.sequences().iterator().next(); + assertEquals("Sequence name", "Sequence1", sequence.getName()); + + verify(statement).setString(1, TEST_SCHEMA); + } + + /** + * Checks the SQL run for retrieving sequences information + * + * @throws SQLException exception + */ + @Test + public void testLoadAllIgnoredIndexes() throws SQLException { + // Given + Statement statement = mock(Statement.class, RETURNS_SMART_NULLS); + 
when(connection.createStatement(eq(ResultSet.TYPE_FORWARD_ONLY), eq(ResultSet.CONCUR_READ_ONLY))).thenReturn(statement); + when(statement.executeQuery(anyString())).thenAnswer(answer -> { + ResultSet resultSet = mock(ResultSet.class, RETURNS_SMART_NULLS); + when(resultSet.next()).thenReturn(true, true, true, false); + when(resultSet.getString(1)).thenReturn("AREALTABLE_1", "AREALTABLE_PRF1", "AREALTABLE_PRF2"); + when(resultSet.getString(2)).thenReturn("REALNAME:[AREALTABLE_1]", "REALNAME:[AREALTABLE_PRF1]", "REALNAME:[AREALTABLE_PRF2]"); + when(resultSet.getString(3)).thenReturn("AREALTABLE", "AREALTABLE", "AREALTABLE"); + when(resultSet.getString(4)).thenReturn("REALNAME:[ARealTable]", "REALNAME:[ARealTable]", "REALNAME:[ARealTable]"); + return resultSet; + }); + + DatabaseMetaData databaseMetaData = mock(DatabaseMetaData.class, RETURNS_SMART_NULLS); + when(connection.getMetaData()).thenReturn(databaseMetaData); + + // mock getTables + when(databaseMetaData.getTables(null, TEST_SCHEMA, null, new String[] { "TABLE" })) + .thenAnswer(answer -> { + ResultSet resultSet = mock(ResultSet.class, RETURNS_SMART_NULLS); + when(resultSet.next()).thenReturn(true, false); + when(resultSet.getString(3)).thenReturn(TABLE_NAME); // // 3 - TABLE_NAME + when(resultSet.getString(2)).thenReturn(TEST_SCHEMA); // 2 - TABLE_SCHEM + when(resultSet.getString(5)).thenReturn("REALNAME:[AREALTABLE]"); // 5 - TABLE_REMARKS + when(resultSet.getString(4)).thenReturn("REGULAR"); // 4 - TABLE_TYPE + + return resultSet; + }); + + // mock getColumns + when(databaseMetaData.getColumns(null, TEST_SCHEMA, null, null)) + .thenAnswer(answer -> { + ResultSet resultSet = mock(ResultSet.class, RETURNS_SMART_NULLS); + when(resultSet.next()).thenReturn(true, false); + when(resultSet.getString(3)).thenReturn(TABLE_NAME); // 3 - COLUMN_TABLE_NAME + when(resultSet.getString(4)).thenReturn("column1"); // 4 - COLUMN_NAME + when(resultSet.getString(6)).thenReturn("VARCHAR"); // 6 - COLUMN_TYPE_NAME + 
when(resultSet.getInt(5)).thenReturn(12); // 5 - COLUMN_DATA_TYPE - VARCHAR 12
+          when(resultSet.getInt(7)).thenReturn(1); // 7 - COLUMN_SIZE - width
+          when(resultSet.getInt(9)).thenReturn(0); // 9 - COLUMN_DECIMAL_DIGITS - scale
+          when(resultSet.getString(12)).thenReturn("REALNAME:[column1]/TYPE:[STRING]"); // 12 - COLUMN_REMARKS
+          when(resultSet.getString(18)).thenReturn("NO"); // 18 - COLUMN_IS_NULLABLE
+          when(resultSet.getString(23)).thenReturn("NO"); // 23 - COLUMN_IS_AUTOINCREMENT
+          when(resultSet.getString(13)).thenReturn(null); // 13 - COLUMN_DEFAULT_EXPR
+
+          return resultSet;
+        });
+
+    // mock getIndexInfo
+    when(databaseMetaData.getIndexInfo(null, TEST_SCHEMA, "AREALTABLE", false, false))
+        .thenAnswer(answer -> {
+          ResultSet resultSet = mock(ResultSet.class, RETURNS_SMART_NULLS);
+          when(resultSet.next()).thenReturn(true, true, true, false, true, true, true, false);
+          when(resultSet.getString(6)).thenReturn("AREALTABLE_1", "AREALTABLE_PRF1", "AREALTABLE_PRF2", "AREALTABLE_1", "AREALTABLE_PRF1", "AREALTABLE_PRF2"); // 6 - INDEX_NAME
+          when(resultSet.getString(9)).thenReturn("column1", "column1", "column1", "column1", "column1", "column1");
+          when(resultSet.getBoolean(4)).thenReturn(true, true, true, true, true, true); // 4 - INDEX_NON_UNIQUE
+          return resultSet;
+        });
+
+    Statement statement1 = mock(Statement.class, RETURNS_SMART_NULLS);
+    when(connection.createStatement()).thenReturn(statement1);
+    when(statement1.executeQuery(anyString())).thenAnswer(answer -> {
+      ResultSet resultSet = mock(ResultSet.class, RETURNS_SMART_NULLS);
+      when(resultSet.next()).thenReturn(false);
+      return resultSet;
+    });
+
+    // When
+    final Schema postgresMetaDataProvider = postgres.openSchema(connection, "TestDatabase", TEST_SCHEMA);
+    Map<String, List<Index>> ignoredIndexesMap = ((AdditionalMetadata)postgresMetaDataProvider).ignoredIndexes();
+    // test loading the cached version:
+    Map<String, List<Index>> ignoredIndexesMap1 = ((AdditionalMetadata)postgresMetaDataProvider).ignoredIndexes();
+
+    // Then
+    
assertEquals("map size must match", 1, ignoredIndexesMap.size()); + assertEquals("map size must match", 1, ignoredIndexesMap1.size()); + String realTableName = "ARealTable"; + assertEquals("table ignored indexes size must match", 2, ignoredIndexesMap.get(realTableName).size()); + Index indexPrf1 = ignoredIndexesMap.get(realTableName).get(0); + Index indexPrf2 = ignoredIndexesMap.get(realTableName).get(1); + assertEquals("index prf1 name", "AREALTABLE_PRF1", indexPrf1.getName()); + assertThat("index prf1 columns", indexPrf1.columnNames(), contains("column1")); + assertEquals("index prf2 name", "AREALTABLE_PRF2", indexPrf2.getName()); + assertThat("index prf2 columns", indexPrf2.columnNames(), contains("column1")); + } + + + /** + * Checks the SQL run for retrieving partitioned tables information + * + * @throws SQLException exception + */ + @Test + public void testLoadPartitionedTables() throws SQLException { + // Given + final Statement statement = mock(PreparedStatement.class, RETURNS_SMART_NULLS); + when(connection.createStatement()).thenReturn(statement); + when(statement.executeQuery("select relname from pg_class where not relispartition and relkind = 'p'")) + .thenAnswer(new ReturnMockResultSetWithPartitionTables(1, "partition")); + when(statement.executeQuery("select relname from pg_class where relispartition and relkind = 'r'")) + .thenAnswer(new ReturnMockResultSetWithPartitionTables(1, "partition_p0")); + + // When + final AdditionalMetadata postgresMetaDataProvider = (AdditionalMetadata)postgres.openSchema(connection, "TestDatabase", "TestSchema"); + assertEquals("Partition Table name", "[partition]", postgresMetaDataProvider.partitionedTableNames().toString()); + String partitionTable = postgresMetaDataProvider.partitionedTableNames().iterator().next(); + assertEquals("Partition Table name", "partition", partitionTable); + } + + + /** + * Checks the SQL run for retrieving partition table information + * + * @throws SQLException exception + */ + @Test + 
public void testLoadPartitionTables() throws SQLException {
+    // Given
+    final Statement statement = mock(PreparedStatement.class, RETURNS_SMART_NULLS);
+    when(connection.createStatement()).thenReturn(statement);
+    when(statement.executeQuery("select relname from pg_class where not relispartition and relkind = 'p'"))
+        .thenAnswer(new ReturnMockResultSetWithPartitionTables(1, "partition"));
+    when(statement.executeQuery("select relname from pg_class where relispartition and relkind = 'r'"))
+        .thenAnswer(new ReturnMockResultSetWithPartitionTables(1, "partition_p0"));
+
+    // When
+    final AdditionalMetadata postgresMetaDataProvider = (AdditionalMetadata)postgres.openSchema(connection, "TestDatabase", "TestSchema");
+
+    assertEquals("Partition Table name", "[partition_p0]", postgresMetaDataProvider.partitionTableNames().toString());
+    String partitionTable = postgresMetaDataProvider.partitionTableNames().iterator().next();
+    assertEquals("Partition Table name", "partition_p0", partitionTable);
+  }
+
+
+  /**
+   * Checks the SQL run for retrieving ignored tables information
+   *
+   * @throws SQLException exception
+   */
+  @Test
+  public void testIgnoredTables() throws SQLException {
+    // Given
+    final Statement statement = mock(PreparedStatement.class, RETURNS_SMART_NULLS);
+
+    final PreparedStatement statement1 = mock(PreparedStatement.class, RETURNS_SMART_NULLS);
+    when(connection.prepareStatement(anyString())).thenReturn(statement1);
+
+    when(connection.createStatement()).thenReturn(statement);
+    when(statement.executeQuery("select relname from pg_class where not relispartition and relkind = 'p'"))
+        .thenAnswer(new ReturnMockResultSetWithPartitionTables(1, "partition"));
+    when(statement.executeQuery("select relname from pg_class where relispartition and relkind = 'r'"))
+        .thenAnswer(new ReturnMockResultSetWithPartitionTables(1, "partition_p0"));
+    DatabaseMetaData postgreSQLMetaDataMock = mock(DatabaseMetaData.class);
+    
when(connection.getMetaData()).thenReturn(postgreSQLMetaDataMock); + when(postgreSQLMetaDataMock.getTables(any(), any(), any(), any())) + .thenAnswer(new ReturnMockResultSetWithSequence(0)); + + // When + final Schema postgresMetaDataProvider = postgres.openSchema(connection, "TestDatabase", "TestSchema"); + // Then + assertEquals("Partition Table name", "[partition]", postgresMetaDataProvider.tableNames().toString()); + assertFalse("Table names", postgresMetaDataProvider.tableNames().toString().contains("partition_p0")); + } + + + /** + * Mockito {@link Answer} that returns a mock result set with a given number of resultRows. + */ + private static final class ReturnMockResultSetWithSequence implements Answer { + + private final int numberOfResultRows; + + + /** + * @param numberOfResultRows + */ + private ReturnMockResultSetWithSequence(int numberOfResultRows) { + super(); + this.numberOfResultRows = numberOfResultRows; + } + + @Override + public ResultSet answer(final InvocationOnMock invocation) throws Throwable { + final ResultSet resultSet = mock(ResultSet.class, RETURNS_SMART_NULLS); + when(resultSet.next()).thenAnswer(new Answer() { + private int counter; + + @Override + public Boolean answer(InvocationOnMock invocation) throws Throwable { + return counter++ < numberOfResultRows; + } + }); + + when(resultSet.getString(1)).thenReturn("Sequence1"); + + return resultSet; + } + } + + /** + * Mockito {@link Answer} that returns a mock result set with a given number of resultRows for partition tables. 
+ */ + private static final class ReturnMockResultSetWithPartitionTables implements Answer { + + private final int numberOfResultRows; + private final String partitionResult; + + + /** + * @param numberOfResultRows + */ + private ReturnMockResultSetWithPartitionTables(int numberOfResultRows, String partitionResult) { + super(); + this.numberOfResultRows = numberOfResultRows; + // class is rigged for just one value + this.partitionResult = partitionResult; + } + + @Override + public ResultSet answer(final InvocationOnMock invocation) throws Throwable { + final ResultSet resultSet = mock(ResultSet.class, RETURNS_SMART_NULLS); + when(resultSet.next()).thenAnswer(new Answer() { + private int counter; + + @Override + public Boolean answer(InvocationOnMock invocation) throws Throwable { + return counter++ < numberOfResultRows; + } + }); + + when(resultSet.getString(1)).thenReturn(partitionResult); + + return resultSet; + } + } +} diff --git a/morf-sqlserver/src/main/java/org/alfasoftware/morf/jdbc/sqlserver/SqlServerMetaDataProvider.java b/morf-sqlserver/src/main/java/org/alfasoftware/morf/jdbc/sqlserver/SqlServerMetaDataProvider.java index c486fba87..ba524dcbb 100755 --- a/morf-sqlserver/src/main/java/org/alfasoftware/morf/jdbc/sqlserver/SqlServerMetaDataProvider.java +++ b/morf-sqlserver/src/main/java/org/alfasoftware/morf/jdbc/sqlserver/SqlServerMetaDataProvider.java @@ -152,4 +152,5 @@ protected String buildSequenceSql(String schemaName) { return sequenceSqlBuilder.toString(); } + } diff --git a/morf-sqlserver/src/test/java/org/alfasoftware/morf/jdbc/sqlserver/TestSqlServerDialect.java b/morf-sqlserver/src/test/java/org/alfasoftware/morf/jdbc/sqlserver/TestSqlServerDialect.java index 7b13c0889..a04eb061c 100755 --- a/morf-sqlserver/src/test/java/org/alfasoftware/morf/jdbc/sqlserver/TestSqlServerDialect.java +++ b/morf-sqlserver/src/test/java/org/alfasoftware/morf/jdbc/sqlserver/TestSqlServerDialect.java @@ -70,7 +70,9 @@ protected List expectedCreateTableStatements() 
{ "CREATE INDEX Alternate_1 ON TESTSCHEMA.Alternate ([stringField])", "CREATE TABLE TESTSCHEMA.NonNull ([id] BIGINT NOT NULL, [version] INTEGER CONSTRAINT NonNull_version_DF DEFAULT 0, [stringField] NVARCHAR(3) COLLATE SQL_Latin1_General_CP1_CS_AS NOT NULL, [intField] NUMERIC(8,0) NOT NULL, [booleanField] BIT NOT NULL, [dateField] DATE NOT NULL, [blobField] IMAGE NOT NULL, CONSTRAINT [NonNull_PK] PRIMARY KEY ([id]))", "CREATE TABLE TESTSCHEMA.CompositePrimaryKey ([id] BIGINT NOT NULL, [version] INTEGER CONSTRAINT CompositePrimaryKey_version_DF DEFAULT 0, [stringField] NVARCHAR(3) COLLATE SQL_Latin1_General_CP1_CS_AS NOT NULL, [secondPrimaryKey] NVARCHAR(3) COLLATE SQL_Latin1_General_CP1_CS_AS NOT NULL, CONSTRAINT [CompositePrimaryKey_PK] PRIMARY KEY ([id], [secondPrimaryKey]))", - "CREATE TABLE TESTSCHEMA.AutoNumber ([intField] BIGINT NOT NULL IDENTITY(5, 1), CONSTRAINT [AutoNumber_PK] PRIMARY KEY ([intField]))" + "CREATE TABLE TESTSCHEMA.AutoNumber ([intField] BIGINT NOT NULL IDENTITY(5, 1), CONSTRAINT [AutoNumber_PK] PRIMARY KEY ([intField]))", + "CREATE TABLE TESTSCHEMA.Measurement ([intField] NUMERIC(8,0) NOT NULL, [dateField] DATE NOT NULL, [stringField] NVARCHAR(3) COLLATE SQL_Latin1_General_CP1_CS_AS NOT NULL)", + "CREATE TABLE TESTSCHEMA.MeasurementHash ([intField] NUMERIC(8,0) NOT NULL, [dateField] DATE NOT NULL, [stringField] NVARCHAR(3) COLLATE SQL_Latin1_General_CP1_CS_AS NOT NULL)" ); } diff --git a/morf-sqlserver/src/test/java/org/alfasoftware/morf/jdbc/sqlserver/TestSqlServerMetaDataProvider.java b/morf-sqlserver/src/test/java/org/alfasoftware/morf/jdbc/sqlserver/TestSqlServerMetaDataProvider.java index bc365c983..8516862db 100644 --- a/morf-sqlserver/src/test/java/org/alfasoftware/morf/jdbc/sqlserver/TestSqlServerMetaDataProvider.java +++ b/morf-sqlserver/src/test/java/org/alfasoftware/morf/jdbc/sqlserver/TestSqlServerMetaDataProvider.java @@ -15,6 +15,20 @@ package org.alfasoftware.morf.jdbc.sqlserver; +import static org.junit.Assert.assertEquals; 
+import static org.mockito.Mockito.RETURNS_SMART_NULLS; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +import javax.sql.DataSource; + import org.alfasoftware.morf.jdbc.DatabaseType; import org.alfasoftware.morf.metadata.Schema; import org.alfasoftware.morf.metadata.Sequence; @@ -23,16 +37,6 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import javax.sql.DataSource; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; - -import static org.junit.Assert.assertEquals; -import static org.mockito.Mockito.*; -import static org.mockito.Mockito.when; - /** * Test class for {@link SqlServerMetaDataProvider} diff --git a/morf-testsupport/src/main/java/org/alfasoftware/morf/jdbc/AbstractSqlDialectTest.java b/morf-testsupport/src/main/java/org/alfasoftware/morf/jdbc/AbstractSqlDialectTest.java index c9c480cf5..0e245537b 100755 --- a/morf-testsupport/src/main/java/org/alfasoftware/morf/jdbc/AbstractSqlDialectTest.java +++ b/morf-testsupport/src/main/java/org/alfasoftware/morf/jdbc/AbstractSqlDialectTest.java @@ -128,7 +128,9 @@ import org.alfasoftware.morf.metadata.AdditionalMetadata; import org.alfasoftware.morf.metadata.Column; import org.alfasoftware.morf.metadata.DataType; +import org.alfasoftware.morf.metadata.DatePartitionedByPeriodRule; import org.alfasoftware.morf.metadata.Index; +import org.alfasoftware.morf.metadata.PartitioningByHashRule; import org.alfasoftware.morf.metadata.Schema; import org.alfasoftware.morf.metadata.SchemaResource; import org.alfasoftware.morf.metadata.Sequence; @@ -168,6 +170,7 @@ import org.alfasoftware.morf.upgrade.adapt.AlteredTable; import org.apache.commons.lang3.StringUtils; import 
org.joda.time.LocalDate; +import org.joda.time.Period; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; @@ -213,6 +216,9 @@ public abstract class AbstractSqlDialectTest { private static final String NON_NULL_TABLE = "NonNull"; private static final String COMPOSITE_PRIMARY_KEY_TABLE = "CompositePrimaryKey"; private static final String AUTO_NUMBER_TABLE = "AutoNumber"; + private static final String MEASUREMENT_TABLE = "Measurement"; + private static final String MEASUREMENT_HASH_TABLE = "MeasurementHash"; + private static final String INNER_FIELD_B = "innerFieldB"; private static final String INNER_FIELD_A = "innerFieldA"; @@ -471,6 +477,22 @@ public void setUp() { autonumber(INT_FIELD, 5) ); + Table partitionedTable = table(MEASUREMENT_TABLE) + .columns( + column(INT_FIELD, DataType.DECIMAL, 8), + column(DATE_FIELD, DataType.DATE).partitioned(), + column(STRING_FIELD, DataType.STRING, 3) + ).partitionBy( + new DatePartitionedByPeriodRule(DATE_FIELD, LocalDate.parse("2012-03-01"), Period.months(1), 2)); + + Table partitionedTableByHash = table(MEASUREMENT_HASH_TABLE) + .columns( + column(INT_FIELD, DataType.DECIMAL, 8).partitioned(), + column(DATE_FIELD, DataType.DATE), + column(STRING_FIELD, DataType.STRING, 3) + ).partitionBy( + new PartitioningByHashRule(INT_FIELD, 8)); + // Test view TableReference tr = new TableReference(TEST_TABLE); FieldReference f = new FieldReference(STRING_FIELD); @@ -503,7 +525,7 @@ public void setUp() { // Builds a test schema metadata = schema(testTable, testTempTable, testTableLongName, alternateTestTable, alternateTestTempTable, otherTable, testTableAllUpperCase, testTableMixedCase, nonNullTable, nonNullTempTable, compositePrimaryKey, autoNumber, - inner, insertAB, insertA); + partitionedTable, partitionedTableByHash, inner, insertAB, insertA); } /** @@ -537,6 +559,8 @@ public void testCreateTableStatements() { Table nonNull = metadata.getTable(NON_NULL_TABLE); Table compositePrimaryKey = 
metadata.getTable(COMPOSITE_PRIMARY_KEY_TABLE); Table autoNumber = metadata.getTable(AUTO_NUMBER_TABLE); + Table partitionedTable = metadata.getTable(MEASUREMENT_TABLE); + Table partitionedTableByHash = metadata.getTable(MEASUREMENT_HASH_TABLE); compareStatements( expectedCreateTableStatements(), @@ -544,7 +568,9 @@ public void testCreateTableStatements() { testDialect.tableDeploymentStatements(alternate), testDialect.tableDeploymentStatements(nonNull), testDialect.tableDeploymentStatements(compositePrimaryKey), - testDialect.tableDeploymentStatements(autoNumber) + testDialect.tableDeploymentStatements(autoNumber), + testDialect.tableDeploymentStatements(partitionedTable), + testDialect.tableDeploymentStatements(partitionedTableByHash) ); }