From e982d74d9a19ff83fbf8cdf8d9e61fa8acdec987 Mon Sep 17 00:00:00 2001 From: Toshihiro Suzuki Date: Tue, 24 Dec 2024 18:51:34 +0900 Subject: [PATCH 01/18] Add admin interface and operation attributes things for Attribute-Based Access Control (#2405) --- .../java/com/scalar/db/api/AbacAdmin.java | 786 ++++++++++++++++++ .../db/api/AbacOperationAttributes.java | 47 ++ .../java/com/scalar/db/api/DeleteBuilder.java | 64 +- .../db/api/DistributedTransactionAdmin.java | 2 +- .../java/com/scalar/db/api/GetBuilder.java | 80 +- .../com/scalar/db/api/OperationBuilder.java | 60 ++ .../java/com/scalar/db/api/ScanBuilder.java | 102 ++- .../java/com/scalar/db/api/UpdateBuilder.java | 64 +- .../java/com/scalar/db/api/UpsertBuilder.java | 66 +- .../DecoratedDistributedTransactionAdmin.java | 222 +++++ .../com/scalar/db/common/error/CoreError.java | 6 + .../db/api/AbacOperationAttributesTest.java | 152 ++++ .../com/scalar/db/api/DeleteBuilderTest.java | 64 +- .../com/scalar/db/api/GetBuilderTest.java | 103 ++- .../com/scalar/db/api/ScanBuilderTest.java | 167 +++- .../com/scalar/db/api/UpdateBuilderTest.java | 102 ++- .../com/scalar/db/api/UpsertBuilderTest.java | 100 ++- 17 files changed, 2082 insertions(+), 105 deletions(-) create mode 100644 core/src/main/java/com/scalar/db/api/AbacAdmin.java create mode 100644 core/src/main/java/com/scalar/db/api/AbacOperationAttributes.java create mode 100644 core/src/test/java/com/scalar/db/api/AbacOperationAttributesTest.java diff --git a/core/src/main/java/com/scalar/db/api/AbacAdmin.java b/core/src/main/java/com/scalar/db/api/AbacAdmin.java new file mode 100644 index 0000000000..4ae41b7cfa --- /dev/null +++ b/core/src/main/java/com/scalar/db/api/AbacAdmin.java @@ -0,0 +1,786 @@ +package com.scalar.db.api; + +import com.scalar.db.common.error.CoreError; +import com.scalar.db.exception.storage.ExecutionException; +import java.util.List; +import java.util.Optional; +import javax.annotation.Nullable; + +/** An interface for administrative operations for attribute-based access control. */ +public interface AbacAdmin { + + /** + * Creates a policy with the given name and data tag column name. + * + * @param policyName the policy name + * @param dataTagColumnName the data tag column name. If null, the default data tag column name is + * used + * @throws ExecutionException if the operation fails + */ + default void createPolicy(String policyName, @Nullable String dataTagColumnName) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Enables a policy that has the given name. + * + * @param policyName the policy name + * @throws ExecutionException if the operation fails + */ + default void enablePolicy(String policyName) throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Disables a policy that has the given name. + * + * @param policyName the policy name + * @throws ExecutionException if the operation fails + */ + default void disablePolicy(String policyName) throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Retrieves a policy that has the given name. 
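As a quick orientation (not part of the patch), here is a minimal sketch of driving the policy lifecycle above. It assumes a concrete `AbacAdmin` implementation is wired up, since the `default` methods in this interface throw `UnsupportedOperationException` unless the Enterprise-only ABAC feature is enabled; the policy name `sensitivity` is illustrative:

```java
import com.scalar.db.api.AbacAdmin;
import com.scalar.db.exception.storage.ExecutionException;

public class PolicyLifecycleSketch {
  public static void run(AbacAdmin admin) throws ExecutionException {
    // Passing null for the data tag column name falls back to the default
    admin.createPolicy("sensitivity", null);
    admin.enablePolicy("sensitivity");
    admin
        .getPolicy("sensitivity")
        .ifPresent(p -> System.out.println(p.getName() + " -> " + p.getState()));
  }
}
```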
+ * + * @param policyName the policy name + * @return the policy + * @throws ExecutionException if the operation fails + */ + default Optional getPolicy(String policyName) throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Retrieves all policies. + * + * @return the policies + * @throws ExecutionException if the operation fails + */ + default List getPolicies() throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Creates a level with the given short name, long name and level number for the given policy. + * + * @param policyName the policy name + * @param levelShortName the short name of the level + * @param levelLongName the long name of the level + * @param levelNumber the level number + * @throws ExecutionException if the operation fails + */ + default void createLevel( + String policyName, String levelShortName, String levelLongName, int levelNumber) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Drops a level that has the given short name for the given policy. + * + * @param policyName the policy name + * @param levelShortName the short name of the level + * @throws ExecutionException if the operation fails + */ + default void dropLevel(String policyName, String levelShortName) throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Retrieves a level that has the given short name for the given policy. + * + * @param policyName the policy name + * @param levelShortName the short name of the level + * @return the level + * @throws ExecutionException if the operation fails + */ + default Optional getLevel(String policyName, String levelShortName) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Retrieves all levels for the given policy. + * + * @param policyName the policy name + * @return the levels + * @throws ExecutionException if the operation fails + */ + default List getLevels(String policyName) throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Creates a compartment with the given short name and long name for the given policy. + * + * @param policyName the policy name + * @param compartmentShortName the short name of the compartment + * @param compartmentLongName the long name of the compartment + * @throws ExecutionException if the operation fails + */ + default void createCompartment( + String policyName, String compartmentShortName, String compartmentLongName) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Drops a compartment that has the given short name for the given policy. + * + * @param policyName the policy name + * @param compartmentShortName the short name of the compartment + * @throws ExecutionException if the operation fails + */ + default void dropCompartment(String policyName, String compartmentShortName) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Retrieves a compartment that has the given short name for the given policy. 
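Continuing the sketch (illustrative names only), levels, compartments, and groups are defined per policy as below; the level number presumably drives the ordering exposed via `Level#getLevelNumber()`:

```java
import com.scalar.db.api.AbacAdmin;
import com.scalar.db.exception.storage.ExecutionException;

public class TagComponentSketch {
  public static void run(AbacAdmin admin) throws ExecutionException {
    // Levels carry a number that orders them within the policy
    admin.createLevel("sensitivity", "PB", "PUBLIC", 1000);
    admin.createLevel("sensitivity", "CF", "CONFIDENTIAL", 2000);

    // Compartments are flat; groups may nest via a parent short name
    admin.createCompartment("sensitivity", "FIN", "FINANCE");
    admin.createGroup("sensitivity", "HQ", "HEADQUARTERS", null); // null = top-level group
    admin.createGroup("sensitivity", "HQ_EU", "HEADQUARTERS_EU", "HQ");
  }
}
```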
+ * + * @param policyName the policy name + * @param compartmentShortName the short name of the compartment + * @return the compartment + * @throws ExecutionException if the operation fails + */ + default Optional getCompartment(String policyName, String compartmentShortName) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Retrieves all compartments for the given policy. + * + * @param policyName the policy name + * @return the compartments + * @throws ExecutionException if the operation fails + */ + default List getCompartments(String policyName) throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Creates a group with the given short name, long name, and the short name of the parent group + * for the given policy. + * + * @param policyName the policy name + * @param groupShortName the short name of the group + * @param groupLongName the long name of the group + * @param parentGroupShortName the short name of the parent group. If null, the group is a + * top-level group + * @throws ExecutionException if the operation fails + */ + default void createGroup( + String policyName, + String groupShortName, + String groupLongName, + @Nullable String parentGroupShortName) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Drops a group that has the given short name for the given policy. + * + * @param policyName the policy name + * @param groupShortName the short name of the group + * @throws ExecutionException if the operation fails + */ + default void dropGroup(String policyName, String groupShortName) throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Retrieves a group that has the given short name for the given policy. + * + * @param policyName the policy name + * @param groupShortName the short name of the group + * @return the group + * @throws ExecutionException if the operation fails + */ + default Optional getGroup(String policyName, String groupShortName) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Retrieves all groups for the given policy. + * + * @param policyName the policy name + * @return the groups + * @throws ExecutionException if the operation fails + */ + default List getGroups(String policyName) throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Sets the given levels of the given policy to a user that has the given username. + * + * @param policyName the policy name + * @param username the username + * @param levelShortName the short name of the level to set + * @param defaultLevelShortName the short name of the default level. If null, the {@code + * levelShortName} will be used as the default level + * @param rowLevelShortName the short name of the row level. 
If null, the {@code + * defaultLevelShortName} will be used as the row level + * @throws ExecutionException if the operation fails + */ + default void setLevelsToUser( + String policyName, + String username, + String levelShortName, + @Nullable String defaultLevelShortName, + @Nullable String rowLevelShortName) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Adds the given compartment of the given policy to a user that has the given username. Before + * adding the compartment, levels must be set to the user. + * + * @param policyName the policy name + * @param username the username + * @param compartmentShortName the short name of the compartment + * @param accessMode the access mode + * @param defaultCompartment whether the compartment is a default compartment + * @param rowCompartment whether the compartment is a row compartment + * @throws ExecutionException if the operation fails + */ + default void addCompartmentToUser( + String policyName, + String username, + String compartmentShortName, + AccessMode accessMode, + boolean defaultCompartment, + boolean rowCompartment) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Removes the given compartment of the given policy from a user that has the given username. + * + * @param policyName the policy name + * @param username the username + * @param compartmentShortName the short name of the compartment + * @throws ExecutionException if the operation fails + */ + default void removeCompartmentFromUser( + String policyName, String username, String compartmentShortName) throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Adds the given group of the given policy to a user that has the given username. Before adding + * the group, levels must be set to the user. + * + * @param policyName the policy name + * @param username the username + * @param groupShortName the short name of the group + * @param accessMode the access mode + * @param defaultGroup whether the group is a default group + * @param rowGroup whether the group is a row group + * @throws ExecutionException if the operation fails + */ + default void addGroupToUser( + String policyName, + String username, + String groupShortName, + AccessMode accessMode, + boolean defaultGroup, + boolean rowGroup) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Removes the given group of the given policy from a user that has the given username. + * + * @param policyName the policy name + * @param username the username + * @param groupShortName the short name of the group + * @throws ExecutionException if the operation fails + */ + default void removeGroupFromUser(String policyName, String username, String groupShortName) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Drops the user tag information of a user with the given username for the given policy. 
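A hedged sketch of assigning user tag components, following the ordering constraint documented above (levels first, then compartments and groups); the username and short names are illustrative:

```java
import com.scalar.db.api.AbacAdmin;
import com.scalar.db.api.AbacAdmin.AccessMode;
import com.scalar.db.exception.storage.ExecutionException;

public class UserTagSketch {
  public static void run(AbacAdmin admin) throws ExecutionException {
    // Levels must be set before compartments or groups can be added;
    // null default/row level short names fall back as documented above
    admin.setLevelsToUser("sensitivity", "alice", "CF", null, null);
    admin.addCompartmentToUser("sensitivity", "alice", "FIN", AccessMode.READ_WRITE, false, false);
    admin.addGroupToUser("sensitivity", "alice", "HQ", AccessMode.READ_ONLY, false, false);
  }
}
```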
+ * + * @param policyName the policy name + * @param username the username + * @throws ExecutionException if the operation fails + */ + default void dropUserTagInfoFromUser(String policyName, String username) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Retrieves the user tag information of a user with the given username for the given policy. + * + * @param policyName the policy name + * @param username the username + * @return the user tag information. If the user tag information is not registered, returns an + * empty optional + * @throws ExecutionException if the operation fails + */ + default Optional getUserTagInfo(String policyName, String username) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Applies the given policy to the given namespace. + * + * @param policyName the policy name + * @param namespaceName the namespace name + * @throws ExecutionException if the operation fails + */ + default void applyPolicyToNamespace(String policyName, String namespaceName) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Enables the given policy for the given namespace. + * + * @param policyName the policy name + * @param namespaceName the namespace name + * @throws ExecutionException if the operation fails + */ + default void enableNamespacePolicy(String policyName, String namespaceName) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Disables the given policy for the given namespace. + * + * @param policyName the policy name + * @param namespaceName the namespace name + * @throws ExecutionException if the operation fails + */ + default void disableNamespacePolicy(String policyName, String namespaceName) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Retrieves the namespace policy for the given namespace. + * + * @param policyName the policy name + * @param namespaceName the namespace name + * @return the namespace policy. If the policy is not applied to the namespace, returns an empty + * optional + * @throws ExecutionException if the operation fails + */ + default Optional getNamespacePolicy(String policyName, String namespaceName) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Retrieves all namespace policies. + * + * @return the namespaces policies + * @throws ExecutionException if the operation fails + */ + default List getNamespacePolicies() throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Applies the given policy to the given table of the given namespace. + * + * @param policyName the policy name + * @param namespaceName the namespace name + * @param tableName the table name + * @throws ExecutionException if the operation fails + */ + default void applyPolicyToTable(String policyName, String namespaceName, String tableName) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Enables the given policy of the given table of the given namespace. 
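And a sketch of attaching a policy to data, assuming (as the apply/enable method pairs above suggest) that applying a policy and enabling it are separate steps:

```java
import com.scalar.db.api.AbacAdmin;
import com.scalar.db.exception.storage.ExecutionException;

public class PolicyAttachmentSketch {
  public static void run(AbacAdmin admin) throws ExecutionException {
    // Namespace-level enforcement
    admin.applyPolicyToNamespace("sensitivity", "ns");
    admin.enableNamespacePolicy("sensitivity", "ns");

    // Table-level enforcement
    admin.applyPolicyToTable("sensitivity", "ns", "tbl");
    admin.enableTablePolicy("sensitivity", "ns", "tbl");
  }
}
```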
+ * + * @param policyName the policy name + * @param namespaceName the namespace name + * @param tableName the table name + * @throws ExecutionException if the operation fails + */ + default void enableTablePolicy(String policyName, String namespaceName, String tableName) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Disables the given policy of the given table of the given namespace. + * + * @param policyName the policy name + * @param namespaceName the namespace name + * @param tableName the table name + * @throws ExecutionException if the operation fails + */ + default void disableTablePolicy(String policyName, String namespaceName, String tableName) + throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Retrieves the table policy for the given table of the given namespace. + * + * @param policyName the policy name + * @param namespaceName the namespace name + * @param tableName the table name + * @return the table policy. If the policy is not applied to the table, returns an empty optional + * @throws ExecutionException if the operation fails + */ + default Optional getTablePolicy( + String policyName, String namespaceName, String tableName) throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** + * Retrieves all table policies. + * + * @return the table policies + * @throws ExecutionException if the operation fails + */ + default List getTablePolicies() throws ExecutionException { + throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage()); + } + + /** The state of a policy. */ + enum PolicyState { + /** The policy is enabled. */ + ENABLED, + + /** The policy is disabled. */ + DISABLED + } + + /** The access mode for compartments and groups. */ + enum AccessMode { + /** The access mode for read only. */ + READ_ONLY, + + /** The access mode for read and write. */ + READ_WRITE + } + + /** A policy for ABAC. All components of ABAC are associated with a policy. */ + interface Policy { + /** + * Returns the policy name. + * + * @return the policy name + */ + String getName(); + + /** + * Returns the data tag column name. + * + * @return the data tag column name + */ + String getDataTagColumnName(); + + /** + * Returns the state of the policy. + * + * @return the state + */ + PolicyState getState(); + } + + /** A level that is one of the components of a tag in ABAC. */ + interface Level { + /** + * Returns the policy name. + * + * @return the policy name + */ + String getPolicyName(); + + /** + * Returns the short name of the level. + * + * @return the short name of the level + */ + String getShortName(); + + /** + * Returns the long name of the level. + * + * @return the long name of the level + */ + String getLongName(); + + /** + * Returns the level number. + * + * @return the level number + */ + int getLevelNumber(); + } + + /** A compartment that is one of the components of a tag in ABAC. */ + interface Compartment { + /** + * Returns the policy name. + * + * @return the policy name + */ + String getPolicyName(); + + /** + * Returns the short name of the compartment. + * + * @return the short name of the compartment + */ + String getShortName(); + + /** + * Returns the long name of the compartment. 
+ * + * @return the long name of the compartment + */ + String getLongName(); + } + + /** A group that is one of the components of a tag in ABAC. */ + interface Group { + /** + * Returns the policy name. + * + * @return the policy name + */ + String getPolicyName(); + + /** + * Returns the short name of the group. + * + * @return the short name of the group + */ + String getShortName(); + + /** + * Returns the long name of the group. + * + * @return the long name + */ + String getLongName(); + + /** + * Returns the parent group short name if the group is not a top-level group. + * + * @return the parent group short name. If the group is a top-level group, returns an empty + * optional + */ + Optional getParentGroupShortName(); + } + + /** The user tag information of a user for a policy in ABAC. */ + interface UserTagInfo { + + /** The level information. */ + interface LevelInfo { + /** + * Returns the short name of the level. + * + * @return the short name of the level + */ + String getLevelShortName(); + + /** + * Returns the short name of the default level. + * + * @return the short name of the default level + */ + String getDefaultLevelShortName(); + + /** + * Returns the short name of the row level. + * + * @return the short name of the row level + */ + String getRowLevelShortName(); + } + + /** The compartment information. */ + interface CompartmentInfo { + /** + * Returns the short names of the compartments that the user has read access to. + * + * @return the short names of the compartments that the user has read access to + */ + List getReadCompartmentShortNames(); + + /** + * Returns the short names of the compartments that the user has write access to. + * + * @return the short names of the compartments that the user has write access to + */ + List getWriteCompartmentShortNames(); + + /** + * Returns the short names of the default compartments that the user has read access to. + * + * @return the short names of the default compartments that the user has read access to + */ + List getDefaultReadCompartmentShortNames(); + + /** + * Returns the short names of the default compartments that the user has write access to. + * + * @return the short names of + */ + List getDefaultWriteCompartmentShortNames(); + + /** + * Returns the short names of the row compartments. + * + * @return the short names of the row compartments + */ + List getRowCompartmentShortNames(); + } + + /** The group information. */ + interface GroupInfo { + /** + * Returns the short names of the groups that the user has read access to. + * + * @return the short names of the groups that the user has read access to + */ + List getReadGroupShortNames(); + + /** + * Returns the short names of the groups that the user has write access to. + * + * @return the short names of the groups that the user has write access to + */ + List getWriteGroupShortNames(); + + /** + * Returns the short names of the default groups that the user has read access to. + * + * @return the short names of the default groups that the user has read access to + */ + List getDefaultReadGroupShortNames(); + + /** + * Returns the short names of the default groups that the user has write access to. + * + * @return the short names of the default groups that the user has write access to + */ + List getDefaultWriteGroupShortNames(); + + /** + * Returns the short names of the row groups. + * + * @return the short names of the row groups. + */ + List getRowGroupShortNames(); + } + + /** + * Returns the policy name. 
+ * + * @return the policy name + */ + String getPolicyName(); + + /** + * Returns the username. + * + * @return the username + */ + String getUsername(); + + /** + * Returns the level information. + * + * @return the level information + */ + LevelInfo getLevelInfo(); + + /** + * Returns the compartment information. + * + * @return the compartment information + */ + CompartmentInfo getCompartmentInfo(); + + /** + * Returns the group information. + * + * @return the group information + */ + GroupInfo getGroupInfo(); + } + + /** The namespace policy. */ + interface NamespacePolicy { + /** + * Returns the policy name. + * + * @return the policy name + */ + String getPolicyName(); + + /** + * Returns the namespace name. + * + * @return the namespace name + */ + String getNamespaceName(); + + /** + * Returns the state of the policy. + * + * @return the state of the policy + */ + PolicyState getState(); + } + + /** The table policy. */ + interface TablePolicy { + /** + * Returns the policy name. + * + * @return the policy name + */ + String getPolicyName(); + + /** + * Returns the namespace name. + * + * @return the namespace name + */ + String getNamespaceName(); + + /** + * Returns the table name. + * + * @return the table name + */ + String getTableName(); + + /** + * Returns the state of the policy. + * + * @return the state of the policy + */ + PolicyState getState(); + } +} diff --git a/core/src/main/java/com/scalar/db/api/AbacOperationAttributes.java b/core/src/main/java/com/scalar/db/api/AbacOperationAttributes.java new file mode 100644 index 0000000000..b558f8726a --- /dev/null +++ b/core/src/main/java/com/scalar/db/api/AbacOperationAttributes.java @@ -0,0 +1,47 @@ +package com.scalar.db.api; + +import java.util.Map; +import java.util.Optional; + +/** A utility class to manipulate the operation attributes for attribute-based access control. 
*/
+public final class AbacOperationAttributes {
+
+  private static final String OPERATION_ATTRIBUTE_PREFIX = "abac-";
+  public static final String READ_TAG_PREFIX = OPERATION_ATTRIBUTE_PREFIX + "read-tag-";
+  public static final String WRITE_TAG_PREFIX = OPERATION_ATTRIBUTE_PREFIX + "write-tag-";
+
+  private AbacOperationAttributes() {}
+
+  public static void setReadTag(Map<String, String> attributes, String policyName, String readTag) {
+    attributes.put(READ_TAG_PREFIX + policyName, readTag);
+  }
+
+  public static void clearReadTag(Map<String, String> attributes, String policyName) {
+    attributes.remove(READ_TAG_PREFIX + policyName);
+  }
+
+  public static void clearReadTags(Map<String, String> attributes) {
+    attributes.entrySet().removeIf(e -> e.getKey().startsWith(READ_TAG_PREFIX));
+  }
+
+  public static void setWriteTag(
+      Map<String, String> attributes, String policyName, String writeTag) {
+    attributes.put(WRITE_TAG_PREFIX + policyName, writeTag);
+  }
+
+  public static void clearWriteTag(Map<String, String> attributes, String policyName) {
+    attributes.remove(WRITE_TAG_PREFIX + policyName);
+  }
+
+  public static void clearWriteTags(Map<String, String> attributes) {
+    attributes.entrySet().removeIf(e -> e.getKey().startsWith(WRITE_TAG_PREFIX));
+  }
+
+  public static Optional<String> getReadTag(Operation operation, String policyName) {
+    return operation.getAttribute(READ_TAG_PREFIX + policyName);
+  }
+
+  public static Optional<String> getWriteTag(Operation operation, String policyName) {
+    return operation.getAttribute(WRITE_TAG_PREFIX + policyName);
+  }
+}
diff --git a/core/src/main/java/com/scalar/db/api/DeleteBuilder.java b/core/src/main/java/com/scalar/db/api/DeleteBuilder.java
index 0b2831d53a..e312d7d1a3 100644
--- a/core/src/main/java/com/scalar/db/api/DeleteBuilder.java
+++ b/core/src/main/java/com/scalar/db/api/DeleteBuilder.java
@@ -3,7 +3,11 @@
 import static com.google.common.base.Preconditions.checkNotNull;
 
 import com.google.common.collect.ImmutableMap;
+import com.scalar.db.api.OperationBuilder.AbacReadTagAttribute;
+import com.scalar.db.api.OperationBuilder.AbacWriteTagAttribute;
 import com.scalar.db.api.OperationBuilder.Attribute;
+import com.scalar.db.api.OperationBuilder.ClearAbacReadTagAttribute;
+import com.scalar.db.api.OperationBuilder.ClearAbacWriteTagAttribute;
 import com.scalar.db.api.OperationBuilder.ClearAttribute;
 import com.scalar.db.api.OperationBuilder.ClearClusteringKey;
 import com.scalar.db.api.OperationBuilder.ClearCondition;
@@ -68,7 +72,9 @@ public static class Buildable extends OperationBuilder.Buildable
       implements ClusteringKey,
           Consistency,
           Condition,
-          Attribute {
+          Attribute,
+          AbacReadTagAttribute,
+          AbacWriteTagAttribute {
     @Nullable Key clusteringKey;
     @Nullable com.scalar.db.api.Consistency consistency;
     @Nullable MutationCondition condition;
@@ -114,6 +120,22 @@ public Buildable attributes(Map<String, String> attributes) {
       return this;
     }
 
+    @Override
+    public Buildable readTag(String policyName, String readTag) {
+      checkNotNull(policyName);
+      checkNotNull(readTag);
+      AbacOperationAttributes.setReadTag(attributes, policyName, readTag);
+      return this;
+    }
+
+    @Override
+    public Buildable writeTag(String policyName, String writeTag) {
+      checkNotNull(policyName);
+      checkNotNull(writeTag);
+      AbacOperationAttributes.setWriteTag(attributes, policyName, writeTag);
+      return this;
+    }
+
     @Override
     public Delete build() {
       return new Delete(
@@ -134,7 +156,9 @@ public static class BuildableFromExisting extends Buildable
           ClearCondition,
           ClearClusteringKey,
           ClearNamespace,
-          ClearAttribute {
+          ClearAttribute,
+          ClearAbacReadTagAttribute,
+          ClearAbacWriteTagAttribute {
BuildableFromExisting(Delete delete) { super( @@ -192,6 +216,18 @@ public BuildableFromExisting attributes(Map attributes) { return this; } + @Override + public Buildable readTag(String policyName, String readTag) { + super.readTag(policyName, readTag); + return this; + } + + @Override + public Buildable writeTag(String policyName, String writeTag) { + super.writeTag(policyName, writeTag); + return this; + } + @Override public BuildableFromExisting condition(MutationCondition condition) { super.condition(condition); @@ -227,5 +263,29 @@ public BuildableFromExisting clearAttribute(String name) { this.attributes.remove(name); return this; } + + @Override + public BuildableFromExisting clearReadTag(String policyName) { + AbacOperationAttributes.clearReadTag(attributes, policyName); + return this; + } + + @Override + public BuildableFromExisting clearReadTags() { + AbacOperationAttributes.clearReadTags(attributes); + return this; + } + + @Override + public BuildableFromExisting clearWriteTag(String policyName) { + AbacOperationAttributes.clearWriteTag(attributes, policyName); + return this; + } + + @Override + public BuildableFromExisting clearWriteTags() { + AbacOperationAttributes.clearWriteTags(attributes); + return this; + } } } diff --git a/core/src/main/java/com/scalar/db/api/DistributedTransactionAdmin.java b/core/src/main/java/com/scalar/db/api/DistributedTransactionAdmin.java index 608a520062..3617e14270 100644 --- a/core/src/main/java/com/scalar/db/api/DistributedTransactionAdmin.java +++ b/core/src/main/java/com/scalar/db/api/DistributedTransactionAdmin.java @@ -8,7 +8,7 @@ * An administrative interface for distributed transaction implementations. The user can execute * administrative operations with it like createNamespace/createTable/getTableMetadata. */ -public interface DistributedTransactionAdmin extends Admin, AuthAdmin, AutoCloseable { +public interface DistributedTransactionAdmin extends Admin, AuthAdmin, AbacAdmin, AutoCloseable { /** * Creates coordinator namespace and tables. 
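For reference before the read-path builders below, a sketch of the new `Delete` surface; it mirrors `DeleteBuilderTest` in this patch, with the `sensitivity` policy and the `CF:FIN:HQ` tag syntax as illustrative assumptions:

```java
import com.scalar.db.api.AbacOperationAttributes;
import com.scalar.db.api.Delete;
import com.scalar.db.io.Key;

public class DeleteTagSketch {
  public static void main(String[] args) {
    Delete delete =
        Delete.newBuilder()
            .namespace("ns")
            .table("tbl")
            .partitionKey(Key.ofInt("pk", 0))
            .readTag("sensitivity", "CF:FIN:HQ") // attribute "abac-read-tag-sensitivity"
            .writeTag("sensitivity", "CF:FIN:HQ") // attribute "abac-write-tag-sensitivity"
            .build();

    // Tags are stored as plain operation attributes under policy-scoped keys
    System.out.println(AbacOperationAttributes.getReadTag(delete, "sensitivity"));  // Optional[CF:FIN:HQ]
    System.out.println(AbacOperationAttributes.getWriteTag(delete, "sensitivity")); // Optional[CF:FIN:HQ]
  }
}
```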
diff --git a/core/src/main/java/com/scalar/db/api/GetBuilder.java b/core/src/main/java/com/scalar/db/api/GetBuilder.java index 1ce8882862..0b4d2f8de9 100644 --- a/core/src/main/java/com/scalar/db/api/GetBuilder.java +++ b/core/src/main/java/com/scalar/db/api/GetBuilder.java @@ -4,9 +4,11 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; +import com.scalar.db.api.OperationBuilder.AbacReadTagAttribute; import com.scalar.db.api.OperationBuilder.And; import com.scalar.db.api.OperationBuilder.Attribute; import com.scalar.db.api.OperationBuilder.Buildable; +import com.scalar.db.api.OperationBuilder.ClearAbacReadTagAttribute; import com.scalar.db.api.OperationBuilder.ClearAttribute; import com.scalar.db.api.OperationBuilder.ClearClusteringKey; import com.scalar.db.api.OperationBuilder.ClearConditions; @@ -93,7 +95,8 @@ public static class BuildableGet extends Buildable implements ClusteringKey, Consistency, Projection, - Attribute { + Attribute, + AbacReadTagAttribute { final List projections = new ArrayList<>(); @Nullable Key clusteringKey; @Nullable com.scalar.db.api.Consistency consistency; @@ -158,6 +161,14 @@ public BuildableGet attributes(Map attributes) { return this; } + @Override + public BuildableGet readTag(String policyName, String readTag) { + checkNotNull(policyName); + checkNotNull(readTag); + AbacOperationAttributes.setReadTag(attributes, policyName, readTag); + return this; + } + @Override public Get build() { return build(ImmutableSet.of()); @@ -227,6 +238,12 @@ public BuildableGetWithPartitionKey attributes(Map attributes) { return this; } + @Override + public BuildableGet readTag(String policyName, String readTag) { + super.readTag(policyName, readTag); + return this; + } + @Override public BuildableGetWithOngoingWhere where(ConditionalExpression condition) { checkNotNull(condition); @@ -390,6 +407,7 @@ public static class BuildableGetWithIndex implements Consistency, Projection, Attribute, + AbacReadTagAttribute, OperationBuilder.Where, WhereAnd, WhereOr { @@ -447,6 +465,14 @@ public BuildableGetWithIndex attributes(Map attributes) { return this; } + @Override + public BuildableGetWithIndex readTag(String policyName, String readTag) { + checkNotNull(policyName); + checkNotNull(readTag); + AbacOperationAttributes.setReadTag(attributes, policyName, readTag); + return this; + } + @Override public BuildableGetWithIndexOngoingWhere where(ConditionalExpression condition) { checkNotNull(condition); @@ -602,7 +628,8 @@ public BuildableGetWithIndexOngoingWhereOr or(AndConditionSet andConditionSet) { public static class BuildableGetWithIndexWhere implements Consistency, Projection, - Attribute { + Attribute, + AbacReadTagAttribute { BuildableGetWithIndex buildableGetWithIndex; final SelectionBuilder.Where where; @@ -657,6 +684,12 @@ public BuildableGetWithIndexWhere attributes(Map attributes) { return this; } + @Override + public BuildableGetWithIndexWhere readTag(String policyName, String readTag) { + buildableGetWithIndex = buildableGetWithIndex.readTag(policyName, readTag); + return this; + } + public Get build() { return buildableGetWithIndex.build(getConjunctions(where)); } @@ -674,7 +707,8 @@ public static class BuildableGetOrGetWithIndexFromExisting extends BuildableGet ClearProjections, ClearClusteringKey, ClearNamespace, - ClearAttribute { + ClearAttribute, + ClearAbacReadTagAttribute { private Key indexKey; private final boolean isGetWithIndex; @@ -770,6 +804,12 @@ public BuildableGetOrGetWithIndexFromExisting attributes(Map att 
return this; } + @Override + public BuildableGet readTag(String policyName, String readTag) { + super.readTag(policyName, readTag); + return this; + } + @Override public BuildableGetFromExistingWithOngoingWhere where(ConditionalExpression condition) { checkConditionsEmpty(); @@ -845,6 +885,18 @@ public BuildableGetOrGetWithIndexFromExisting clearAttribute(String name) { return this; } + @Override + public BuildableGetOrGetWithIndexFromExisting clearReadTag(String policyName) { + AbacOperationAttributes.clearReadTag(attributes, policyName); + return this; + } + + @Override + public BuildableGetOrGetWithIndexFromExisting clearReadTags() { + AbacOperationAttributes.clearReadTags(attributes); + return this; + } + private void checkNotGet() { if (!isGetWithIndex) { throw new UnsupportedOperationException( @@ -910,9 +962,11 @@ public static class BuildableGetFromExistingWithWhere Consistency, Projection, Attribute, + AbacReadTagAttribute, ClearProjections, ClearNamespace, - ClearAttribute { + ClearAttribute, + ClearAbacReadTagAttribute { private final BuildableGetOrGetWithIndexFromExisting BuildableGetFromExisting; final SelectionBuilder.Where where; @@ -999,6 +1053,12 @@ public BuildableGetFromExistingWithWhere attributes(Map attribut return this; } + @Override + public BuildableGetFromExistingWithWhere readTag(String policyName, String readTag) { + BuildableGetFromExisting.readTag(policyName, readTag); + return this; + } + @Override public BuildableGetFromExistingWithWhere clearProjections() { BuildableGetFromExisting.clearProjections(); @@ -1023,6 +1083,18 @@ public BuildableGetFromExistingWithWhere clearAttribute(String name) { return this; } + @Override + public BuildableGetFromExistingWithWhere clearReadTag(String policyName) { + BuildableGetFromExisting.clearReadTag(policyName); + return this; + } + + @Override + public BuildableGetFromExistingWithWhere clearReadTags() { + BuildableGetFromExisting.clearReadTags(); + return this; + } + public Get build() { return BuildableGetFromExisting.build(getConjunctions(where)); } diff --git a/core/src/main/java/com/scalar/db/api/OperationBuilder.java b/core/src/main/java/com/scalar/db/api/OperationBuilder.java index 7c54695ace..6838222025 100644 --- a/core/src/main/java/com/scalar/db/api/OperationBuilder.java +++ b/core/src/main/java/com/scalar/db/api/OperationBuilder.java @@ -586,6 +586,66 @@ public interface ClearAttribute { T clearAttribute(String name); } + public interface AbacReadTagAttribute { + /** + * Adds a read tag attribute for the specified policy. This is a utility method for + * attribute-based access control. + * + * @param policyName the policy name + * @param readTag the read tag + * @return the operation builder + */ + T readTag(String policyName, String readTag); + } + + public interface AbacWriteTagAttribute { + /** + * Adds a write tag attribute for the specified policy. This is a utility method for + * attribute-based access control. + * + * @param policyName the policy name + * @param writeTag the write tag + * @return the operation builder + */ + T writeTag(String policyName, String writeTag); + } + + public interface ClearAbacReadTagAttribute { + /** + * Clear the read tag attribute for the specified policy. This is a utility method for + * attribute-based access control. + * + * @param policyName the policy name + * @return the operation builder + */ + T clearReadTag(String policyName); + + /** + * Clear all read tags. This is a utility method for attribute-based access control. 
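A sketch of the corresponding `Get` surface, mirroring the test code in this patch (policy name and tag syntax again assumed):

```java
import com.scalar.db.api.AbacOperationAttributes;
import com.scalar.db.api.Get;
import com.scalar.db.io.Key;

public class GetTagSketch {
  public static void main(String[] args) {
    Get get =
        Get.newBuilder()
            .namespace("ns")
            .table("tbl")
            .partitionKey(Key.ofInt("pk", 0))
            .readTag("sensitivity", "CF:FIN:HQ")
            .build();

    // The tag round-trips through the attribute map
    System.out.println(AbacOperationAttributes.getReadTag(get, "sensitivity")); // Optional[CF:FIN:HQ]

    // Rebuilding from an existing Get can strip the tag again
    Get cleared = Get.newBuilder(get).clearReadTag("sensitivity").build();
    System.out.println(AbacOperationAttributes.getReadTag(cleared, "sensitivity")); // Optional.empty
  }
}
```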
+ * + * @return the operation builder + */ + T clearReadTags(); + } + + public interface ClearAbacWriteTagAttribute { + /** + * Clear the write tag attribute for the specified policy. This is a utility method for + * attribute-based access control. + * + * @param policyName the policy name + * @return the operation builder + */ + T clearWriteTag(String policyName); + + /** + * Clear all write tags. This is a utility method for attribute-based access control. + * + * @return the operation builder + */ + T clearWriteTags(); + } + public abstract static class TableBuilder implements Table { final String namespace; diff --git a/core/src/main/java/com/scalar/db/api/ScanBuilder.java b/core/src/main/java/com/scalar/db/api/ScanBuilder.java index 32a38c16bb..116c6c9bb2 100644 --- a/core/src/main/java/com/scalar/db/api/ScanBuilder.java +++ b/core/src/main/java/com/scalar/db/api/ScanBuilder.java @@ -4,10 +4,12 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; +import com.scalar.db.api.OperationBuilder.AbacReadTagAttribute; import com.scalar.db.api.OperationBuilder.All; import com.scalar.db.api.OperationBuilder.And; import com.scalar.db.api.OperationBuilder.Attribute; import com.scalar.db.api.OperationBuilder.Buildable; +import com.scalar.db.api.OperationBuilder.ClearAbacReadTagAttribute; import com.scalar.db.api.OperationBuilder.ClearAttribute; import com.scalar.db.api.OperationBuilder.ClearBoundaries; import com.scalar.db.api.OperationBuilder.ClearConditions; @@ -106,7 +108,8 @@ public static class BuildableScan extends Buildable Consistency, Projection, Limit, - Attribute { + Attribute, + AbacReadTagAttribute { final List orderings = new ArrayList<>(); final List projections = new ArrayList<>(); @Nullable Key startClusteringKey; @@ -215,6 +218,14 @@ public BuildableScan attributes(Map attributes) { return this; } + @Override + public BuildableScan readTag(String policyName, String readTag) { + checkNotNull(policyName); + checkNotNull(readTag); + AbacOperationAttributes.setReadTag(attributes, policyName, readTag); + return this; + } + @Override public Scan build() { return build(ImmutableSet.of()); @@ -330,6 +341,12 @@ public BuildableScanWithPartitionKey attributes(Map attributes) return this; } + @Override + public BuildableScan readTag(String policyName, String readTag) { + super.readTag(policyName, readTag); + return this; + } + @Override public BuildableScanWithOngoingWhere where(ConditionalExpression condition) { checkNotNull(condition); @@ -498,7 +515,8 @@ public static class BuildableScanWithIndex WhereAnd, WhereOr, Limit, - Attribute { + Attribute, + AbacReadTagAttribute { @Nullable private final String namespaceName; private final String tableName; private final Key indexKey; @@ -560,6 +578,14 @@ public BuildableScanWithIndex attributes(Map attributes) { return this; } + @Override + public BuildableScanWithIndex readTag(String policyName, String readTag) { + checkNotNull(policyName); + checkNotNull(readTag); + AbacOperationAttributes.setReadTag(attributes, policyName, readTag); + return this; + } + @Override public BuildableScanWithIndexOngoingWhere where(ConditionalExpression condition) { checkNotNull(condition); @@ -717,7 +743,8 @@ public static class BuildableScanWithIndexWhere implements Consistency, Projection, Limit, - Attribute { + Attribute, + AbacReadTagAttribute { BuildableScanWithIndex buildableScanWithIndex; final Where where; @@ -778,6 +805,12 @@ public BuildableScanWithIndexWhere attributes(Map attributes) { return this; } + 
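The same attribute plumbing applies on the scan side; a sketch assuming the illustrative policy and tag from earlier:

```java
import com.scalar.db.api.Scan;

public class ScanTagSketch {
  public static Scan build() {
    return Scan.newBuilder()
        .namespace("ns")
        .table("tbl")
        .all()
        .readTag("sensitivity", "CF:FIN:HQ") // attribute "abac-read-tag-sensitivity"
        .limit(10)
        .build();
  }
}
```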
@Override + public BuildableScanWithIndexWhere readTag(String policyName, String readTag) { + buildableScanWithIndex = buildableScanWithIndex.readTag(policyName, readTag); + return this; + } + public Scan build() { return buildableScanWithIndex.build(getConjunctions(where)); } @@ -791,7 +824,8 @@ public static class BuildableScanAll WhereAnd, WhereOr, Limit, - Attribute { + Attribute, + AbacReadTagAttribute { private final String namespaceName; private final String tableName; private final List orderings = new ArrayList<>(); @@ -871,6 +905,14 @@ public BuildableScanAll attributes(Map attributes) { return this; } + @Override + public BuildableScanAll readTag(String policyName, String readTag) { + checkNotNull(policyName); + checkNotNull(readTag); + AbacOperationAttributes.setReadTag(attributes, policyName, readTag); + return this; + } + @Override public BuildableScanAllWithOngoingWhere where(ConditionalExpression condition) { checkNotNull(condition); @@ -1028,7 +1070,8 @@ public static class BuildableScanAllWithWhere Projection, Ordering, Limit, - Attribute { + Attribute, + AbacReadTagAttribute { final BuildableScanAll buildableScanAll; final Where where; @@ -1106,6 +1149,12 @@ public BuildableScanAllWithWhere attributes(Map attributes) { return this; } + @Override + public BuildableScanAllWithWhere readTag(String policyName, String readTag) { + buildableScanAll.readTag(policyName, readTag); + return this; + } + public Scan build() { return buildableScanAll.build(getConjunctions(where)); } @@ -1124,7 +1173,8 @@ public static class BuildableScanOrScanAllFromExisting extends BuildableScan ClearOrderings, ClearBoundaries, ClearNamespace, - ClearAttribute { + ClearAttribute, + ClearAbacReadTagAttribute { private final boolean isScanWithIndex; private final boolean isScanAll; @@ -1210,6 +1260,12 @@ public BuildableScanOrScanAllFromExisting attributes(Map attribu return this; } + @Override + public BuildableScan readTag(String policyName, String readTag) { + super.readTag(policyName, readTag); + return this; + } + @Override public BuildableScanOrScanAllFromExisting projection(String projection) { super.projection(projection); @@ -1374,6 +1430,18 @@ public BuildableScanOrScanAllFromExisting clearAttribute(String name) { return this; } + @Override + public BuildableScanOrScanAllFromExisting clearReadTag(String policyName) { + AbacOperationAttributes.clearReadTag(attributes, policyName); + return this; + } + + @Override + public BuildableScanOrScanAllFromExisting clearReadTags() { + AbacOperationAttributes.clearReadTags(attributes); + return this; + } + private void checkNotScanWithIndexOrScanAll() { if (isScanWithIndex || isScanAll) { throw new UnsupportedOperationException( @@ -1466,10 +1534,12 @@ public static class BuildableScanFromExistingWithWhere Ordering, Limit, Attribute, + AbacReadTagAttribute, ClearProjections, ClearOrderings, ClearNamespace, - ClearAttribute { + ClearAttribute, + ClearAbacReadTagAttribute { private final BuildableScanOrScanAllFromExisting buildableScanFromExisting; final Where where; @@ -1584,6 +1654,12 @@ public BuildableScanFromExistingWithWhere attributes(Map attribu return this; } + @Override + public BuildableScanFromExistingWithWhere readTag(String policyName, String readTag) { + buildableScanFromExisting.readTag(policyName, readTag); + return this; + } + @Override public BuildableScanFromExistingWithWhere clearProjections() { buildableScanFromExisting.clearProjections(); @@ -1614,6 +1690,18 @@ public BuildableScanFromExistingWithWhere clearAttribute(String name) { 
return this; } + @Override + public BuildableScanFromExistingWithWhere clearReadTag(String policyName) { + buildableScanFromExisting.clearReadTag(policyName); + return this; + } + + @Override + public BuildableScanFromExistingWithWhere clearReadTags() { + buildableScanFromExisting.clearReadTags(); + return this; + } + public Scan build() { return buildableScanFromExisting.build(getConjunctions(where)); } diff --git a/core/src/main/java/com/scalar/db/api/UpdateBuilder.java b/core/src/main/java/com/scalar/db/api/UpdateBuilder.java index 3b9d1fdf03..69975a81eb 100644 --- a/core/src/main/java/com/scalar/db/api/UpdateBuilder.java +++ b/core/src/main/java/com/scalar/db/api/UpdateBuilder.java @@ -3,7 +3,11 @@ import static com.google.common.base.Preconditions.checkNotNull; import com.google.common.collect.ImmutableMap; +import com.scalar.db.api.OperationBuilder.AbacReadTagAttribute; +import com.scalar.db.api.OperationBuilder.AbacWriteTagAttribute; import com.scalar.db.api.OperationBuilder.Attribute; +import com.scalar.db.api.OperationBuilder.ClearAbacReadTagAttribute; +import com.scalar.db.api.OperationBuilder.ClearAbacWriteTagAttribute; import com.scalar.db.api.OperationBuilder.ClearAttribute; import com.scalar.db.api.OperationBuilder.ClearClusteringKey; import com.scalar.db.api.OperationBuilder.ClearCondition; @@ -79,7 +83,9 @@ public static class Buildable extends OperationBuilder.Buildable implements ClusteringKey, Condition, Values, - Attribute { + Attribute, + AbacReadTagAttribute, + AbacWriteTagAttribute { final Map> columns = new LinkedHashMap<>(); @Nullable Key clusteringKey; @Nullable MutationCondition condition; @@ -111,6 +117,22 @@ public Buildable attributes(Map attributes) { return this; } + @Override + public Buildable readTag(String policyName, String readTag) { + checkNotNull(policyName); + checkNotNull(readTag); + AbacOperationAttributes.setReadTag(attributes, policyName, readTag); + return this; + } + + @Override + public Buildable writeTag(String policyName, String writeTag) { + checkNotNull(policyName); + checkNotNull(writeTag); + AbacOperationAttributes.setWriteTag(attributes, policyName, writeTag); + return this; + } + @Override public Buildable condition(MutationCondition condition) { checkNotNull(condition); @@ -238,7 +260,9 @@ public static class BuildableFromExisting extends Buildable ClearValues, ClearCondition, ClearNamespace, - ClearAttribute { + ClearAttribute, + ClearAbacReadTagAttribute, + ClearAbacWriteTagAttribute { BuildableFromExisting(Update update) { super( @@ -290,6 +314,18 @@ public BuildableFromExisting attributes(Map attributes) { return this; } + @Override + public Buildable readTag(String policyName, String readTag) { + super.readTag(policyName, readTag); + return this; + } + + @Override + public Buildable writeTag(String policyName, String writeTag) { + super.writeTag(policyName, writeTag); + return this; + } + @Override public BuildableFromExisting condition(MutationCondition condition) { super.condition(condition); @@ -421,5 +457,29 @@ public BuildableFromExisting clearAttribute(String name) { attributes.remove(name); return this; } + + @Override + public BuildableFromExisting clearReadTag(String policyName) { + AbacOperationAttributes.clearReadTag(attributes, policyName); + return this; + } + + @Override + public BuildableFromExisting clearReadTags() { + AbacOperationAttributes.clearReadTags(attributes); + return this; + } + + @Override + public BuildableFromExisting clearWriteTag(String policyName) { + 
AbacOperationAttributes.clearWriteTag(attributes, policyName); + return this; + } + + @Override + public BuildableFromExisting clearWriteTags() { + AbacOperationAttributes.clearWriteTags(attributes); + return this; + } } } diff --git a/core/src/main/java/com/scalar/db/api/UpsertBuilder.java b/core/src/main/java/com/scalar/db/api/UpsertBuilder.java index f5d7189c94..f30f5dff24 100644 --- a/core/src/main/java/com/scalar/db/api/UpsertBuilder.java +++ b/core/src/main/java/com/scalar/db/api/UpsertBuilder.java @@ -3,7 +3,11 @@ import static com.google.common.base.Preconditions.checkNotNull; import com.google.common.collect.ImmutableMap; +import com.scalar.db.api.OperationBuilder.AbacReadTagAttribute; +import com.scalar.db.api.OperationBuilder.AbacWriteTagAttribute; import com.scalar.db.api.OperationBuilder.Attribute; +import com.scalar.db.api.OperationBuilder.ClearAbacReadTagAttribute; +import com.scalar.db.api.OperationBuilder.ClearAbacWriteTagAttribute; import com.scalar.db.api.OperationBuilder.ClearAttribute; import com.scalar.db.api.OperationBuilder.ClearClusteringKey; import com.scalar.db.api.OperationBuilder.ClearNamespace; @@ -74,7 +78,11 @@ public Buildable partitionKey(Key partitionKey) { } public static class Buildable extends OperationBuilder.Buildable - implements ClusteringKey, Values, Attribute { + implements ClusteringKey, + Values, + Attribute, + AbacReadTagAttribute, + AbacWriteTagAttribute { final Map> columns = new LinkedHashMap<>(); @Nullable Key clusteringKey; final Map attributes = new HashMap<>(); @@ -105,6 +113,22 @@ public Buildable attributes(Map attributes) { return this; } + @Override + public Buildable readTag(String policyName, String readTag) { + checkNotNull(policyName); + checkNotNull(readTag); + AbacOperationAttributes.setReadTag(attributes, policyName, readTag); + return this; + } + + @Override + public Buildable writeTag(String policyName, String writeTag) { + checkNotNull(policyName); + checkNotNull(writeTag); + AbacOperationAttributes.setWriteTag(attributes, policyName, writeTag); + return this; + } + @Override public Buildable booleanValue(String columnName, boolean value) { columns.put(columnName, BooleanColumn.of(columnName, value)); @@ -223,7 +247,9 @@ public static class BuildableFromExisting extends Buildable ClearClusteringKey, ClearValues, ClearNamespace, - ClearAttribute { + ClearAttribute, + ClearAbacReadTagAttribute, + ClearAbacWriteTagAttribute { BuildableFromExisting(Upsert upsert) { super( @@ -274,6 +300,18 @@ public BuildableFromExisting attributes(Map attributes) { return this; } + @Override + public Buildable readTag(String policyName, String readTag) { + super.readTag(policyName, readTag); + return this; + } + + @Override + public Buildable writeTag(String policyName, String writeTag) { + super.writeTag(policyName, writeTag); + return this; + } + @Override public BuildableFromExisting booleanValue(String columnName, boolean value) { super.booleanValue(columnName, value); @@ -393,5 +431,29 @@ public BuildableFromExisting clearAttribute(String name) { attributes.remove(name); return this; } + + @Override + public BuildableFromExisting clearReadTag(String policyName) { + AbacOperationAttributes.clearReadTag(attributes, policyName); + return this; + } + + @Override + public BuildableFromExisting clearReadTags() { + AbacOperationAttributes.clearReadTags(attributes); + return this; + } + + @Override + public BuildableFromExisting clearWriteTag(String policyName) { + AbacOperationAttributes.clearWriteTag(attributes, policyName); + return this; + } 
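Finally, a sketch of the write-path builders; the `Update` mirrors `UpdateBuilderTest` in this patch, and the `booleanValue` column on the `Upsert` is just a placeholder:

```java
import com.scalar.db.api.Update;
import com.scalar.db.api.Upsert;
import com.scalar.db.io.Key;

public class WriteTagSketch {
  public static Update update() {
    return Update.newBuilder()
        .namespace("ns")
        .table("tbl")
        .partitionKey(Key.ofInt("pk", 0))
        .writeTag("sensitivity", "CF:FIN:HQ") // attribute "abac-write-tag-sensitivity"
        .build();
  }

  public static Upsert upsert() {
    return Upsert.newBuilder()
        .namespace("ns")
        .table("tbl")
        .partitionKey(Key.ofInt("pk", 0))
        .booleanValue("col", true)
        .readTag("sensitivity", "CF:FIN:HQ")
        .writeTag("sensitivity", "CF:FIN:HQ")
        .build();
  }
}
```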
+ + @Override + public BuildableFromExisting clearWriteTags() { + AbacOperationAttributes.clearWriteTags(attributes); + return this; + } } } diff --git a/core/src/main/java/com/scalar/db/common/DecoratedDistributedTransactionAdmin.java b/core/src/main/java/com/scalar/db/common/DecoratedDistributedTransactionAdmin.java index c33eb1bbc4..518fd40549 100644 --- a/core/src/main/java/com/scalar/db/common/DecoratedDistributedTransactionAdmin.java +++ b/core/src/main/java/com/scalar/db/common/DecoratedDistributedTransactionAdmin.java @@ -327,6 +327,228 @@ public Set getPrivileges(String username, String namespaceName, Strin return distributedTransactionAdmin.getPrivileges(username, namespaceName, tableName); } + @Override + public void createPolicy(String policyName, @Nullable String dataTagColumnName) + throws ExecutionException { + distributedTransactionAdmin.createPolicy(policyName, dataTagColumnName); + } + + @Override + public void enablePolicy(String policyName) throws ExecutionException { + distributedTransactionAdmin.enablePolicy(policyName); + } + + @Override + public void disablePolicy(String policyName) throws ExecutionException { + distributedTransactionAdmin.disablePolicy(policyName); + } + + @Override + public Optional getPolicy(String policyName) throws ExecutionException { + return distributedTransactionAdmin.getPolicy(policyName); + } + + @Override + public List getPolicies() throws ExecutionException { + return distributedTransactionAdmin.getPolicies(); + } + + @Override + public void createLevel( + String policyName, String levelShortName, String levelLongName, int levelNumber) + throws ExecutionException { + distributedTransactionAdmin.createLevel(policyName, levelShortName, levelLongName, levelNumber); + } + + @Override + public void dropLevel(String policyName, String levelShortName) throws ExecutionException { + distributedTransactionAdmin.dropLevel(policyName, levelShortName); + } + + @Override + public Optional getLevel(String policyName, String levelShortName) + throws ExecutionException { + return distributedTransactionAdmin.getLevel(policyName, levelShortName); + } + + @Override + public List getLevels(String policyName) throws ExecutionException { + return distributedTransactionAdmin.getLevels(policyName); + } + + @Override + public void createCompartment( + String policyName, String compartmentShortName, String compartmentLongName) + throws ExecutionException { + distributedTransactionAdmin.createCompartment( + policyName, compartmentShortName, compartmentLongName); + } + + @Override + public void dropCompartment(String policyName, String compartmentShortName) + throws ExecutionException { + distributedTransactionAdmin.dropCompartment(policyName, compartmentShortName); + } + + @Override + public Optional getCompartment(String policyName, String compartmentShortName) + throws ExecutionException { + return distributedTransactionAdmin.getCompartment(policyName, compartmentShortName); + } + + @Override + public List getCompartments(String policyName) throws ExecutionException { + return distributedTransactionAdmin.getCompartments(policyName); + } + + @Override + public void createGroup( + String policyName, + String groupShortName, + String groupLongName, + @Nullable String parentGroupShortName) + throws ExecutionException { + distributedTransactionAdmin.createGroup( + policyName, groupShortName, groupLongName, parentGroupShortName); + } + + @Override + public void dropGroup(String policyName, String groupShortName) throws ExecutionException { + 
distributedTransactionAdmin.dropGroup(policyName, groupShortName); + } + + @Override + public Optional getGroup(String policyName, String groupShortName) + throws ExecutionException { + return distributedTransactionAdmin.getGroup(policyName, groupShortName); + } + + @Override + public List getGroups(String policyName) throws ExecutionException { + return distributedTransactionAdmin.getGroups(policyName); + } + + @Override + public void setLevelsToUser( + String policyName, + String username, + String levelShortName, + @Nullable String defaultLevelShortName, + @Nullable String rowLevelShortName) + throws ExecutionException { + distributedTransactionAdmin.setLevelsToUser( + policyName, username, levelShortName, defaultLevelShortName, rowLevelShortName); + } + + @Override + public void addCompartmentToUser( + String policyName, + String username, + String compartmentShortName, + AccessMode accessMode, + boolean defaultCompartment, + boolean rowCompartment) + throws ExecutionException { + distributedTransactionAdmin.addCompartmentToUser( + policyName, username, compartmentShortName, accessMode, defaultCompartment, rowCompartment); + } + + @Override + public void removeCompartmentFromUser( + String policyName, String username, String compartmentShortName) throws ExecutionException { + distributedTransactionAdmin.removeCompartmentFromUser( + policyName, username, compartmentShortName); + } + + @Override + public void addGroupToUser( + String policyName, + String username, + String groupShortName, + AccessMode accessMode, + boolean defaultGroup, + boolean rowGroup) + throws ExecutionException { + distributedTransactionAdmin.addGroupToUser( + policyName, username, groupShortName, accessMode, defaultGroup, rowGroup); + } + + @Override + public void removeGroupFromUser(String policyName, String username, String groupShortName) + throws ExecutionException { + distributedTransactionAdmin.removeGroupFromUser(policyName, username, groupShortName); + } + + @Override + public void dropUserTagInfoFromUser(String policyName, String username) + throws ExecutionException { + distributedTransactionAdmin.dropUserTagInfoFromUser(policyName, username); + } + + @Override + public Optional getUserTagInfo(String policyName, String username) + throws ExecutionException { + return distributedTransactionAdmin.getUserTagInfo(policyName, username); + } + + @Override + public void applyPolicyToNamespace(String policyName, String namespaceName) + throws ExecutionException { + distributedTransactionAdmin.applyPolicyToNamespace(policyName, namespaceName); + } + + @Override + public void enableNamespacePolicy(String policyName, String namespaceName) + throws ExecutionException { + distributedTransactionAdmin.enableNamespacePolicy(policyName, namespaceName); + } + + @Override + public void disableNamespacePolicy(String policyName, String namespaceName) + throws ExecutionException { + distributedTransactionAdmin.disableNamespacePolicy(policyName, namespaceName); + } + + @Override + public Optional getNamespacePolicy(String policyName, String namespaceName) + throws ExecutionException { + return distributedTransactionAdmin.getNamespacePolicy(policyName, namespaceName); + } + + @Override + public List getNamespacePolicies() throws ExecutionException { + return distributedTransactionAdmin.getNamespacePolicies(); + } + + @Override + public void applyPolicyToTable(String policyName, String namespaceName, String tableName) + throws ExecutionException { + distributedTransactionAdmin.applyPolicyToTable(policyName, namespaceName, 
tableName); + } + + @Override + public void enableTablePolicy(String policyName, String namespaceName, String tableName) + throws ExecutionException { + distributedTransactionAdmin.enableTablePolicy(policyName, namespaceName, tableName); + } + + @Override + public void disableTablePolicy(String policyName, String namespaceName, String tableName) + throws ExecutionException { + distributedTransactionAdmin.disableTablePolicy(policyName, namespaceName, tableName); + } + + @Override + public Optional getTablePolicy( + String policyName, String namespaceName, String tableName) throws ExecutionException { + return distributedTransactionAdmin.getTablePolicy(policyName, namespaceName, tableName); + } + + @Override + public List getTablePolicies() throws ExecutionException { + return distributedTransactionAdmin.getTablePolicies(); + } + @Override public void close() { distributedTransactionAdmin.close(); diff --git a/core/src/main/java/com/scalar/db/common/error/CoreError.java b/core/src/main/java/com/scalar/db/common/error/CoreError.java index b02b3c45a6..5d2eba33e5 100644 --- a/core/src/main/java/com/scalar/db/common/error/CoreError.java +++ b/core/src/main/java/com/scalar/db/common/error/CoreError.java @@ -690,6 +690,12 @@ public enum CoreError implements ScalarDbError { ""), DATA_LOADER_ERROR_METHOD_NULL_ARGUMENT( Category.USER_ERROR, "0151", "Method null argument not allowed", "", ""), + ABAC_NOT_ENABLED( + Category.USER_ERROR, + "0152", + "The attribute-based access control feature is not enabled. To use this feature, you must enable it. Note that this feature is supported only in the ScalarDB Enterprise edition", + "", + ""), // // Errors for the concurrency error category diff --git a/core/src/test/java/com/scalar/db/api/AbacOperationAttributesTest.java b/core/src/test/java/com/scalar/db/api/AbacOperationAttributesTest.java new file mode 100644 index 0000000000..56ed98af0f --- /dev/null +++ b/core/src/test/java/com/scalar/db/api/AbacOperationAttributesTest.java @@ -0,0 +1,152 @@ +package com.scalar.db.api; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.scalar.db.io.Key; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import org.junit.jupiter.api.Test; + +public class AbacOperationAttributesTest { + + @Test + public void setReadTag_ShouldSetReadTag() { + // Arrange + Map attributes = new HashMap<>(); + String policyName = "policyName"; + String readTag = "readTag"; + + // Act + AbacOperationAttributes.setReadTag(attributes, policyName, readTag); + + // Assert + assertThat(attributes) + .containsEntry(AbacOperationAttributes.READ_TAG_PREFIX + policyName, readTag); + } + + @Test + public void clearReadTag_ShouldClearReadTag() { + // Arrange + Map attributes = new HashMap<>(); + String policyName = "policyName"; + String readTag = "readTag"; + AbacOperationAttributes.setReadTag(attributes, policyName, readTag); + + // Act + AbacOperationAttributes.clearReadTag(attributes, policyName); + + // Assert + assertThat(attributes).doesNotContainKey(AbacOperationAttributes.READ_TAG_PREFIX + policyName); + } + + @Test + public void clearReadTags_ShouldClearReadTags() { + // Arrange + Map attributes = new HashMap<>(); + String policyName1 = "policyName1"; + String policyName2 = "policyName2"; + String readTag1 = "readTag1"; + String readTag2 = "readTag2"; + AbacOperationAttributes.setReadTag(attributes, policyName1, readTag1); + AbacOperationAttributes.setReadTag(attributes, policyName2, readTag2); + + // Act + 
AbacOperationAttributes.clearReadTags(attributes); + + // Assert + assertThat(attributes).doesNotContainKey(AbacOperationAttributes.READ_TAG_PREFIX + policyName1); + assertThat(attributes).doesNotContainKey(AbacOperationAttributes.READ_TAG_PREFIX + policyName2); + } + + @Test + public void setWriteTag_ShouldSetWriteTag() { + // Arrange + Map attributes = new HashMap<>(); + String policyName = "policyName"; + String writeTag = "writeTag"; + + // Act + AbacOperationAttributes.setWriteTag(attributes, policyName, writeTag); + + // Assert + assertThat(attributes) + .containsEntry(AbacOperationAttributes.WRITE_TAG_PREFIX + policyName, writeTag); + } + + @Test + public void clearWriteTag_ShouldClearWriteTag() { + // Arrange + Map attributes = new HashMap<>(); + String policyName = "policyName"; + String writeTag = "writeTag"; + AbacOperationAttributes.setWriteTag(attributes, policyName, writeTag); + + // Act + AbacOperationAttributes.clearWriteTag(attributes, policyName); + + // Assert + assertThat(attributes).doesNotContainKey(AbacOperationAttributes.WRITE_TAG_PREFIX + policyName); + } + + @Test + public void clearWriteTags_ShouldClearWriteTags() { + // Arrange + Map attributes = new HashMap<>(); + String policyName1 = "policyName1"; + String policyName2 = "policyName2"; + String writeTag1 = "writeTag1"; + String writeTag2 = "writeTag2"; + AbacOperationAttributes.setWriteTag(attributes, policyName1, writeTag1); + AbacOperationAttributes.setWriteTag(attributes, policyName2, writeTag2); + + // Act + AbacOperationAttributes.clearWriteTags(attributes); + + // Assert + assertThat(attributes) + .doesNotContainKey(AbacOperationAttributes.WRITE_TAG_PREFIX + policyName1); + assertThat(attributes) + .doesNotContainKey(AbacOperationAttributes.WRITE_TAG_PREFIX + policyName2); + } + + @Test + public void getReadTag_ShouldReturnReadTag() { + // Arrange + String policyName = "policyName"; + String readTag = "readTag"; + Operation operation = + Get.newBuilder() + .namespace("ns") + .table("tbl") + .partitionKey(Key.ofInt("pk", 0)) + .readTag(policyName, readTag) + .build(); + + // Act + Optional actual = AbacOperationAttributes.getReadTag(operation, policyName); + + // Assert + assertThat(actual).hasValue(readTag); + } + + @Test + public void getWriteTag_ShouldReturnWriteTag() { + // Arrange + String policyName = "policyName"; + String writeTag = "writeTag"; + Operation operation = + Update.newBuilder() + .namespace("ns") + .table("tbl") + .partitionKey(Key.ofInt("pk", 0)) + .writeTag(policyName, writeTag) + .build(); + + // Act + Optional actual = AbacOperationAttributes.getWriteTag(operation, policyName); + + // Assert + assertThat(actual).hasValue(writeTag); + } +} diff --git a/core/src/test/java/com/scalar/db/api/DeleteBuilderTest.java b/core/src/test/java/com/scalar/db/api/DeleteBuilderTest.java index c704c8169e..853a87b356 100644 --- a/core/src/test/java/com/scalar/db/api/DeleteBuilderTest.java +++ b/core/src/test/java/com/scalar/db/api/DeleteBuilderTest.java @@ -66,6 +66,8 @@ public void build_WithAllParameters_ShouldBuildDeleteWithAllParameters() { .condition(condition1) .attribute("a1", "v1") .attributes(ImmutableMap.of("a2", "v2", "a3", "v3")) + .readTag("policyName1", "readTag") + .writeTag("policyName2", "writeTag") .build(); // Assert @@ -77,7 +79,17 @@ public void build_WithAllParameters_ShouldBuildDeleteWithAllParameters() { partitionKey1, clusteringKey1, Consistency.EVENTUAL, - ImmutableMap.of("a1", "v1", "a2", "v2", "a3", "v3"), + ImmutableMap.of( + "a1", + "v1", + "a2", + "v2", + "a3", + "v3", 
+ AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", + "readTag", + AbacOperationAttributes.WRITE_TAG_PREFIX + "policyName2", + "writeTag"), condition1)); } @@ -101,7 +113,7 @@ public void build_FromExistingWithoutChange_ShouldCopy() { @Test public void build_FromExistingAndUpdateAllParameters_ShouldBuildDeleteWithUpdatedParameters() { // Arrange - Delete existingDelete = + Delete existingDelete1 = new Delete( NAMESPACE_1, TABLE_1, @@ -110,10 +122,23 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildDeleteWithUpdate Consistency.LINEARIZABLE, ImmutableMap.of("a1", "v1", "a2", "v2", "a3", "v3"), condition1); + Delete existingDelete2 = + new Delete( + NAMESPACE_1, + TABLE_1, + partitionKey1, + clusteringKey1, + Consistency.LINEARIZABLE, + ImmutableMap.of( + AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", + "readTag", + AbacOperationAttributes.WRITE_TAG_PREFIX + "policyName2", + "writeTag"), + condition1); // Act - Delete newDelete = - Delete.newBuilder(existingDelete) + Delete newDelete1 = + Delete.newBuilder(existingDelete1) .partitionKey(partitionKey2) .clusteringKey(clusteringKey2) .namespace(NAMESPACE_2) @@ -124,10 +149,17 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildDeleteWithUpdate .attribute("a4", "v4") .attributes(ImmutableMap.of("a5", "v5", "a6", "v6", "a7", "v7")) .clearAttribute("a7") + .readTag("policyName1", "readTag") + .writeTag("policyName2", "writeTag") + .build(); + Delete newDelete2 = + Delete.newBuilder(existingDelete2) + .clearReadTag("policyName1") + .clearWriteTag("policyName2") .build(); // Assert - assertThat(newDelete) + assertThat(newDelete1) .isEqualTo( new Delete( NAMESPACE_2, @@ -135,8 +167,28 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildDeleteWithUpdate partitionKey2, clusteringKey2, Consistency.EVENTUAL, - ImmutableMap.of("a4", "v4", "a5", "v5", "a6", "v6"), + ImmutableMap.of( + "a4", + "v4", + "a5", + "v5", + "a6", + "v6", + AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", + "readTag", + AbacOperationAttributes.WRITE_TAG_PREFIX + "policyName2", + "writeTag"), condition2)); + assertThat(newDelete2) + .isEqualTo( + new Delete( + NAMESPACE_1, + TABLE_1, + partitionKey1, + clusteringKey1, + Consistency.LINEARIZABLE, + ImmutableMap.of(), + condition1)); } @Test diff --git a/core/src/test/java/com/scalar/db/api/GetBuilderTest.java b/core/src/test/java/com/scalar/db/api/GetBuilderTest.java index e4a9bbd1a0..d99782ba33 100644 --- a/core/src/test/java/com/scalar/db/api/GetBuilderTest.java +++ b/core/src/test/java/com/scalar/db/api/GetBuilderTest.java @@ -9,6 +9,7 @@ import com.scalar.db.api.Selection.Conjunction; import com.scalar.db.io.Key; import java.util.Arrays; +import java.util.Collections; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.Mock; @@ -76,6 +77,7 @@ public void buildGet_WithClusteringKey_ShouldBuildGetWithClusteringKey() { .projections("c5", "c6") .attribute("a1", "v1") .attributes(ImmutableMap.of("a2", "v2", "a3", "v3")) + .readTag("policyName1", "readTag") .build(); // Assert @@ -87,7 +89,15 @@ public void buildGet_WithClusteringKey_ShouldBuildGetWithClusteringKey() { partitionKey1, clusteringKey1, Consistency.EVENTUAL, - ImmutableMap.of("a1", "v1", "a2", "v2", "a3", "v3"), + ImmutableMap.of( + "a1", + "v1", + "a2", + "v2", + "a3", + "v3", + AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", + "readTag"), Arrays.asList("c1", "c2", "c3", "c4", "c5", "c6"), ImmutableSet.of())); } @@ -627,7 +637,7 @@ public void 
buildGet_FromExistingWithoutChange_ShouldCopy() { public void buildGet_FromExistingAndUpdateAllParametersExceptConjunctions_ShouldBuildGetWithUpdatedParameters() { // Arrange - Get existingGet = + Get existingGet1 = new Get( NAMESPACE_1, TABLE_1, @@ -643,10 +653,20 @@ public void buildGet_FromExistingWithoutChange_ShouldCopy() { Conjunction.of( ConditionBuilder.column("ck4").isGreaterThanInt(10), ConditionBuilder.column("col1").isGreaterThanInt(10)))); + Get existingGet2 = + new Get( + NAMESPACE_1, + TABLE_1, + partitionKey1, + clusteringKey1, + Consistency.LINEARIZABLE, + ImmutableMap.of(AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", "readTag"), + Collections.emptyList(), + ImmutableSet.of()); // Act - Get newGet = - Get.newBuilder(existingGet) + Get newGet1 = + Get.newBuilder(existingGet1) .partitionKey(partitionKey2) .clusteringKey(clusteringKey2) .namespace(NAMESPACE_2) @@ -660,10 +680,12 @@ public void buildGet_FromExistingWithoutChange_ShouldCopy() { .attribute("a4", "v4") .attributes(ImmutableMap.of("a5", "v5", "a6", "v6", "a7", "v7")) .clearAttribute("a7") + .readTag("policyName1", "readTag") .build(); + Get newGet2 = Get.newBuilder(existingGet2).clearReadTag("policyName1").build(); // Assert - assertThat(newGet) + assertThat(newGet1) .isEqualTo( new Get( NAMESPACE_2, @@ -671,7 +693,15 @@ public void buildGet_FromExistingWithoutChange_ShouldCopy() { partitionKey2, clusteringKey2, Consistency.EVENTUAL, - ImmutableMap.of("a4", "v4", "a5", "v5", "a6", "v6"), + ImmutableMap.of( + "a4", + "v4", + "a5", + "v5", + "a6", + "v6", + AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", + "readTag"), Arrays.asList("c3", "c4", "c5", "c6", "c7"), ImmutableSet.of( Conjunction.of( @@ -680,6 +710,17 @@ public void buildGet_FromExistingWithoutChange_ShouldCopy() { Conjunction.of( ConditionBuilder.column("ck4").isGreaterThanInt(10), ConditionBuilder.column("col1").isGreaterThanInt(10))))); + assertThat(newGet2) + .isEqualTo( + new Get( + NAMESPACE_1, + TABLE_1, + partitionKey1, + clusteringKey1, + Consistency.LINEARIZABLE, + ImmutableMap.of(), + Collections.emptyList(), + ImmutableSet.of())); } @Test @@ -1230,6 +1271,7 @@ public void buildGetWithIndex_WithMandatoryParameters_ShouldBuildGetWithMandator .projections("c5", "c6") .attribute("a1", "v1") .attributes(ImmutableMap.of("a2", "v2", "a3", "v3")) + .readTag("policyName1", "readTag") .build(); // Assert @@ -1240,7 +1282,15 @@ public void buildGetWithIndex_WithMandatoryParameters_ShouldBuildGetWithMandator TABLE_1, indexKey1, Consistency.EVENTUAL, - ImmutableMap.of("a1", "v1", "a2", "v2", "a3", "v3"), + ImmutableMap.of( + "a1", + "v1", + "a2", + "v2", + "a3", + "v3", + AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", + "readTag"), Arrays.asList("c1", "c2", "c3", "c4", "c5", "c6"), ImmutableSet.of())); } @@ -1706,7 +1756,7 @@ public void buildGetWithIndex_FromExistingWithoutChange_ShouldCopy() { public void buildGetWithIndex_FromExistingAndUpdateAllParameters_ShouldBuildGetWithUpdatedParameters() { // Arrange - GetWithIndex existingGet = + GetWithIndex existingGet1 = new GetWithIndex( NAMESPACE_1, TABLE_1, @@ -1715,10 +1765,19 @@ public void buildGetWithIndex_FromExistingWithoutChange_ShouldCopy() { ImmutableMap.of("a1", "v1", "a2", "v2", "a3", "v3"), Arrays.asList("c1", "c2"), ImmutableSet.of()); + GetWithIndex existingGet2 = + new GetWithIndex( + NAMESPACE_1, + TABLE_1, + indexKey1, + Consistency.EVENTUAL, + ImmutableMap.of(AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", "readTag"), + Collections.emptyList(), + 
ImmutableSet.of()); // Act - Get newGet = - Get.newBuilder(existingGet) + Get newGet1 = + Get.newBuilder(existingGet1) .indexKey(indexKey2) .namespace(NAMESPACE_2) .table(TABLE_2) @@ -1731,19 +1790,39 @@ public void buildGetWithIndex_FromExistingWithoutChange_ShouldCopy() { .attribute("a4", "v4") .attributes(ImmutableMap.of("a5", "v5", "a6", "v6", "a7", "v7")) .clearAttribute("a7") + .readTag("policyName1", "readTag") .build(); + Get newGet2 = Get.newBuilder(existingGet2).clearReadTag("policyName1").build(); // Assert - assertThat(newGet) + assertThat(newGet1) .isEqualTo( new GetWithIndex( NAMESPACE_2, TABLE_2, indexKey2, Consistency.EVENTUAL, - ImmutableMap.of("a4", "v4", "a5", "v5", "a6", "v6"), + ImmutableMap.of( + "a4", + "v4", + "a5", + "v5", + "a6", + "v6", + AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", + "readTag"), Arrays.asList("c3", "c4", "c5", "c6", "c7"), ImmutableSet.of())); + assertThat(newGet2) + .isEqualTo( + new GetWithIndex( + NAMESPACE_1, + TABLE_1, + indexKey1, + Consistency.EVENTUAL, + ImmutableMap.of(), + Collections.emptyList(), + ImmutableSet.of())); } @Test diff --git a/core/src/test/java/com/scalar/db/api/ScanBuilderTest.java b/core/src/test/java/com/scalar/db/api/ScanBuilderTest.java index 32b7b159f1..db8d8fe2c6 100644 --- a/core/src/test/java/com/scalar/db/api/ScanBuilderTest.java +++ b/core/src/test/java/com/scalar/db/api/ScanBuilderTest.java @@ -9,6 +9,7 @@ import com.scalar.db.api.Selection.Conjunction; import com.scalar.db.io.Key; import java.util.Arrays; +import java.util.Collections; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.Mock; @@ -102,6 +103,7 @@ public void buildScan_ScanWithAllParameters_ShouldBuildScanCorrectly() { .consistency(Consistency.EVENTUAL) .attribute("a1", "v1") .attributes(ImmutableMap.of("a2", "v2", "a3", "v3")) + .readTag("policyName1", "readTag") .build(); // Assert @@ -112,7 +114,15 @@ public void buildScan_ScanWithAllParameters_ShouldBuildScanCorrectly() { TABLE_1, partitionKey1, Consistency.EVENTUAL, - ImmutableMap.of("a1", "v1", "a2", "v2", "a3", "v3"), + ImmutableMap.of( + "a1", + "v1", + "a2", + "v2", + "a3", + "v3", + AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", + "readTag"), Arrays.asList("pk1", "ck1", "ck2", "ck3", "ck4"), ImmutableSet.of(), startClusteringKey1, @@ -201,7 +211,7 @@ public void buildScan_FromExistingWithoutChange_ShouldCopy() { @Test public void buildScan_FromExistingAndUpdateAllParameters_ShouldBuildScanWithUpdatedParameters() { // Arrange - Scan existingScan = + Scan existingScan1 = new Scan( NAMESPACE_1, TABLE_1, @@ -216,10 +226,25 @@ public void buildScan_FromExistingAndUpdateAllParameters_ShouldBuildScanWithUpda true, Arrays.asList(ordering1, ordering2), 10); + Scan existingScan2 = + new Scan( + NAMESPACE_1, + TABLE_1, + partitionKey1, + Consistency.EVENTUAL, + ImmutableMap.of(AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", "readTag"), + Collections.emptyList(), + ImmutableSet.of(), + startClusteringKey1, + false, + endClusteringKey1, + false, + Arrays.asList(ordering1, ordering2), + 10); // Act - Scan newScan = - Scan.newBuilder(existingScan) + Scan newScan1 = + Scan.newBuilder(existingScan1) .namespace(NAMESPACE_2) .table(TABLE_2) .partitionKey(partitionKey2) @@ -239,17 +264,27 @@ public void buildScan_FromExistingAndUpdateAllParameters_ShouldBuildScanWithUpda .attribute("a4", "v4") .attributes(ImmutableMap.of("a5", "v5", "a6", "v6", "a7", "v7")) .clearAttribute("a7") + .readTag("policyName1", "readTag") .build(); + 
Scan newScan2 = Scan.newBuilder(existingScan2).clearReadTag("policyName1").build(); // Assert - assertThat(newScan) + assertThat(newScan1) .isEqualTo( new Scan( NAMESPACE_2, TABLE_2, partitionKey2, Consistency.LINEARIZABLE, - ImmutableMap.of("a4", "v4", "a5", "v5", "a6", "v6"), + ImmutableMap.of( + "a4", + "v4", + "a5", + "v5", + "a6", + "v6", + AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", + "readTag"), Arrays.asList("pk2", "ck2", "ck3", "ck4", "ck5"), ImmutableSet.of(), startClusteringKey2, @@ -258,6 +293,22 @@ public void buildScan_FromExistingAndUpdateAllParameters_ShouldBuildScanWithUpda false, Arrays.asList(ordering3, ordering4, ordering5, ordering1, ordering2), 5)); + assertThat(newScan2) + .isEqualTo( + new Scan( + NAMESPACE_1, + TABLE_1, + partitionKey1, + Consistency.EVENTUAL, + ImmutableMap.of(), + Collections.emptyList(), + ImmutableSet.of(), + startClusteringKey1, + false, + endClusteringKey1, + false, + Arrays.asList(ordering1, ordering2), + 10)); } @Test @@ -329,6 +380,7 @@ public void buildScanAll_ScanWithAllParameters_ShouldBuildScanCorrectly() { .where(ConditionBuilder.column("ck1").isGreaterThanInt(10)) .attribute("a1", "v1") .attributes(ImmutableMap.of("a2", "v2", "a3", "v3")) + .readTag("policyName1", "readTag") .build(); // Assert @@ -338,7 +390,15 @@ public void buildScanAll_ScanWithAllParameters_ShouldBuildScanCorrectly() { NAMESPACE_1, TABLE_1, Consistency.EVENTUAL, - ImmutableMap.of("a1", "v1", "a2", "v2", "a3", "v3"), + ImmutableMap.of( + "a1", + "v1", + "a2", + "v2", + "a3", + "v3", + AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", + "readTag"), Arrays.asList("pk1", "ck1", "ck2", "ck3", "ck4"), ImmutableSet.of( Conjunction.of(ConditionBuilder.column("ck1").isGreaterThanInt(10))), @@ -368,7 +428,7 @@ public void buildScanAll_FromExistingWithoutChange_ShouldCopy() { public void buildScanAll_FromExistingAndUpdateAllParameters_ShouldBuildScanWithUpdatedParameters() { // Arrange - Scan existingScan = + Scan existingScan1 = new ScanAll( NAMESPACE_1, TABLE_1, @@ -378,10 +438,20 @@ public void buildScanAll_FromExistingWithoutChange_ShouldCopy() { ImmutableSet.of(), ImmutableList.of(ordering1, ordering2), 10); + Scan existingScan2 = + new ScanAll( + NAMESPACE_1, + TABLE_1, + Consistency.EVENTUAL, + ImmutableMap.of(AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", "readTag"), + Collections.emptyList(), + ImmutableSet.of(), + ImmutableList.of(ordering1, ordering2), + 10); // Act - Scan newScan = - Scan.newBuilder(existingScan) + Scan newScan1 = + Scan.newBuilder(existingScan1) .namespace(NAMESPACE_2) .table(TABLE_2) .limit(5) @@ -398,20 +468,41 @@ public void buildScanAll_FromExistingWithoutChange_ShouldCopy() { .attribute("a4", "v4") .attributes(ImmutableMap.of("a5", "v5", "a6", "v6", "a7", "v7")) .clearAttribute("a7") + .readTag("policyName1", "readTag") .build(); + Scan newScan2 = Scan.newBuilder(existingScan2).clearReadTag("policyName1").build(); // Assert - assertThat(newScan) + assertThat(newScan1) .isEqualTo( new ScanAll( NAMESPACE_2, TABLE_2, Consistency.LINEARIZABLE, - ImmutableMap.of("a4", "v4", "a5", "v5", "a6", "v6"), + ImmutableMap.of( + "a4", + "v4", + "a5", + "v5", + "a6", + "v6", + AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", + "readTag"), Arrays.asList("pk2", "ck2", "ck3", "ck4", "ck5"), ImmutableSet.of(), ImmutableList.of(ordering3, ordering4, ordering5, ordering1, ordering2), 5)); + assertThat(newScan2) + .isEqualTo( + new ScanAll( + NAMESPACE_1, + TABLE_1, + Consistency.EVENTUAL, + ImmutableMap.of(), + 
Collections.emptyList(), + ImmutableSet.of(), + ImmutableList.of(ordering1, ordering2), + 10)); } @Test @@ -475,6 +566,7 @@ public void buildScanWithIndex_ScanWithAllParameters_ShouldBuildScanCorrectly() .consistency(Consistency.EVENTUAL) .attribute("a1", "v1") .attributes(ImmutableMap.of("a2", "v2", "a3", "v3")) + .readTag("policyName1", "readTag") .build(); // Assert @@ -485,7 +577,15 @@ public void buildScanWithIndex_ScanWithAllParameters_ShouldBuildScanCorrectly() TABLE_1, indexKey1, Consistency.EVENTUAL, - ImmutableMap.of("a1", "v1", "a2", "v2", "a3", "v3"), + ImmutableMap.of( + "a1", + "v1", + "a2", + "v2", + "a3", + "v3", + AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", + "readTag"), Arrays.asList("pk1", "ck1", "ck2", "ck3", "ck4"), ImmutableSet.of(), 10)); @@ -513,7 +613,7 @@ public void buildScanWithIndex_FromExistingWithoutChange_ShouldCopy() { public void buildScanWithIndex_FromExistingAndUpdateAllParameters_ShouldBuildScanWithUpdatedParameters() { // Arrange - Scan existingScan = + Scan existingScan1 = new ScanWithIndex( NAMESPACE_1, TABLE_1, @@ -523,10 +623,20 @@ public void buildScanWithIndex_FromExistingWithoutChange_ShouldCopy() { Arrays.asList("pk1", "ck1"), ImmutableSet.of(), 10); + Scan existingScan2 = + new ScanWithIndex( + NAMESPACE_1, + TABLE_1, + indexKey1, + Consistency.EVENTUAL, + ImmutableMap.of(AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", "readTag"), + Collections.emptyList(), + ImmutableSet.of(), + 10); // Act - Scan newScan = - Scan.newBuilder(existingScan) + Scan newScan1 = + Scan.newBuilder(existingScan1) .namespace(NAMESPACE_2) .table(TABLE_2) .indexKey(indexKey2) @@ -540,20 +650,41 @@ public void buildScanWithIndex_FromExistingWithoutChange_ShouldCopy() { .attribute("a4", "v4") .attributes(ImmutableMap.of("a5", "v5", "a6", "v6", "a7", "v7")) .clearAttribute("a7") + .readTag("policyName1", "readTag") .build(); + Scan newScan2 = Scan.newBuilder(existingScan2).clearReadTag("policyName1").build(); // Assert - assertThat(newScan) + assertThat(newScan1) .isEqualTo( new ScanWithIndex( NAMESPACE_2, TABLE_2, indexKey2, Consistency.LINEARIZABLE, - ImmutableMap.of("a4", "v4", "a5", "v5", "a6", "v6"), + ImmutableMap.of( + "a4", + "v4", + "a5", + "v5", + "a6", + "v6", + AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", + "readTag"), Arrays.asList("pk2", "ck2", "ck3", "ck4", "ck5"), ImmutableSet.of(), 5)); + assertThat(newScan2) + .isEqualTo( + new ScanWithIndex( + NAMESPACE_1, + TABLE_1, + indexKey1, + Consistency.EVENTUAL, + ImmutableMap.of(), + Collections.emptyList(), + ImmutableSet.of(), + 10)); } @Test diff --git a/core/src/test/java/com/scalar/db/api/UpdateBuilderTest.java b/core/src/test/java/com/scalar/db/api/UpdateBuilderTest.java index 92712d7b0c..1d2691138b 100644 --- a/core/src/test/java/com/scalar/db/api/UpdateBuilderTest.java +++ b/core/src/test/java/com/scalar/db/api/UpdateBuilderTest.java @@ -92,6 +92,8 @@ public void build_WithAllParameters_ShouldBuildUpdateCorrectly() { .condition(condition1) .attribute("a1", "v1") .attributes(ImmutableMap.of("a2", "v2", "a3", "v3")) + .readTag("policyName1", "readTag") + .writeTag("policyName2", "writeTag") .build(); // Assert @@ -123,7 +125,18 @@ public void build_WithAllParameters_ShouldBuildUpdateCorrectly() { assertThat(actual.getColumns().get("text2").getTextValue()).isEqualTo("another_value"); assertThat(actual.getCondition()).hasValue(condition1); assertThat(actual.getAttributes()) - .isEqualTo(ImmutableMap.of("a1", "v1", "a2", "v2", "a3", "v3")); + .isEqualTo( + ImmutableMap.of( + 
"a1", + "v1", + "a2", + "v2", + "a3", + "v3", + AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", + "readTag", + AbacOperationAttributes.WRITE_TAG_PREFIX + "policyName2", + "writeTag")); } @Test @@ -219,7 +232,7 @@ public void build_FromExistingWithoutChange_ShouldCopy() { @Test public void build_FromExistingAndUpdateAllParameters_ShouldBuildUpdateWithUpdatedParameters() { // Arrange - Update existingUpdate = + Update existingUpdate1 = Update.newBuilder() .namespace(NAMESPACE_1) .table(TABLE_1) @@ -243,10 +256,20 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildUpdateWithUpdate .attribute("a1", "v1") .attributes(ImmutableMap.of("a2", "v2", "a3", "v3")) .build(); + Update existingUpdate2 = + Update.newBuilder() + .namespace(NAMESPACE_1) + .table(TABLE_1) + .partitionKey(partitionKey1) + .clusteringKey(clusteringKey1) + .bigIntValue("bigint1", BigIntColumn.MIN_VALUE) + .readTag("policyName1", "readTag") + .writeTag("policyName2", "writeTag") + .build(); // Act - Update newUpdate = - Update.newBuilder(existingUpdate) + Update newUpdate1 = + Update.newBuilder(existingUpdate1) .namespace(NAMESPACE_2) .table(TABLE_2) .partitionKey(partitionKey2) @@ -271,38 +294,65 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildUpdateWithUpdate .attribute("a4", "v4") .attributes(ImmutableMap.of("a5", "v5", "a6", "v6", "a7", "v7")) .clearAttribute("a7") + .readTag("policyName1", "readTag") + .writeTag("policyName2", "writeTag") + .build(); + Update newUpdate2 = + Update.newBuilder(existingUpdate2) + .clearReadTag("policyName1") + .clearWriteTag("policyName2") .build(); // Assert - assertThat(newUpdate.forNamespace()).hasValue(NAMESPACE_2); - assertThat(newUpdate.forTable()).hasValue(TABLE_2); - Assertions.assertThat(newUpdate.getPartitionKey()).isEqualTo(partitionKey2); - assertThat(newUpdate.getClusteringKey()).hasValue(clusteringKey2); - assertThat(newUpdate.getColumns().size()).isEqualTo(14); - assertThat(newUpdate.getColumns().get("bigint1").getBigIntValue()) + assertThat(newUpdate1.forNamespace()).hasValue(NAMESPACE_2); + assertThat(newUpdate1.forTable()).hasValue(TABLE_2); + Assertions.assertThat(newUpdate1.getPartitionKey()).isEqualTo(partitionKey2); + assertThat(newUpdate1.getClusteringKey()).hasValue(clusteringKey2); + assertThat(newUpdate1.getColumns().size()).isEqualTo(14); + assertThat(newUpdate1.getColumns().get("bigint1").getBigIntValue()) .isEqualTo(BigIntColumn.MIN_VALUE); - assertThat(newUpdate.getColumns().get("bigint2").getBigIntValue()) + assertThat(newUpdate1.getColumns().get("bigint2").getBigIntValue()) .isEqualTo(Long.valueOf(BigIntColumn.MIN_VALUE)); - assertThat(newUpdate.getColumns().get("blob1").getBlobValueAsBytes()) + assertThat(newUpdate1.getColumns().get("blob1").getBlobValueAsBytes()) .isEqualTo("foo".getBytes(StandardCharsets.UTF_8)); - assertThat(newUpdate.getColumns().get("blob2").getBlobValueAsByteBuffer()) + assertThat(newUpdate1.getColumns().get("blob2").getBlobValueAsByteBuffer()) .isEqualTo(ByteBuffer.allocate(2)); - assertThat(newUpdate.getColumns().get("bool1").getBooleanValue()).isFalse(); - assertThat(newUpdate.getColumns().get("bool2").getBooleanValue()).isFalse(); - assertThat(newUpdate.getColumns().get("double1").getDoubleValue()).isEqualTo(Double.MIN_VALUE); - assertThat(newUpdate.getColumns().get("double2").getDoubleValue()) + assertThat(newUpdate1.getColumns().get("bool1").getBooleanValue()).isFalse(); + assertThat(newUpdate1.getColumns().get("bool2").getBooleanValue()).isFalse(); + 
assertThat(newUpdate1.getColumns().get("double1").getDoubleValue()).isEqualTo(Double.MIN_VALUE); + assertThat(newUpdate1.getColumns().get("double2").getDoubleValue()) .isEqualTo(Double.valueOf(Double.MIN_VALUE)); - assertThat(newUpdate.getColumns().get("float1").getFloatValue()).isEqualTo(Float.MIN_VALUE); - assertThat(newUpdate.getColumns().get("float2").getFloatValue()) + assertThat(newUpdate1.getColumns().get("float1").getFloatValue()).isEqualTo(Float.MIN_VALUE); + assertThat(newUpdate1.getColumns().get("float2").getFloatValue()) .isEqualTo(Float.valueOf(Float.MIN_VALUE)); - assertThat(newUpdate.getColumns().get("int1").getIntValue()).isEqualTo(Integer.MIN_VALUE); - assertThat(newUpdate.getColumns().get("int2").getIntValue()) + assertThat(newUpdate1.getColumns().get("int1").getIntValue()).isEqualTo(Integer.MIN_VALUE); + assertThat(newUpdate1.getColumns().get("int2").getIntValue()) .isEqualTo(Integer.valueOf(Integer.MIN_VALUE)); - assertThat(newUpdate.getColumns().get("text").getTextValue()).isEqualTo("another_value"); - assertThat(newUpdate.getColumns().get("text2").getTextValue()).isEqualTo("foo"); - assertThat(newUpdate.getCondition()).hasValue(condition2); - assertThat(newUpdate.getAttributes()) - .isEqualTo(ImmutableMap.of("a4", "v4", "a5", "v5", "a6", "v6")); + assertThat(newUpdate1.getColumns().get("text").getTextValue()).isEqualTo("another_value"); + assertThat(newUpdate1.getColumns().get("text2").getTextValue()).isEqualTo("foo"); + assertThat(newUpdate1.getCondition()).hasValue(condition2); + assertThat(newUpdate1.getAttributes()) + .isEqualTo( + ImmutableMap.of( + "a4", + "v4", + "a5", + "v5", + "a6", + "v6", + AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", + "readTag", + AbacOperationAttributes.WRITE_TAG_PREFIX + "policyName2", + "writeTag")); + + assertThat(newUpdate2.forNamespace()).hasValue(NAMESPACE_1); + assertThat(newUpdate2.forTable()).hasValue(TABLE_1); + Assertions.assertThat(newUpdate2.getPartitionKey()).isEqualTo(partitionKey1); + assertThat(newUpdate2.getClusteringKey()).hasValue(clusteringKey1); + assertThat(newUpdate2.getColumns().size()).isEqualTo(1); + assertThat(newUpdate2.getColumns().get("bigint1").getBigIntValue()) + .isEqualTo(BigIntColumn.MIN_VALUE); + assertThat(newUpdate2.getAttributes()).isEmpty(); } @Test diff --git a/core/src/test/java/com/scalar/db/api/UpsertBuilderTest.java b/core/src/test/java/com/scalar/db/api/UpsertBuilderTest.java index 6fd92fb120..7d43a9c878 100644 --- a/core/src/test/java/com/scalar/db/api/UpsertBuilderTest.java +++ b/core/src/test/java/com/scalar/db/api/UpsertBuilderTest.java @@ -87,6 +87,8 @@ public void build_WithAllParameters_ShouldBuildUpsertCorrectly() { .value(TextColumn.of("text2", "another_value")) .attribute("a1", "v1") .attributes(ImmutableMap.of("a2", "v2", "a3", "v3")) + .readTag("policyName1", "readTag") + .writeTag("policyName2", "writeTag") .build(); // Assert @@ -117,7 +119,18 @@ public void build_WithAllParameters_ShouldBuildUpsertCorrectly() { assertThat(actual.getColumns().get("text").getTextValue()).isEqualTo("a_value"); assertThat(actual.getColumns().get("text2").getTextValue()).isEqualTo("another_value"); assertThat(actual.getAttributes()) - .isEqualTo(ImmutableMap.of("a1", "v1", "a2", "v2", "a3", "v3")); + .isEqualTo( + ImmutableMap.of( + "a1", + "v1", + "a2", + "v2", + "a3", + "v3", + AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", + "readTag", + AbacOperationAttributes.WRITE_TAG_PREFIX + "policyName2", + "writeTag")); } @Test @@ -210,7 +223,7 @@ public void 
build_FromExistingWithoutChange_ShouldCopy() { @Test public void build_FromExistingAndUpdateAllParameters_ShouldBuildUpsertWithUpdatedParameters() { // Arrange - Upsert existingUpsert = + Upsert existingUpsert1 = Upsert.newBuilder() .namespace(NAMESPACE_1) .table(TABLE_1) @@ -233,10 +246,20 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildUpsertWithUpdate .attribute("a1", "v1") .attributes(ImmutableMap.of("a2", "v2", "a3", "v3")) .build(); + Upsert existingUpsert2 = + Upsert.newBuilder() + .namespace(NAMESPACE_1) + .table(TABLE_1) + .partitionKey(partitionKey1) + .clusteringKey(clusteringKey1) + .bigIntValue("bigint1", BigIntColumn.MAX_VALUE) + .readTag("policyName1", "readTag") + .writeTag("policyName2", "writeTag") + .build(); // Act - Upsert newUpsert = - Upsert.newBuilder(existingUpsert) + Upsert newUpsert1 = + Upsert.newBuilder(existingUpsert1) .namespace(NAMESPACE_2) .table(TABLE_2) .partitionKey(partitionKey2) @@ -260,37 +283,64 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildUpsertWithUpdate .attribute("a4", "v4") .attributes(ImmutableMap.of("a5", "v5", "a6", "v6", "a7", "v7")) .clearAttribute("a7") + .readTag("policyName1", "readTag") + .writeTag("policyName2", "writeTag") + .build(); + Upsert newUpsert2 = + Upsert.newBuilder(existingUpsert2) + .clearReadTag("policyName1") + .clearWriteTag("policyName2") .build(); // Assert - assertThat(newUpsert.forNamespace()).hasValue(NAMESPACE_2); - assertThat(newUpsert.forTable()).hasValue(TABLE_2); - Assertions.assertThat(newUpsert.getPartitionKey()).isEqualTo(partitionKey2); - assertThat(newUpsert.getClusteringKey()).hasValue(clusteringKey2); - assertThat(newUpsert.getColumns().size()).isEqualTo(14); - assertThat(newUpsert.getColumns().get("bigint1").getBigIntValue()) + assertThat(newUpsert1.forNamespace()).hasValue(NAMESPACE_2); + assertThat(newUpsert1.forTable()).hasValue(TABLE_2); + Assertions.assertThat(newUpsert1.getPartitionKey()).isEqualTo(partitionKey2); + assertThat(newUpsert1.getClusteringKey()).hasValue(clusteringKey2); + assertThat(newUpsert1.getColumns().size()).isEqualTo(14); + assertThat(newUpsert1.getColumns().get("bigint1").getBigIntValue()) .isEqualTo(BigIntColumn.MIN_VALUE); - assertThat(newUpsert.getColumns().get("bigint2").getBigIntValue()) + assertThat(newUpsert1.getColumns().get("bigint2").getBigIntValue()) .isEqualTo(Long.valueOf(BigIntColumn.MIN_VALUE)); - assertThat(newUpsert.getColumns().get("blob1").getBlobValueAsBytes()) + assertThat(newUpsert1.getColumns().get("blob1").getBlobValueAsBytes()) .isEqualTo("foo".getBytes(StandardCharsets.UTF_8)); - assertThat(newUpsert.getColumns().get("blob2").getBlobValueAsByteBuffer()) + assertThat(newUpsert1.getColumns().get("blob2").getBlobValueAsByteBuffer()) .isEqualTo(ByteBuffer.allocate(2)); - assertThat(newUpsert.getColumns().get("bool1").getBooleanValue()).isFalse(); - assertThat(newUpsert.getColumns().get("bool2").getBooleanValue()).isFalse(); - assertThat(newUpsert.getColumns().get("double1").getDoubleValue()).isEqualTo(Double.MIN_VALUE); - assertThat(newUpsert.getColumns().get("double2").getDoubleValue()) + assertThat(newUpsert1.getColumns().get("bool1").getBooleanValue()).isFalse(); + assertThat(newUpsert1.getColumns().get("bool2").getBooleanValue()).isFalse(); + assertThat(newUpsert1.getColumns().get("double1").getDoubleValue()).isEqualTo(Double.MIN_VALUE); + assertThat(newUpsert1.getColumns().get("double2").getDoubleValue()) .isEqualTo(Double.valueOf(Double.MIN_VALUE)); - 
assertThat(newUpsert.getColumns().get("float1").getFloatValue()).isEqualTo(Float.MIN_VALUE); - assertThat(newUpsert.getColumns().get("float2").getFloatValue()) + assertThat(newUpsert1.getColumns().get("float1").getFloatValue()).isEqualTo(Float.MIN_VALUE); + assertThat(newUpsert1.getColumns().get("float2").getFloatValue()) .isEqualTo(Float.valueOf(Float.MIN_VALUE)); - assertThat(newUpsert.getColumns().get("int1").getIntValue()).isEqualTo(Integer.MIN_VALUE); - assertThat(newUpsert.getColumns().get("int2").getIntValue()) + assertThat(newUpsert1.getColumns().get("int1").getIntValue()).isEqualTo(Integer.MIN_VALUE); + assertThat(newUpsert1.getColumns().get("int2").getIntValue()) .isEqualTo(Integer.valueOf(Integer.MIN_VALUE)); - assertThat(newUpsert.getColumns().get("text").getTextValue()).isEqualTo("another_value"); - assertThat(newUpsert.getColumns().get("text2").getTextValue()).isEqualTo("foo"); - assertThat(newUpsert.getAttributes()) - .isEqualTo(ImmutableMap.of("a4", "v4", "a5", "v5", "a6", "v6")); + assertThat(newUpsert1.getColumns().get("text").getTextValue()).isEqualTo("another_value"); + assertThat(newUpsert1.getColumns().get("text2").getTextValue()).isEqualTo("foo"); + assertThat(newUpsert1.getAttributes()) + .isEqualTo( + ImmutableMap.of( + "a4", + "v4", + "a5", + "v5", + "a6", + "v6", + AbacOperationAttributes.READ_TAG_PREFIX + "policyName1", + "readTag", + AbacOperationAttributes.WRITE_TAG_PREFIX + "policyName2", + "writeTag")); + + assertThat(newUpsert2.forNamespace()).hasValue(NAMESPACE_1); + assertThat(newUpsert2.forTable()).hasValue(TABLE_1); + Assertions.assertThat(newUpsert2.getPartitionKey()).isEqualTo(partitionKey1); + assertThat(newUpsert2.getClusteringKey()).hasValue(clusteringKey1); + assertThat(newUpsert2.getColumns().size()).isEqualTo(1); + assertThat(newUpsert2.getColumns().get("bigint1").getBigIntValue()) + .isEqualTo(BigIntColumn.MAX_VALUE); + assertThat(newUpsert2.getAttributes()).isEmpty(); } @Test From 7faf7a510ae995e72926e2783bf06f5408c45c9c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 Dec 2024 14:20:10 +0900 Subject: [PATCH 02/18] Bump org.assertj:assertj-core from 3.26.3 to 3.27.0 in the dependencies group (#2439) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index f62511d210..e8a69417ef 100644 --- a/build.gradle +++ b/build.gradle @@ -40,7 +40,7 @@ subprojects { commonsTextVersion = '1.13.0' junitVersion = '5.11.4' commonsLangVersion = '3.17.0' - assertjVersion = '3.26.3' + assertjVersion = '3.27.0' mockitoVersion = '4.11.0' spotbugsVersion = '4.8.6' errorproneVersion = '2.10.0' From 8155eba8671cf91959a47eec08635b3f7dfb1548 Mon Sep 17 00:00:00 2001 From: Toshihiro Suzuki Date: Mon, 6 Jan 2025 11:38:32 +0900 Subject: [PATCH 03/18] Add prefix to error code (#2444) --- core/src/main/java/com/scalar/db/common/error/CoreError.java | 2 +- .../test/java/com/scalar/db/common/error/CoreErrorTest.java | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/core/src/main/java/com/scalar/db/common/error/CoreError.java b/core/src/main/java/com/scalar/db/common/error/CoreError.java index 5d2eba33e5..eeafaea873 100644 --- a/core/src/main/java/com/scalar/db/common/error/CoreError.java +++ b/core/src/main/java/com/scalar/db/common/error/CoreError.java @@ -979,7 +979,7 @@ public enum CoreError implements ScalarDbError 
{ ""), ; - private static final String COMPONENT_NAME = "CORE"; + private static final String COMPONENT_NAME = "DB-CORE"; private final Category category; private final String id; diff --git a/core/src/test/java/com/scalar/db/common/error/CoreErrorTest.java b/core/src/test/java/com/scalar/db/common/error/CoreErrorTest.java index 00026b2d4d..5ab0c4397e 100644 --- a/core/src/test/java/com/scalar/db/common/error/CoreErrorTest.java +++ b/core/src/test/java/com/scalar/db/common/error/CoreErrorTest.java @@ -23,7 +23,7 @@ public void buildCode_ShouldBuildCorrectCode() { String code = error.buildCode(); // Assert - Assertions.assertThat(code).isEqualTo("CORE-10000"); + Assertions.assertThat(code).isEqualTo("DB-CORE-10000"); } @Test @@ -43,6 +43,6 @@ public void buildMessage_ShouldBuildCorrectMessage() { // Assert Assertions.assertThat(message) - .isEqualTo("CORE-10000: Only a single-column index is supported. Operation: " + put); + .isEqualTo("DB-CORE-10000: Only a single-column index is supported. Operation: " + put); } } From eba0c634448de98ec856736050a18fd86c34f112 Mon Sep 17 00:00:00 2001 From: Vincent Guilpain Date: Mon, 6 Jan 2025 13:46:23 +0900 Subject: [PATCH 04/18] Fix Slf4j providers not found error (#2420) --- core/build.gradle | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/core/build.gradle b/core/build.gradle index 38ff82ad00..239b40c55d 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -107,7 +107,9 @@ dependencies { implementation "com.microsoft.sqlserver:mssql-jdbc:${sqlserverDriverVersion}" implementation "org.xerial:sqlite-jdbc:${sqliteDriverVersion}" implementation "com.yugabyte:jdbc-yugabytedb:${yugabyteDriverVersion}" - implementation "org.mariadb.jdbc:mariadb-java-client:${mariadDbDriverVersion}" + implementation ("org.mariadb.jdbc:mariadb-java-client:${mariadDbDriverVersion}") { + exclude group: 'org.slf4j', module: 'slf4j-api' + } implementation "org.apache.commons:commons-text:${commonsTextVersion}" testImplementation "org.junit.jupiter:junit-jupiter-api:${junitVersion}" testImplementation "org.junit.jupiter:junit-jupiter-params:${junitVersion}" From e8de4c5d68267b023238cb8899198cfab3440b14 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Jan 2025 10:50:23 +0900 Subject: [PATCH 05/18] Bump the dependencies group with 2 updates (#2451) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- build.gradle | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.gradle b/build.gradle index e8a69417ef..069cd7e345 100644 --- a/build.gradle +++ b/build.gradle @@ -33,14 +33,14 @@ subprojects { postgresqlDriverVersion = '42.7.4' oracleDriverVersion = '21.16.0.0' sqlserverDriverVersion = '11.2.3.jre8' - sqliteDriverVersion = '3.47.1.0' + sqliteDriverVersion = '3.47.2.0' yugabyteDriverVersion = '42.7.3-yb-2' mariadDbDriverVersion = '3.5.1' picocliVersion = '4.7.6' commonsTextVersion = '1.13.0' junitVersion = '5.11.4' commonsLangVersion = '3.17.0' - assertjVersion = '3.27.0' + assertjVersion = '3.27.2' mockitoVersion = '4.11.0' spotbugsVersion = '4.8.6' errorproneVersion = '2.10.0' From a2a68f9264e99617bf57db8b83251c359ab33890 Mon Sep 17 00:00:00 2001 From: inv-jishnu <31100916+inv-jishnu@users.noreply.github.com> Date: Tue, 7 Jan 2025 10:48:52 +0530 Subject: [PATCH 06/18] Add export options validator (#2435) Co-authored-by: Peckstadt Yves --- .../com/scalar/db/common/error/CoreError.java | 22 +++ 
.../ExportOptionsValidationException.java | 14 ++ .../validation/ExportOptionsValidator.java | 156 +++++++++++++++ .../ExportOptionsValidatorTest.java | 183 ++++++++++++++++++ 4 files changed, 375 insertions(+) create mode 100644 data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidationException.java create mode 100644 data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidator.java create mode 100644 data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidatorTest.java diff --git a/core/src/main/java/com/scalar/db/common/error/CoreError.java b/core/src/main/java/com/scalar/db/common/error/CoreError.java index eeafaea873..397ac6ac01 100644 --- a/core/src/main/java/com/scalar/db/common/error/CoreError.java +++ b/core/src/main/java/com/scalar/db/common/error/CoreError.java @@ -696,6 +696,28 @@ public enum CoreError implements ScalarDbError { "The attribute-based access control feature is not enabled. To use this feature, you must enable it. Note that this feature is supported only in the ScalarDB Enterprise edition", "", ""), + DATA_LOADER_CLUSTERING_KEY_NOT_FOUND( + Category.USER_ERROR, "0153", "The provided clustering key %s was not found", "", ""), + DATA_LOADER_INVALID_PROJECTION( + Category.USER_ERROR, "0154", "The column '%s' was not found", "", ""), + DATA_LOADER_INCOMPLETE_PARTITION_KEY( + Category.USER_ERROR, + "0155", + "The provided partition key is incomplete. Required key: %s", + "", + ""), + DATA_LOADER_CLUSTERING_KEY_ORDER_MISMATCH( + Category.USER_ERROR, + "0156", + "The provided clustering key order does not match the table schema. Required order: %s", + "", + ""), + DATA_LOADER_PARTITION_KEY_ORDER_MISMATCH( + Category.USER_ERROR, + "0157", + "The provided partition key order does not match the table schema. 
Required order: %s", + "", + ""), // // Errors for the concurrency error category diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidationException.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidationException.java new file mode 100644 index 0000000000..42e342dec7 --- /dev/null +++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidationException.java @@ -0,0 +1,14 @@ +package com.scalar.db.dataloader.core.dataexport.validation; + +/** A custom exception for export options validation errors */ +public class ExportOptionsValidationException extends Exception { + + /** + * Class constructor + * + * @param message error message + */ + public ExportOptionsValidationException(String message) { + super(message); + } +} diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidator.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidator.java new file mode 100644 index 0000000000..7bf7645b0e --- /dev/null +++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidator.java @@ -0,0 +1,156 @@ +package com.scalar.db.dataloader.core.dataexport.validation; + +import com.scalar.db.api.Scan; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.error.CoreError; +import com.scalar.db.dataloader.core.ScanRange; +import com.scalar.db.dataloader.core.dataexport.ExportOptions; +import com.scalar.db.io.Column; +import com.scalar.db.io.Key; +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.List; +import lombok.AccessLevel; +import lombok.NoArgsConstructor; + +/** + * A validator for ensuring that export options are consistent with the ScalarDB table metadata and + * follow the defined constraints. + */ +@NoArgsConstructor(access = AccessLevel.PRIVATE) +public class ExportOptionsValidator { + + /** + * Validates the export request. + * + * @param exportOptions The export options provided by the user. + * @param tableMetadata The metadata of the ScalarDB table to validate against. + * @throws ExportOptionsValidationException If the export options are invalid. 
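+   * <p>Illustrative usage (the metadata lookup is an assumption; any {@code TableMetadata} source
+   * works):
+   *
+   * <pre>{@code
+   * TableMetadata metadata = admin.getTableMetadata(namespace, table);
+   * ExportOptionsValidator.validate(exportOptions, metadata);
+   * }</pre>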
+   */
+  public static void validate(ExportOptions exportOptions, TableMetadata tableMetadata)
+      throws ExportOptionsValidationException {
+    LinkedHashSet<String> partitionKeyNames = tableMetadata.getPartitionKeyNames();
+    LinkedHashSet<String> clusteringKeyNames = tableMetadata.getClusteringKeyNames();
+    ScanRange scanRange = exportOptions.getScanRange();
+
+    validatePartitionKey(partitionKeyNames, exportOptions.getScanPartitionKey());
+    validateProjectionColumns(tableMetadata.getColumnNames(), exportOptions.getProjectionColumns());
+    validateSortOrders(clusteringKeyNames, exportOptions.getSortOrders());
+
+    if (scanRange.getScanStartKey() != null) {
+      validateClusteringKey(clusteringKeyNames, scanRange.getScanStartKey());
+    }
+    if (scanRange.getScanEndKey() != null) {
+      validateClusteringKey(clusteringKeyNames, scanRange.getScanEndKey());
+    }
+  }
+
+  /**
+   * Checks whether the provided partition key is available in the ScalarDB table.
+   *
+   * @param partitionKeyNames List of partition key names available in the ScalarDB table
+   * @param key the ScalarDB key to be validated
+   * @throws ExportOptionsValidationException if the key could not be found or is not a complete
+   *     partition key
+   */
+  private static void validatePartitionKey(LinkedHashSet<String> partitionKeyNames, Key key)
+      throws ExportOptionsValidationException {
+    if (partitionKeyNames == null || key == null) {
+      return;
+    }
+
+    // Make sure that all partition key columns are provided
+    if (partitionKeyNames.size() != key.getColumns().size()) {
+      throw new ExportOptionsValidationException(
+          CoreError.DATA_LOADER_INCOMPLETE_PARTITION_KEY.buildMessage(partitionKeyNames));
+    }
+
+    // Check if the order of columns in key.getColumns() matches the order in partitionKeyNames
+    Iterator<String> partitionKeyIterator = partitionKeyNames.iterator();
+    for (Column<?> column : key.getColumns()) {
+      // Check if the column names match in order
+      if (!partitionKeyIterator.hasNext()
+          || !partitionKeyIterator.next().equals(column.getName())) {
+        throw new ExportOptionsValidationException(
+            CoreError.DATA_LOADER_PARTITION_KEY_ORDER_MISMATCH.buildMessage(partitionKeyNames));
+      }
+    }
+  }
+
+  private static void validateSortOrders(
+      LinkedHashSet<String> clusteringKeyNames, List<Scan.Ordering> sortOrders)
+      throws ExportOptionsValidationException {
+    if (sortOrders == null || sortOrders.isEmpty()) {
+      return;
+    }
+
+    for (Scan.Ordering sortOrder : sortOrders) {
+      checkIfColumnExistsAsClusteringKey(clusteringKeyNames, sortOrder.getColumnName());
+    }
+  }
+
+  /**
+   * Validates that the clustering key columns in the given Key object match the expected order
+   * defined in the clusteringKeyNames. The Key can be a prefix of the clusteringKeyNames, but the
+   * order must remain consistent.
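+   *
+   * <p>For example, with clustering keys {@code [ck1, ck2]}: a key containing only {@code ck1},
+   * or {@code ck1} followed by {@code ck2}, is accepted, while a key whose first column is
+   * {@code ck2} is rejected as an order mismatch.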
+   *
+   * @param clusteringKeyNames the expected ordered set of clustering key names
+   * @param key the Key object containing the actual clustering key columns
+   * @throws ExportOptionsValidationException if the order or names of clustering keys do not match
+   */
+  private static void validateClusteringKey(LinkedHashSet<String> clusteringKeyNames, Key key)
+      throws ExportOptionsValidationException {
+    // If either clusteringKeyNames or key is null, no validation is needed
+    if (clusteringKeyNames == null || key == null) {
+      return;
+    }
+
+    // Create an iterator to traverse the clusteringKeyNames in order
+    Iterator<String> clusteringKeyIterator = clusteringKeyNames.iterator();
+
+    // Iterate through the columns in the given Key
+    for (Column<?> column : key.getColumns()) {
+      // If clusteringKeyNames have been exhausted but columns still exist in the Key,
+      // it indicates a mismatch
+      if (!clusteringKeyIterator.hasNext()) {
+        throw new ExportOptionsValidationException(
+            CoreError.DATA_LOADER_CLUSTERING_KEY_ORDER_MISMATCH.buildMessage(clusteringKeyNames));
+      }
+
+      // Get the next expected clustering key name
+      String expectedKey = clusteringKeyIterator.next();
+
+      // Check if the current column name matches the expected clustering key name
+      if (!column.getName().equals(expectedKey)) {
+        throw new ExportOptionsValidationException(
+            CoreError.DATA_LOADER_CLUSTERING_KEY_ORDER_MISMATCH.buildMessage(clusteringKeyNames));
+      }
+    }
+  }
+
+  private static void checkIfColumnExistsAsClusteringKey(
+      LinkedHashSet<String> clusteringKeyNames, String columnName)
+      throws ExportOptionsValidationException {
+    if (clusteringKeyNames == null || columnName == null) {
+      return;
+    }
+
+    if (!clusteringKeyNames.contains(columnName)) {
+      throw new ExportOptionsValidationException(
+          CoreError.DATA_LOADER_CLUSTERING_KEY_NOT_FOUND.buildMessage(columnName));
+    }
+  }
+
+  private static void validateProjectionColumns(
+      LinkedHashSet<String> columnNames, List<String> columns)
+      throws ExportOptionsValidationException {
+    if (columns == null || columns.isEmpty()) {
+      return;
+    }
+
+    for (String column : columns) {
+      if (!columnNames.contains(column)) {
+        throw new ExportOptionsValidationException(
+            CoreError.DATA_LOADER_INVALID_PROJECTION.buildMessage(column));
+      }
+    }
+  }
+}
diff --git a/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidatorTest.java b/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidatorTest.java
new file mode 100644
index 0000000000..b36522a0fc
--- /dev/null
+++ b/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidatorTest.java
@@ -0,0 +1,183 @@
+package com.scalar.db.dataloader.core.dataexport.validation;
+
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import com.scalar.db.api.TableMetadata;
+import com.scalar.db.common.error.CoreError;
+import com.scalar.db.dataloader.core.FileFormat;
+import com.scalar.db.dataloader.core.ScanRange;
+import com.scalar.db.dataloader.core.dataexport.ExportOptions;
+import com.scalar.db.io.DataType;
+import com.scalar.db.io.IntColumn;
+import com.scalar.db.io.Key;
+import com.scalar.db.io.TextColumn;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.LinkedHashSet;
+import java.util.List;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+class ExportOptionsValidatorTest {
+
+  private TableMetadata singlePkCkMetadata;
+  private TableMetadata multiplePkCkMetadata;
+  private List<String>
projectedColumns; + + @BeforeEach + void setup() { + singlePkCkMetadata = createMockMetadata(1, 1); + multiplePkCkMetadata = createMockMetadata(2, 2); + projectedColumns = createProjectedColumns(); + } + + private TableMetadata createMockMetadata(int pkCount, int ckCount) { + TableMetadata.Builder builder = TableMetadata.newBuilder(); + + // Add partition keys + for (int i = 1; i <= pkCount; i++) { + builder.addColumn("pk" + i, DataType.INT); + builder.addPartitionKey("pk" + i); + } + + // Add clustering keys + for (int i = 1; i <= ckCount; i++) { + builder.addColumn("ck" + i, DataType.TEXT); + builder.addClusteringKey("ck" + i); + } + + return builder.build(); + } + + private List createProjectedColumns() { + List columns = new ArrayList<>(); + columns.add("pk1"); + columns.add("ck1"); + return columns; + } + + @Test + void validate_withValidExportOptionsForSinglePkCk_ShouldNotThrowException() + throws ExportOptionsValidationException { + + Key partitionKey = Key.newBuilder().add(IntColumn.of("pk1", 1)).build(); + + ExportOptions exportOptions = + ExportOptions.builder("test", "sample", partitionKey, FileFormat.JSON) + .projectionColumns(projectedColumns) + .scanRange(new ScanRange(null, null, false, false)) + .build(); + + ExportOptionsValidator.validate(exportOptions, singlePkCkMetadata); + } + + @Test + void validate_withValidExportOptionsForMultiplePkCk_ShouldNotThrowException() + throws ExportOptionsValidationException { + + Key partitionKey = + Key.newBuilder().add(IntColumn.of("pk1", 1)).add(IntColumn.of("pk2", 2)).build(); + + ExportOptions exportOptions = + ExportOptions.builder("test", "sample", partitionKey, FileFormat.JSON) + .projectionColumns(projectedColumns) + .scanRange(new ScanRange(null, null, false, false)) + .build(); + + ExportOptionsValidator.validate(exportOptions, multiplePkCkMetadata); + } + + @Test + void validate_withIncompletePartitionKeyForSinglePk_ShouldThrowException() { + Key incompletePartitionKey = Key.newBuilder().build(); + + ExportOptions exportOptions = + ExportOptions.builder("test", "sample", incompletePartitionKey, FileFormat.JSON).build(); + + assertThatThrownBy(() -> ExportOptionsValidator.validate(exportOptions, singlePkCkMetadata)) + .isInstanceOf(ExportOptionsValidationException.class) + .hasMessage( + CoreError.DATA_LOADER_INCOMPLETE_PARTITION_KEY.buildMessage( + singlePkCkMetadata.getPartitionKeyNames())); + } + + @Test + void validate_withIncompletePartitionKeyForMultiplePks_ShouldThrowException() { + Key incompletePartitionKey = Key.newBuilder().add(IntColumn.of("pk1", 1)).build(); + + ExportOptions exportOptions = + ExportOptions.builder("test", "sample", incompletePartitionKey, FileFormat.JSON).build(); + + assertThatThrownBy(() -> ExportOptionsValidator.validate(exportOptions, multiplePkCkMetadata)) + .isInstanceOf(ExportOptionsValidationException.class) + .hasMessage( + CoreError.DATA_LOADER_INCOMPLETE_PARTITION_KEY.buildMessage( + multiplePkCkMetadata.getPartitionKeyNames())); + } + + @Test + void validate_withInvalidProjectionColumn_ShouldThrowException() { + ExportOptions exportOptions = + ExportOptions.builder( + "test", + "sample", + Key.newBuilder().add(IntColumn.of("pk1", 1)).build(), + FileFormat.JSON) + .projectionColumns(Collections.singletonList("invalid_column")) + .build(); + + assertThatThrownBy(() -> ExportOptionsValidator.validate(exportOptions, singlePkCkMetadata)) + .isInstanceOf(ExportOptionsValidationException.class) + .hasMessage(CoreError.DATA_LOADER_INVALID_PROJECTION.buildMessage("invalid_column")); + } + + 
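+  // Note: a scan start/end key whose first column is not the first clustering key defined in the
+  // schema is reported as an order mismatch, since validation walks the expected names in order.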
@Test
+  void validate_withInvalidClusteringKeyInScanRange_ShouldThrowException() {
+    ScanRange scanRange =
+        new ScanRange(
+            Key.newBuilder().add(TextColumn.of("invalid_ck", "value")).build(),
+            Key.newBuilder().add(TextColumn.of("ck1", "value")).build(),
+            false,
+            false);
+
+    ExportOptions exportOptions =
+        ExportOptions.builder("test", "sample", createValidPartitionKey(), FileFormat.JSON)
+            .scanRange(scanRange)
+            .build();
+
+    assertThatThrownBy(() -> ExportOptionsValidator.validate(exportOptions, singlePkCkMetadata))
+        .isInstanceOf(ExportOptionsValidationException.class)
+        .hasMessage(CoreError.DATA_LOADER_CLUSTERING_KEY_ORDER_MISMATCH.buildMessage("[ck1]"));
+  }
+
+  @Test
+  void validate_withInvalidPartitionKeyOrder_ShouldThrowException() {
+    // Partition key names are expected to be "pk1", "pk2"
+    LinkedHashSet<String> partitionKeyNames = new LinkedHashSet<>();
+    partitionKeyNames.add("pk1");
+    partitionKeyNames.add("pk2");
+
+    // Create a partition key with reversed order, expecting an error
+    Key invalidPartitionKey =
+        Key.newBuilder()
+            .add(IntColumn.of("pk2", 2)) // Incorrect order
+            .add(IntColumn.of("pk1", 1)) // Incorrect order
+            .build();
+
+    ExportOptions exportOptions =
+        ExportOptions.builder("test", "sample", invalidPartitionKey, FileFormat.JSON)
+            .projectionColumns(projectedColumns)
+            .scanRange(new ScanRange(null, null, false, false))
+            .build();
+
+    // Verify that the validator throws the correct exception
+    assertThatThrownBy(() -> ExportOptionsValidator.validate(exportOptions, multiplePkCkMetadata))
+        .isInstanceOf(ExportOptionsValidationException.class)
+        .hasMessage(
+            CoreError.DATA_LOADER_PARTITION_KEY_ORDER_MISMATCH.buildMessage(partitionKeyNames));
+  }
+
+  private Key createValidPartitionKey() {
+    return Key.newBuilder().add(IntColumn.of("pk1", 1)).build();
+  }
+}

From 822db196c41c4305e33576eea350aec7e8b57661 Mon Sep 17 00:00:00 2001
From: Toshihiro Suzuki
Date: Thu, 9 Jan 2025 14:27:04 +0900
Subject: [PATCH 07/18] Add utility setter methods for ImmutableMap to
 AbacOperationAttributes (#2448)

---
 .../db/api/AbacOperationAttributes.java       | 11 ++++++
 .../db/api/AbacOperationAttributesTest.java   | 35 +++++++++++++++++--
 2 files changed, 44 insertions(+), 2 deletions(-)

diff --git a/core/src/main/java/com/scalar/db/api/AbacOperationAttributes.java b/core/src/main/java/com/scalar/db/api/AbacOperationAttributes.java
index b558f8726a..c4095f6a7a 100644
--- a/core/src/main/java/com/scalar/db/api/AbacOperationAttributes.java
+++ b/core/src/main/java/com/scalar/db/api/AbacOperationAttributes.java
@@ -1,5 +1,6 @@
 package com.scalar.db.api;
 
+import com.google.common.collect.ImmutableMap;
 import java.util.Map;
 import java.util.Optional;
 
@@ -16,6 +17,11 @@ public static void setReadTag(Map<String, String> attributes, String policyName,
     attributes.put(READ_TAG_PREFIX + policyName, readTag);
   }
 
+  public static void setReadTag(
+      ImmutableMap.Builder<String, String> attributesBuilder, String policyName, String readTag) {
+    attributesBuilder.put(READ_TAG_PREFIX + policyName, readTag);
+  }
+
   public static void clearReadTag(Map<String, String> attributes, String policyName) {
     attributes.remove(READ_TAG_PREFIX + policyName);
   }
@@ -29,6 +35,11 @@ public static void setWriteTag(
       Map<String, String> attributes, String policyName, String writeTag) {
     attributes.put(WRITE_TAG_PREFIX + policyName, writeTag);
   }
 
+  public static void setWriteTag(
+      ImmutableMap.Builder<String, String> attributesBuilder, String policyName, String writeTag) {
+    attributesBuilder.put(WRITE_TAG_PREFIX + policyName, writeTag);
+  }
+
   public static void clearWriteTag(Map<String, String> attributes, String policyName) {
     attributes.remove(WRITE_TAG_PREFIX +
From 822db196c41c4305e33576eea350aec7e8b57661 Mon Sep 17 00:00:00 2001
From: Toshihiro Suzuki
Date: Thu, 9 Jan 2025 14:27:04 +0900
Subject: [PATCH 07/18] Add utility setter methods for ImmutableMap to AbacOperationAttributes (#2448)

---
 .../db/api/AbacOperationAttributes.java       | 11 ++++++
 .../db/api/AbacOperationAttributesTest.java   | 35 +++++++++++++++++--
 2 files changed, 44 insertions(+), 2 deletions(-)

diff --git a/core/src/main/java/com/scalar/db/api/AbacOperationAttributes.java b/core/src/main/java/com/scalar/db/api/AbacOperationAttributes.java
index b558f8726a..c4095f6a7a 100644
--- a/core/src/main/java/com/scalar/db/api/AbacOperationAttributes.java
+++ b/core/src/main/java/com/scalar/db/api/AbacOperationAttributes.java
@@ -1,5 +1,6 @@
 package com.scalar.db.api;
 
+import com.google.common.collect.ImmutableMap;
 import java.util.Map;
 import java.util.Optional;
 
@@ -16,6 +17,11 @@ public static void setReadTag(Map<String, String> attributes, String policyName,
     attributes.put(READ_TAG_PREFIX + policyName, readTag);
   }
 
+  public static void setReadTag(
+      ImmutableMap.Builder<String, String> attributesBuilder, String policyName, String readTag) {
+    attributesBuilder.put(READ_TAG_PREFIX + policyName, readTag);
+  }
+
   public static void clearReadTag(Map<String, String> attributes, String policyName) {
     attributes.remove(READ_TAG_PREFIX + policyName);
   }
@@ -29,6 +35,11 @@ public static void setWriteTag(
     attributes.put(WRITE_TAG_PREFIX + policyName, writeTag);
   }
 
+  public static void setWriteTag(
+      ImmutableMap.Builder<String, String> attributesBuilder, String policyName, String writeTag) {
+    attributesBuilder.put(WRITE_TAG_PREFIX + policyName, writeTag);
+  }
+
   public static void clearWriteTag(Map<String, String> attributes, String policyName) {
     attributes.remove(WRITE_TAG_PREFIX + policyName);
   }
diff --git a/core/src/test/java/com/scalar/db/api/AbacOperationAttributesTest.java b/core/src/test/java/com/scalar/db/api/AbacOperationAttributesTest.java
index 56ed98af0f..f583512349 100644
--- a/core/src/test/java/com/scalar/db/api/AbacOperationAttributesTest.java
+++ b/core/src/test/java/com/scalar/db/api/AbacOperationAttributesTest.java
@@ -2,6 +2,7 @@
 
 import static org.assertj.core.api.Assertions.assertThat;
 
+import com.google.common.collect.ImmutableMap;
 import com.scalar.db.io.Key;
 import java.util.HashMap;
 import java.util.Map;
@@ -11,7 +12,7 @@ public class AbacOperationAttributesTest {
 
   @Test
-  public void setReadTag_ShouldSetReadTag() {
+  public void setReadTag_MapGiven_ShouldSetReadTag() {
     // Arrange
     Map<String, String> attributes = new HashMap<>();
     String policyName = "policyName";
     String readTag = "readTag";
@@ -25,6 +26,21 @@ public void setReadTag_ShouldSetReadTag() {
         .containsEntry(AbacOperationAttributes.READ_TAG_PREFIX + policyName, readTag);
   }
 
+  @Test
+  public void setReadTag_ImmutableMapBuilderGiven_ShouldSetReadTag() {
+    // Arrange
+    ImmutableMap.Builder<String, String> attributesBuilder = ImmutableMap.builder();
+    String policyName = "policyName";
+    String readTag = "readTag";
+
+    // Act
+    AbacOperationAttributes.setReadTag(attributesBuilder, policyName, readTag);
+
+    // Assert
+    assertThat(attributesBuilder.build())
+        .containsEntry(AbacOperationAttributes.READ_TAG_PREFIX + policyName, readTag);
+  }
+
   @Test
   public void clearReadTag_ShouldClearReadTag() {
     // Arrange
@@ -60,7 +76,7 @@ public void clearReadTags_ShouldClearReadTags() {
   }
 
   @Test
-  public void setWriteTag_ShouldSetWriteTag() {
+  public void setWriteTag_MapGiven_ShouldSetWriteTag() {
     // Arrange
     Map<String, String> attributes = new HashMap<>();
     String policyName = "policyName";
     String writeTag = "writeTag";
@@ -74,6 +90,21 @@ public void setWriteTag_ShouldSetWriteTag() {
         .containsEntry(AbacOperationAttributes.WRITE_TAG_PREFIX + policyName, writeTag);
   }
 
+  @Test
+  public void setWriteTag_ImmutableMapBuilderGiven_ShouldSetWriteTag() {
+    // Arrange
+    ImmutableMap.Builder<String, String> attributesBuilder = ImmutableMap.builder();
+    String policyName = "policyName";
+    String writeTag = "writeTag";
+
+    // Act
+    AbacOperationAttributes.setWriteTag(attributesBuilder, policyName, writeTag);
+
+    // Assert
+    assertThat(attributesBuilder.build())
+        .containsEntry(AbacOperationAttributes.WRITE_TAG_PREFIX + policyName, writeTag);
+  }
+
   @Test
   public void clearWriteTag_ShouldClearWriteTag() {
     // Arrange
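A short usage sketch of the new ImmutableMap-based setters added in this patch (the policy name and tag values are hypothetical; the resulting key format depends on the READ_TAG_PREFIX/WRITE_TAG_PREFIX constants):

    ImmutableMap.Builder<String, String> attributesBuilder = ImmutableMap.builder();
    AbacOperationAttributes.setReadTag(attributesBuilder, "policy1", "readTag");
    AbacOperationAttributes.setWriteTag(attributesBuilder, "policy1", "writeTag");
    // Unlike the HashMap overloads, this yields an immutable attribute map.
    ImmutableMap<String, String> attributes = attributesBuilder.build();
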
From 3abd13b0cf77e78dc8c1b37280053979afef9064 Mon Sep 17 00:00:00 2001
From: Jishnu J
Date: Thu, 9 Jan 2025 15:47:00 +0530
Subject: [PATCH 08/18] Initial commit

---
 data-loader/build.gradle                      |   1 +
 .../dataimport/log/AbstractImportLogger.java  | 169 +++++++++++
 .../dataimport/log/ImportLoggerConfig.java    |  13 +
 .../dataimport/log/ImportLoggerException.java |  12 +
 .../core/dataimport/log/LogConstants.java     |   3 +
 .../dataimport/log/LogStorageLocation.java    |   7 +
 .../log/SingleFileImportLogger.java           | 139 +++++++++
 .../log/SplitByDataChunkImportLogger.java     | 187 ++++++++++++
 .../dataimport/log/writer/AwsS3LogWriter.java |  29 ++
 .../log/writer/DefaultLogWriterFactory.java   |  36 +++
 .../log/writer/LocalFileLogWriter.java        |  62 ++++
 .../dataimport/log/writer/LogFileType.java    |   8 +
 .../core/dataimport/log/writer/LogWriter.java |  14 +
 .../log/writer/LogWriterFactory.java          |   8 +
 .../log/writer/LogWriterFactoryConfig.java    |  15 +
 .../log/SingleFileImportLoggerTest.java       | 268 ++++++++++++++++++
 .../log/SplitByDataChunkImportLoggerTest.java | 239 ++++++++++++++++
 .../writer/DefaultLogWriterFactoryTest.java   |  66 +++++
 18 files changed, 1276 insertions(+)
 create mode 100644 data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/AbstractImportLogger.java
 create mode 100644 data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/ImportLoggerConfig.java
 create mode 100644 data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/ImportLoggerException.java
 create mode 100644 data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/LogConstants.java
 create mode 100644 data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/LogStorageLocation.java
 create mode 100644 data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/SingleFileImportLogger.java
 create mode 100644 data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/SplitByDataChunkImportLogger.java
 create mode 100644 data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/AwsS3LogWriter.java
 create mode 100644 data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/DefaultLogWriterFactory.java
 create mode 100644 data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LocalFileLogWriter.java
 create mode 100644 data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LogFileType.java
 create mode 100644 data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LogWriter.java
 create mode 100644 data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LogWriterFactory.java
 create mode 100644 data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LogWriterFactoryConfig.java
 create mode 100644 data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/SingleFileImportLoggerTest.java
 create mode 100644 data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/SplitByDataChunkImportLoggerTest.java
 create mode 100644 data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/writer/DefaultLogWriterFactoryTest.java

diff --git a/data-loader/build.gradle b/data-loader/build.gradle
index 87a057933b..836151e924 100644
--- a/data-loader/build.gradle
+++ b/data-loader/build.gradle
@@ -17,6 +17,7 @@ subprojects {
         implementation("org.apache.commons:commons-lang3:${commonsLangVersion}")
         implementation("commons-io:commons-io:${commonsIoVersion}")
         implementation("org.slf4j:slf4j-simple:${slf4jVersion}")
+        implementation("software.amazon.awssdk:s3:2.25.31")
 
         // Mockito
         testImplementation "org.mockito:mockito-core:${mockitoVersion}"
diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/AbstractImportLogger.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/AbstractImportLogger.java
new file mode 100644
index 0000000000..11a7493ca9
--- /dev/null
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/AbstractImportLogger.java
@@ -0,0 +1,169 @@
+package com.scalar.db.dataloader.core.dataimport.log;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.scalar.db.dataloader.core.Constants;
+import com.scalar.db.dataloader.core.DataLoaderObjectMapper;
+import com.scalar.db.dataloader.core.dataimport.ImportEventListener;
+import com.scalar.db.dataloader.core.dataimport.datachunk.ImportDataChunkStatus;
+import com.scalar.db.dataloader.core.dataimport.log.writer.LogWriter;
+import com.scalar.db.dataloader.core.dataimport.log.writer.LogWriterFactory;
+import com.scalar.db.dataloader.core.dataimport.task.result.ImportTargetResult;
+import com.scalar.db.dataloader.core.dataimport.task.result.ImportTargetResultStatus;
+import com.scalar.db.dataloader.core.dataimport.task.result.ImportTaskResult;
+import com.scalar.db.dataloader.core.dataimport.transactionbatch.ImportTransactionBatchResult;
+import com.scalar.db.dataloader.core.dataimport.transactionbatch.ImportTransactionBatchStatus;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import lombok.RequiredArgsConstructor;
+
+@RequiredArgsConstructor
+public abstract class AbstractImportLogger implements ImportEventListener {
+
+  protected static final DataLoaderObjectMapper OBJECT_MAPPER = new DataLoaderObjectMapper();
+
+  protected final ImportLoggerConfig config;
+  protected final LogWriterFactory logWriterFactory;
+  protected final List<ImportEventListener> listeners = new ArrayList<>();
+
+  public void addListener(ImportEventListener listener) {
+    listeners.add(listener);
+  }
+
+  public void removeListener(ImportEventListener listener) {
+    listeners.remove(listener);
+  }
+
+  @Override
+  public void onDataChunkStarted(ImportDataChunkStatus importDataChunkStatus) {
+    // Currently we are not logging the start of a data chunk
+  }
+
+  @Override
+  public void onTransactionBatchStarted(ImportTransactionBatchStatus batchStatus) {
+    // Currently we are not logging the start of a transaction batch
+    notifyTransactionBatchStarted(batchStatus);
+  }
+
+  @Override
+  public void onTransactionBatchCompleted(ImportTransactionBatchResult batchResult) {
+    // skip logging success records if the configuration is set to skip
+    if (shouldSkipLoggingSuccess(batchResult)) {
+      return;
+    }
+
+    logTransactionBatch(batchResult);
+    notifyTransactionBatchCompleted(batchResult);
+  }
+
+  @Override
+  public void onTaskComplete(ImportTaskResult taskResult) {
+    // TODO: we can remove this event if it's currently not being used in the import Manager as well
+  }
+
+  protected abstract void logTransactionBatch(ImportTransactionBatchResult batchResult);
+
+  protected boolean shouldSkipLoggingSuccess(ImportTransactionBatchResult batchResult) {
+    return batchResult.isSuccess() && !config.isLogSuccessRecords();
+  }
+
+  protected JsonNode createFilteredTransactionBatchLogJsonNode(
+      ImportTransactionBatchResult batchResult) {
+
+    // If the batch result does not contain any records, return the batch result as is
+    if (batchResult.getRecords() == null) {
+      return OBJECT_MAPPER.valueToTree(batchResult);
+    }
+
+    // Create a new list to store the modified import task results
+    List<ImportTaskResult> modifiedRecords = new ArrayList<>();
+
+    // Loop over the records in the batchResult
+    for (ImportTaskResult taskResult : batchResult.getRecords()) {
+      // Create a new ImportTaskResult and not add the raw record yet
+      List<ImportTargetResult> targetResults =
+          batchResult.isSuccess()
+              ? taskResult.getTargets()
+              : updateTargetStatusForAbortedTransactionBatch(taskResult.getTargets());
+      ImportTaskResult.ImportTaskResultBuilder builder =
+          ImportTaskResult.builder()
+              .rowNumber(taskResult.getRowNumber())
+              .targets(targetResults)
+              .dataChunkId(taskResult.getDataChunkId());
+
+      // Only add the raw record if the configuration is set to log raw source data
+      if (config.isLogRawSourceRecords()) {
+        builder.rawRecord(taskResult.getRawRecord());
+      }
+      ImportTaskResult modifiedTaskResult = builder.build();
+
+      // Add the modified task result to the list
+      modifiedRecords.add(modifiedTaskResult);
+    }
+
+    // Create a new transaction batch result with the modified import task results
+    ImportTransactionBatchResult modifiedBatchResult =
+        ImportTransactionBatchResult.builder()
+            .dataChunkId(batchResult.getDataChunkId())
+            .transactionBatchId(batchResult.getTransactionBatchId())
+            .transactionId(batchResult.getTransactionId())
+            .records(modifiedRecords)
+            .errors(batchResult.getErrors())
+            .success(batchResult.isSuccess())
+            .build();
+
+    // Convert the modified batch result to a JsonNode
+    return OBJECT_MAPPER.valueToTree(modifiedBatchResult);
+  }
+
+  protected void closeLogWriter(LogWriter logWriter) {
+    if (logWriter != null) {
+      try {
+        logWriter.close();
+      } catch (IOException e) {
+        logError("Failed to close a log writer", e);
+      }
+    }
+  }
+
+  protected abstract void logError(String errorMessage, Exception e);
+
+  protected LogWriter createLogWriter(String logFilePath) throws IOException {
+    return logWriterFactory.createLogWriter(logFilePath);
+  }
+
+  private void notifyTransactionBatchStarted(ImportTransactionBatchStatus status) {
+    for (ImportEventListener listener : listeners) {
+      listener.onTransactionBatchStarted(status);
+    }
+  }
+
+  private void notifyTransactionBatchCompleted(ImportTransactionBatchResult batchResult) {
+    for (ImportEventListener listener : listeners) {
+      listener.onTransactionBatchCompleted(batchResult);
+    }
+  }
+
+  private List<ImportTargetResult> updateTargetStatusForAbortedTransactionBatch(
+      List<ImportTargetResult> targetResults) {
+    for (int i = 0; i < targetResults.size(); i++) {
+      ImportTargetResult target = targetResults.get(i);
+      if (target.getStatus().equals(ImportTargetResultStatus.SAVED)) {
+        ImportTargetResult newTarget =
+            ImportTargetResult.builder()
+                .importAction(target.getImportAction())
+                .status(ImportTargetResultStatus.ABORTED)
+                .importedRecord(target.getImportedRecord())
+                .namespace(target.getNamespace())
+                .tableName(target.getTableName())
+                .dataMapped(target.isDataMapped())
+                .errors(Collections.singletonList(Constants.ABORT_TRANSACTION_STATUS))
+                .build();
+        targetResults.set(i, newTarget);
+      }
+    }
+    return targetResults;
+  }
+}
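The raw-record filtering above is driven entirely by ImportLoggerConfig, added next in this patch. A minimal sketch of a configuration that keeps success entries but strips raw source rows from the logs (the directory path is illustrative):

    ImportLoggerConfig config =
        ImportLoggerConfig.builder()
            .logDirectoryPath("/tmp/import-logs/")
            .logSuccessRecords(true) // keep entries for successful batches
            .logRawSourceRecords(false) // rawRecord is then omitted by the filtering above
            .prettyPrint(false)
            .build();
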
diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/ImportLoggerConfig.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/ImportLoggerConfig.java
new file mode 100644
index 0000000000..fc0039bf90
--- /dev/null
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/ImportLoggerConfig.java
@@ -0,0 +1,13 @@
+package com.scalar.db.dataloader.core.dataimport.log;
+
+import lombok.Builder;
+import lombok.Value;
+
+@Value
+@Builder
+public class ImportLoggerConfig {
+  String logDirectoryPath;
+  boolean logSuccessRecords;
+  boolean logRawSourceRecords;
+  boolean prettyPrint;
+}
diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/ImportLoggerException.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/ImportLoggerException.java
new file mode 100644
index 0000000000..52424c9975
--- /dev/null
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/ImportLoggerException.java
@@ -0,0 +1,12 @@
+package com.scalar.db.dataloader.core.dataimport.log;
+
+public class ImportLoggerException extends Exception {
+
+  public ImportLoggerException(String message) {
+    super(message);
+  }
+
+  public ImportLoggerException(String message, Throwable cause) {
+    super(message, cause);
+  }
+}
diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/LogConstants.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/LogConstants.java
new file mode 100644
index 0000000000..379896bbaf
--- /dev/null
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/LogConstants.java
@@ -0,0 +1,3 @@
+package com.scalar.db.dataloader.core.dataimport.log;
+
+public class LogConstants {}
diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/LogStorageLocation.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/LogStorageLocation.java
new file mode 100644
index 0000000000..396cb3d8e4
--- /dev/null
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/LogStorageLocation.java
@@ -0,0 +1,7 @@
+package com.scalar.db.dataloader.core.dataimport.log;
+
+/** The location where the logs are stored. */
+public enum LogStorageLocation {
+  LOCAL_FILE_STORAGE,
+  AWS_S3
+}
diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/SingleFileImportLogger.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/SingleFileImportLogger.java
new file mode 100644
index 0000000000..fc70770761
--- /dev/null
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/SingleFileImportLogger.java
@@ -0,0 +1,139 @@
+package com.scalar.db.dataloader.core.dataimport.log;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.scalar.db.dataloader.core.dataimport.datachunk.ImportDataChunkStatus;
+import com.scalar.db.dataloader.core.dataimport.log.writer.LogWriter;
+import com.scalar.db.dataloader.core.dataimport.log.writer.LogWriterFactory;
+import com.scalar.db.dataloader.core.dataimport.task.result.ImportTargetResult;
+import com.scalar.db.dataloader.core.dataimport.task.result.ImportTargetResultStatus;
+import com.scalar.db.dataloader.core.dataimport.task.result.ImportTaskResult;
+import com.scalar.db.dataloader.core.dataimport.transactionbatch.ImportTransactionBatchResult;
+import java.io.IOException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SingleFileImportLogger extends AbstractImportLogger {
+
+  protected static final String SUMMARY_LOG_FILE_NAME = "summary.log";
+  protected static final String SUCCESS_LOG_FILE_NAME = "success.json";
+  protected static final String FAILURE_LOG_FILE_NAME = "failure.json";
+  private static final Logger LOGGER = LoggerFactory.getLogger(SingleFileImportLogger.class);
+  private LogWriter summaryLogWriter;
+  private LogWriter successLogWriter;
+  private LogWriter failureLogWriter;
+
+  public SingleFileImportLogger(ImportLoggerConfig config, LogWriterFactory logWriterFactory)
+      throws IOException {
+    super(config, logWriterFactory);
+    successLogWriter = createLogWriter(config.getLogDirectoryPath() + SUCCESS_LOG_FILE_NAME);
+    failureLogWriter = createLogWriter(config.getLogDirectoryPath() + FAILURE_LOG_FILE_NAME);
+  }
+
+  @Override
+  public void onTaskComplete(ImportTaskResult taskResult) {
+    if (!config.isLogSuccessRecords() && !config.isLogRawSourceRecords()) return;
+    try {
+      writeImportTaskResultDetailToLogs(taskResult);
+    } catch (Exception e) {
+      logError("Failed to write success/failure logs", e);
+    }
+  }
+
+  @Override
+  public void addOrUpdateDataChunkStatus(ImportDataChunkStatus status) {}
+
+  @Override
+  public void onDataChunkCompleted(ImportDataChunkStatus dataChunkStatus) {
+    try {
+      logDataChunkSummary(dataChunkStatus);
+    } catch (IOException e) {
+      logError("Failed to log the data chunk summary", e);
+    }
+  }
+
+  @Override
+  public void onAllDataChunksCompleted() {
+    closeAllLogWriters();
+  }
+
+  @Override
+  protected void logTransactionBatch(ImportTransactionBatchResult batchResult) {
+    try {
+      LogWriter logWriter = getLogWriterForTransactionBatch(batchResult);
+      JsonNode jsonNode = createFilteredTransactionBatchLogJsonNode(batchResult);
+      writeToLogWriter(logWriter, jsonNode);
+    } catch (IOException e) {
+      logError("Failed to write a transaction batch record to the log file", e);
+    }
+  }
+
+  @Override
+  protected void logError(String errorMessage, Exception exception) {
+    LOGGER.error(errorMessage, exception);
+  }
+
+  private void logDataChunkSummary(ImportDataChunkStatus dataChunkStatus) throws IOException {
+    if (summaryLogWriter == null) {
+      summaryLogWriter = createLogWriter(config.getLogDirectoryPath() + SUMMARY_LOG_FILE_NAME);
+    }
+    writeImportDataChunkSummary(dataChunkStatus, summaryLogWriter);
+  }
+
+  private void writeImportDataChunkSummary(
+      ImportDataChunkStatus dataChunkStatus, LogWriter logWriter) throws IOException {
+    JsonNode jsonNode = OBJECT_MAPPER.valueToTree(dataChunkStatus);
+    writeToLogWriter(logWriter, jsonNode);
+  }
+
+  private LogWriter getLogWriterForTransactionBatch(ImportTransactionBatchResult batchResult)
+      throws IOException {
+    String logFileName = batchResult.isSuccess() ? SUCCESS_LOG_FILE_NAME : FAILURE_LOG_FILE_NAME;
+    LogWriter logWriter = batchResult.isSuccess() ? successLogWriter : failureLogWriter;
+    if (logWriter == null) {
+      logWriter = createLogWriter(config.getLogDirectoryPath() + logFileName);
+      if (batchResult.isSuccess()) {
+        successLogWriter = logWriter;
+      } else {
+        failureLogWriter = logWriter;
+      }
+    }
+    return logWriter;
+  }
+
+  private void writeImportTaskResultDetailToLogs(ImportTaskResult importTaskResult)
+      throws IOException {
+    JsonNode jsonNode;
+    for (ImportTargetResult target : importTaskResult.getTargets()) {
+      if (config.isLogSuccessRecords()
+          && target.getStatus().equals(ImportTargetResultStatus.SAVED)) {
+        synchronized (successLogWriter) {
+          jsonNode = OBJECT_MAPPER.valueToTree(target);
+          successLogWriter.write(jsonNode);
+          successLogWriter.flush();
+        }
+      }
+      if (config.isLogRawSourceRecords()
+          && !target.getStatus().equals(ImportTargetResultStatus.SAVED)) {
+        synchronized (failureLogWriter) {
+          jsonNode = OBJECT_MAPPER.valueToTree(target);
+          failureLogWriter.write(jsonNode);
+          failureLogWriter.flush();
+        }
+      }
+    }
+  }
+
+  private void writeToLogWriter(LogWriter logWriter, JsonNode jsonNode) throws IOException {
+    logWriter.write(jsonNode);
+    logWriter.flush();
+  }
+
+  private void closeAllLogWriters() {
+    closeLogWriter(summaryLogWriter);
+    closeLogWriter(successLogWriter);
+    closeLogWriter(failureLogWriter);
+    summaryLogWriter = null;
+    successLogWriter = null;
+    failureLogWriter = null;
+  }
+}
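A sketch of the single-file logger's lifecycle, wired up with the factory classes added later in this patch (the `config` is an ImportLoggerConfig as shown earlier; `batchResult` and `dataChunkStatus` are placeholders for events that normally come from the import pipeline):

    LogWriterFactory factory =
        new DefaultLogWriterFactory(
            LogWriterFactoryConfig.builder()
                .logStorageLocation(LogStorageLocation.LOCAL_FILE_STORAGE)
                .build(),
            config);
    SingleFileImportLogger logger = new SingleFileImportLogger(config, factory);

    logger.onTransactionBatchCompleted(batchResult); // appended to success.json or failure.json
    logger.onDataChunkCompleted(dataChunkStatus); // appended to summary.log
    logger.onAllDataChunksCompleted(); // flushes and closes all three writers
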
diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/SplitByDataChunkImportLogger.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/SplitByDataChunkImportLogger.java
new file mode 100644
index 0000000000..ff775ea778
--- /dev/null
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/SplitByDataChunkImportLogger.java
@@ -0,0 +1,187 @@
+package com.scalar.db.dataloader.core.dataimport.log;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.scalar.db.dataloader.core.dataimport.datachunk.ImportDataChunkStatus;
+import com.scalar.db.dataloader.core.dataimport.log.writer.LogFileType;
+import com.scalar.db.dataloader.core.dataimport.log.writer.LogWriter;
+import com.scalar.db.dataloader.core.dataimport.log.writer.LogWriterFactory;
+import com.scalar.db.dataloader.core.dataimport.task.result.ImportTargetResult;
+import com.scalar.db.dataloader.core.dataimport.task.result.ImportTargetResultStatus;
+import com.scalar.db.dataloader.core.dataimport.task.result.ImportTaskResult;
+import com.scalar.db.dataloader.core.dataimport.transactionbatch.ImportTransactionBatchResult;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SplitByDataChunkImportLogger extends AbstractImportLogger {
+
+  protected static final String SUMMARY_LOG_FILE_NAME_FORMAT = "data_chunk_%s_summary.json";
+  protected static final String FAILURE_LOG_FILE_NAME_FORMAT = "data_chunk_%s_failure.json";
+  protected static final String SUCCESS_LOG_FILE_NAME_FORMAT = "data_chunk_%s_success.json";
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(SplitByDataChunkImportLogger.class);
+  private final Map<Integer, LogWriter> summaryLogWriters = new HashMap<>();
+  private final Map<Integer, LogWriter> successLogWriters = new HashMap<>();
+  private final Map<Integer, LogWriter> failureLogWriters = new HashMap<>();
+
+  public SplitByDataChunkImportLogger(
+      ImportLoggerConfig config, LogWriterFactory logWriterFactory) {
+    super(config, logWriterFactory);
+  }
+
+  @Override
+  public void onTaskComplete(ImportTaskResult taskResult) {
+    if (!config.isLogSuccessRecords() && !config.isLogRawSourceRecords()) return;
+    try {
+      writeImportTaskResultDetailToLogs(taskResult);
+    } catch (IOException e) {
+      LOGGER.error("Failed to write success/failure logs", e);
+    }
+  }
+
+  private void writeImportTaskResultDetailToLogs(ImportTaskResult importTaskResult)
+      throws IOException {
+    JsonNode jsonNode;
+    for (ImportTargetResult target : importTaskResult.getTargets()) {
+      if (config.isLogSuccessRecords()
+          && target.getStatus().equals(ImportTargetResultStatus.SAVED)) {
+        jsonNode = OBJECT_MAPPER.valueToTree(target);
+        synchronized (successLogWriters) {
+          LogWriter successLogWriter =
+              initializeLogWriterIfNeeded(LogFileType.SUCCESS, importTaskResult.getDataChunkId());
+          successLogWriter.write(jsonNode);
+          successLogWriter.flush();
+        }
+      }
+      if (config.isLogRawSourceRecords()
+          && !target.getStatus().equals(ImportTargetResultStatus.SAVED)) {
+        jsonNode = OBJECT_MAPPER.valueToTree(target);
+        synchronized (failureLogWriters) {
+          LogWriter failureLogWriter =
+              initializeLogWriterIfNeeded(LogFileType.FAILURE, importTaskResult.getDataChunkId());
+          failureLogWriter.write(jsonNode);
+          failureLogWriter.flush();
+        }
+      }
+    }
+  }
+
+  @Override
+  public void addOrUpdateDataChunkStatus(ImportDataChunkStatus status) {}
+
+  @Override
+  public void onDataChunkCompleted(ImportDataChunkStatus dataChunkStatus) {
+    try {
+      logDataChunkSummary(dataChunkStatus);
+      // Close the split log writers per data chunk if they exist for this data chunk id
+      closeLogWritersForDataChunk(dataChunkStatus.getDataChunkId());
+    } catch (IOException e) {
+      LOGGER.error("Failed to log the data chunk summary", e);
+    }
+  }
+
+  @Override
+  public void onAllDataChunksCompleted() {
+    closeAllDataChunkLogWriters();
+  }
+
+  @Override
+  protected void logTransactionBatch(ImportTransactionBatchResult batchResult) {
+    LogFileType logFileType = batchResult.isSuccess() ? LogFileType.SUCCESS : LogFileType.FAILURE;
+    try (LogWriter logWriter =
+        initializeLogWriterIfNeeded(logFileType, batchResult.getDataChunkId())) {
+      JsonNode jsonNode = createFilteredTransactionBatchLogJsonNode(batchResult);
+      synchronized (logWriter) {
+        logWriter.write(jsonNode);
+        logWriter.flush();
+      }
+    } catch (IOException e) {
+      LOGGER.error("Failed to write a transaction batch record to a split mode log file", e);
+    }
+  }
+
+  @Override
+  protected void logError(String errorMessage, Exception exception) {
+    LOGGER.error(errorMessage, exception);
+  }
+
+  private void logDataChunkSummary(ImportDataChunkStatus dataChunkStatus) throws IOException {
+    try (LogWriter logWriter =
+        initializeLogWriterIfNeeded(LogFileType.SUMMARY, dataChunkStatus.getDataChunkId())) {
+      logWriter.write(OBJECT_MAPPER.valueToTree(dataChunkStatus));
+      logWriter.flush();
+    }
+  }
+
+  private void closeLogWritersForDataChunk(int dataChunkId) {
+    closeLogWriter(successLogWriters.remove(dataChunkId));
+    closeLogWriter(failureLogWriters.remove(dataChunkId));
+    closeLogWriter(summaryLogWriters.remove(dataChunkId));
+  }
+
+  private void closeAllDataChunkLogWriters() {
+    summaryLogWriters.values().forEach(this::closeLogWriter);
+    successLogWriters.values().forEach(this::closeLogWriter);
+    failureLogWriters.values().forEach(this::closeLogWriter);
+    summaryLogWriters.clear();
+    successLogWriters.clear();
+    failureLogWriters.clear();
+  }
+
+  private String getLogFilePath(long batchId, LogFileType logFileType) {
+    String logfilePath;
+    switch (logFileType) {
+      case SUCCESS:
+        logfilePath =
+            config.getLogDirectoryPath() + String.format(SUCCESS_LOG_FILE_NAME_FORMAT, batchId);
+        break;
+      case FAILURE:
+        logfilePath =
+            config.getLogDirectoryPath() + String.format(FAILURE_LOG_FILE_NAME_FORMAT, batchId);
+        break;
+      case SUMMARY:
+        logfilePath =
+            config.getLogDirectoryPath() + String.format(SUMMARY_LOG_FILE_NAME_FORMAT, batchId);
+        break;
+      default:
+        logfilePath = "";
+    }
+    ;
+
+    return logfilePath;
+  }
+
+  private LogWriter initializeLogWriterIfNeeded(LogFileType logFileType, int dataChunkId)
+      throws IOException {
+    Map<Integer, LogWriter> logWriters = getLogWriters(logFileType);
+    if (!logWriters.containsKey(dataChunkId)) {
+      LogWriter logWriter = createLogWriter(logFileType, dataChunkId);
+      logWriters.put(dataChunkId, logWriter);
+    }
+    return logWriters.get(dataChunkId);
+  }
+
+  private LogWriter createLogWriter(LogFileType logFileType, int dataChunkId) throws IOException {
+    String logFilePath = getLogFilePath(dataChunkId, logFileType);
+    return createLogWriter(logFilePath);
+  }
+
+  private Map<Integer, LogWriter> getLogWriters(LogFileType logFileType) {
+    Map<Integer, LogWriter> logWriterMap = null;
+    switch (logFileType) {
+      case SUCCESS:
+        logWriterMap = successLogWriters;
+        break;
+      case FAILURE:
+        logWriterMap = failureLogWriters;
+        break;
+      case SUMMARY:
+        logWriterMap = summaryLogWriters;
+        break;
+    }
+    ;
+    return logWriterMap;
+  }
+}
diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/AwsS3LogWriter.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/AwsS3LogWriter.java
new file mode 100644
index 0000000000..c11fab0b23
--- /dev/null
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/AwsS3LogWriter.java
@@ -0,0 +1,29 @@
+package com.scalar.db.dataloader.core.dataimport.log.writer;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import java.io.IOException;
+import lombok.AllArgsConstructor;
+import software.amazon.awssdk.services.s3.S3AsyncClient;
+
+@AllArgsConstructor
+public class AwsS3LogWriter implements LogWriter {
+
+  private final S3AsyncClient s3AsyncClient;
+  private final String bucketName;
+  private final String objectKey;
+
+  @Override
+  public void write(JsonNode sourceRecord) throws IOException {
+    // Implementation to write content to cloud storage
+  }
+
+  @Override
+  public void flush() throws IOException {
+    // Implementation to flush content to cloud storage
+  }
+
+  @Override
+  public void close() throws IOException {
+    // Implementation to close the cloud storage connection
+  }
+}
diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/DefaultLogWriterFactory.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/DefaultLogWriterFactory.java
new file mode 100644
index 0000000000..5ced1e804d
--- /dev/null
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/DefaultLogWriterFactory.java
@@ -0,0 +1,36 @@
+package com.scalar.db.dataloader.core.dataimport.log.writer;
+
+import com.scalar.db.dataloader.core.dataimport.log.ImportLoggerConfig;
+import java.io.IOException;
+import lombok.AllArgsConstructor;
+
+/** A factory class to create log writers. */
+@AllArgsConstructor
+public class DefaultLogWriterFactory implements LogWriterFactory {
+
+  private final LogWriterFactoryConfig config;
+  private final ImportLoggerConfig importLoggerConfig;
+
+  /**
+   * Creates a log writer based on the configuration.
+   *
+   * @param logFilePath the path of the log file
+   * @return the log writer
+   */
+  @Override
+  public LogWriter createLogWriter(String logFilePath) throws IOException {
+    LogWriter logWriter = null;
+    switch (config.getLogStorageLocation()) {
+      case LOCAL_FILE_STORAGE:
+        logWriter = new LocalFileLogWriter(logFilePath, importLoggerConfig);
+        break;
+      case AWS_S3:
+        logWriter =
+            new AwsS3LogWriter(
+                config.getS3AsyncClient(), config.getBucketName(), config.getObjectKey());
+        break;
+    }
+    ;
+    return logWriter;
+  }
+}
diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LocalFileLogWriter.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LocalFileLogWriter.java
new file mode 100644
index 0000000000..d251a99538
--- /dev/null
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LocalFileLogWriter.java
@@ -0,0 +1,62 @@
+package com.scalar.db.dataloader.core.dataimport.log.writer;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.scalar.db.dataloader.core.DataLoaderObjectMapper;
+import com.scalar.db.dataloader.core.dataimport.log.ImportLoggerConfig;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+
+public class LocalFileLogWriter implements LogWriter {
+  private final JsonGenerator logWriter;
+  private final DataLoaderObjectMapper objectMapper;
+
+  /**
+   * Creates an instance of LocalFileLogWriter with the specified file path and import logger configuration.
+   *
+   * @param filePath the file path
+   * @throws IOException if an I/O error occurs
+   */
+  public LocalFileLogWriter(String filePath, ImportLoggerConfig importLoggerConfig)
+      throws IOException {
+    Path path = Path.of(filePath);
+    this.objectMapper = new DataLoaderObjectMapper();
+    this.logWriter =
+        objectMapper
+            .getFactory()
+            .createGenerator(
+                Files.newBufferedWriter(
+                    path, StandardOpenOption.CREATE, StandardOpenOption.APPEND));
+    // Start the JSON array
+    if (importLoggerConfig.isPrettyPrint()) this.logWriter.useDefaultPrettyPrinter();
+    this.logWriter.writeStartArray();
+    this.logWriter.flush();
+  }
+
+  @Override
+  public void write(JsonNode sourceRecord) throws IOException {
+    if (sourceRecord == null) {
+      return;
+    }
+    synchronized (logWriter) {
+      objectMapper.writeValue(logWriter, sourceRecord);
+    }
+  }
+
+  @Override
+  public void flush() throws IOException {
+    logWriter.flush();
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (logWriter.isClosed()) {
+      return;
+    }
+    logWriter.writeEndArray();
+    logWriter.flush();
+    logWriter.close();
+  }
+}
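LogWriter is AutoCloseable, and LocalFileLogWriter brackets its output as a JSON array, so direct use fits try-with-resources. A sketch (the path is illustrative and `config` is an ImportLoggerConfig as shown earlier):

    try (LogWriter writer = new LocalFileLogWriter("/tmp/import-logs/sample.json", config)) {
      writer.write(new DataLoaderObjectMapper().createObjectNode()); // appends one array element
      writer.flush();
    } // close() emits the closing bracket of the JSON array
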
diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LogFileType.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LogFileType.java
new file mode 100644
index 0000000000..5483aefc91
--- /dev/null
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LogFileType.java
@@ -0,0 +1,8 @@
+package com.scalar.db.dataloader.core.dataimport.log.writer;
+
+/** The type of the log writer. */
+public enum LogFileType {
+  SUCCESS,
+  FAILURE,
+  SUMMARY
+}
diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LogWriter.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LogWriter.java
new file mode 100644
index 0000000000..f10917901f
--- /dev/null
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LogWriter.java
@@ -0,0 +1,14 @@
+package com.scalar.db.dataloader.core.dataimport.log.writer;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import java.io.IOException;
+
+public interface LogWriter extends AutoCloseable {
+
+  void write(JsonNode sourceRecord) throws IOException;
+
+  void flush() throws IOException;
+
+  @Override
+  void close() throws IOException;
+}
diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LogWriterFactory.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LogWriterFactory.java
new file mode 100644
index 0000000000..b3c4dfc080
--- /dev/null
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LogWriterFactory.java
@@ -0,0 +1,8 @@
+package com.scalar.db.dataloader.core.dataimport.log.writer;
+
+import java.io.IOException;
+
+public interface LogWriterFactory {
+
+  LogWriter createLogWriter(String logFilePath) throws IOException;
+}
diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LogWriterFactoryConfig.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LogWriterFactoryConfig.java
new file mode 100644
index 0000000000..901d0aae6f
--- /dev/null
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LogWriterFactoryConfig.java
@@ -0,0 +1,15 @@
+package com.scalar.db.dataloader.core.dataimport.log.writer;
+
+import com.scalar.db.dataloader.core.dataimport.log.LogStorageLocation;
+import lombok.Builder;
+import lombok.Value;
+import software.amazon.awssdk.services.s3.S3AsyncClient;
+
+@Builder
+@Value
+public class LogWriterFactoryConfig {
+  LogStorageLocation logStorageLocation;
+  S3AsyncClient s3AsyncClient;
+  String bucketName;
+  String objectKey;
+}
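For the AWS_S3 location, the factory needs the S3 coordinates from LogWriterFactoryConfig; note that AwsS3LogWriter is still a stub at this point in the series. A hypothetical wiring sketch (bucket and key are made-up values; S3AsyncClient.create() assumes default AWS credentials and region):

    LogWriterFactoryConfig s3Config =
        LogWriterFactoryConfig.builder()
            .logStorageLocation(LogStorageLocation.AWS_S3)
            .s3AsyncClient(S3AsyncClient.create())
            .bucketName("my-import-logs")
            .objectKey("import/run-1/failure.json")
            .build();
    LogWriterFactory factory = new DefaultLogWriterFactory(s3Config, importLoggerConfig);
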
diff --git a/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/SingleFileImportLoggerTest.java b/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/SingleFileImportLoggerTest.java
new file mode 100644
index 0000000000..0a9fa0d244
--- /dev/null
+++ b/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/SingleFileImportLoggerTest.java
@@ -0,0 +1,268 @@
+package com.scalar.db.dataloader.core.dataimport.log;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.scalar.db.dataloader.core.DataLoaderObjectMapper;
+import com.scalar.db.dataloader.core.dataimport.datachunk.ImportDataChunkStatus;
+import com.scalar.db.dataloader.core.dataimport.log.writer.DefaultLogWriterFactory;
+import com.scalar.db.dataloader.core.dataimport.log.writer.LogWriterFactory;
+import com.scalar.db.dataloader.core.dataimport.log.writer.LogWriterFactoryConfig;
+import com.scalar.db.dataloader.core.dataimport.task.result.ImportTaskResult;
+import com.scalar.db.dataloader.core.dataimport.transactionbatch.ImportTransactionBatchResult;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class SingleFileImportLoggerTest {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(SingleFileImportLoggerTest.class);
+  private static final DataLoaderObjectMapper OBJECT_MAPPER = new DataLoaderObjectMapper();
+
+  @TempDir Path tempDir;
+
+  private LogWriterFactory logWriterFactory;
+
+  @BeforeEach
+  void setUp() {
+    LogWriterFactoryConfig logWriterFactoryConfig =
+        LogWriterFactoryConfig.builder()
+            .logStorageLocation(LogStorageLocation.LOCAL_FILE_STORAGE)
+            .build();
+    ImportLoggerConfig importLoggerConfig =
+        ImportLoggerConfig.builder()
+            .prettyPrint(false)
+            .logSuccessRecords(false)
+            .logRawSourceRecords(false)
+            .logDirectoryPath("path")
+            .build();
+    logWriterFactory = new DefaultLogWriterFactory(logWriterFactoryConfig, importLoggerConfig);
+  }
+
+  @AfterEach
+  void tearDown() throws IOException {
+    cleanUpTempDir();
+  }
+
+  private void cleanUpTempDir() throws IOException {
+    try (Stream<Path> paths = Files.list(tempDir)) {
+      paths.forEach(this::deleteFile);
+    }
+  }
+
+  private void deleteFile(Path file) {
+    try {
+      Files.deleteIfExists(file);
+    } catch (IOException e) {
+      LOGGER.error("Failed to delete file: {}", file, e);
+    }
+  }
+
+  @Test
+  void onTransactionBatchCompleted_NoErrors_ShouldWriteToSuccessLogFile() throws IOException {
+    testTransactionBatchCompleted(true, true);
+  }
+
+  @Test
+  void onTransactionBatchCompleted_HasErrors_ShouldWriteToFailureLogFile() throws IOException {
+    testTransactionBatchCompleted(false, true);
+  }
+
+  private void testTransactionBatchCompleted(boolean success, boolean logSuccessRecords)
+      throws IOException {
+    // Arrange
+    ImportLoggerConfig config =
+        ImportLoggerConfig.builder()
+            .logDirectoryPath(tempDir.toString() + "/")
+            .logRawSourceRecords(true)
+            .logSuccessRecords(logSuccessRecords)
+            .build();
+    SingleFileImportLogger importLogger = new SingleFileImportLogger(config, logWriterFactory);
+
+    List<ImportTransactionBatchResult> batchResults = createBatchResults(1, success);
+
+    // Act
+    for (ImportTransactionBatchResult batchResult : batchResults) {
+      importLogger.onTransactionBatchCompleted(batchResult);
+      importLogger.onDataChunkCompleted(
+          ImportDataChunkStatus.builder().dataChunkId(batchResult.getDataChunkId()).build());
+    }
+    importLogger.onAllDataChunksCompleted();
+
+    // Assert
+    assertTransactionBatchResults(batchResults, success, logSuccessRecords);
+  }
+
+  private List<ImportTransactionBatchResult> createBatchResults(int count, boolean success) {
+    List<ImportTransactionBatchResult> batchResults = new ArrayList<>();
+
+    for (int i = 1; i <= count; i++) {
+      List<ImportTaskResult> records =
+          Collections.singletonList(
+              ImportTaskResult.builder()
+                  .rowNumber(i)
+                  .rawRecord(OBJECT_MAPPER.createObjectNode())
+                  .targets(Collections.EMPTY_LIST)
+                  .build());
+      ImportTransactionBatchResult result =
+          ImportTransactionBatchResult.builder()
+              .dataChunkId(i)
+              .transactionBatchId(1)
+              .records(records)
+              .success(success)
+              .build();
+      batchResults.add(result);
+    }
+
+    return batchResults;
+  }
+
+  private void assertTransactionBatchResults(
+      List<ImportTransactionBatchResult> batchResults, boolean success, boolean logSuccessRecords)
+      throws IOException {
+    DataLoaderObjectMapper objectMapper = new DataLoaderObjectMapper();
+
+    // Single file log mode
+    Path logFileName =
+        tempDir.resolve(
+            success
+                ? SingleFileImportLogger.SUCCESS_LOG_FILE_NAME
+                : SingleFileImportLogger.FAILURE_LOG_FILE_NAME);
+    if (logSuccessRecords || !success) {
+      assertTrue(Files.exists(logFileName), "Log file should exist");
+
+      String logContent = Files.readString(logFileName);
+      List<ImportTransactionBatchResult> logEntries =
+          objectMapper.readValue(
+              logContent, new TypeReference<List<ImportTransactionBatchResult>>() {});
+
+      assertEquals(
+          batchResults.size(),
+          logEntries.size(),
+          "Number of log entries should match the number of batch results");
+
+      for (int i = 0; i < batchResults.size(); i++) {
+        assertTransactionBatchResult(batchResults.get(i), logEntries.get(i));
+      }
+    } else {
+      assertFalse(Files.exists(logFileName), "Log file should not exist");
+    }
+  }
+
+  private void assertTransactionBatchResult(
+      ImportTransactionBatchResult expected, ImportTransactionBatchResult actual) {
+    assertEquals(expected.getDataChunkId(), actual.getDataChunkId(), "Data chunk ID should match");
+    assertEquals(
+        expected.getTransactionBatchId(),
+        actual.getTransactionBatchId(),
+        "Transaction batch ID should match");
+    assertEquals(
+        expected.getTransactionId(), actual.getTransactionId(), "Transaction ID should match");
+    assertEquals(expected.isSuccess(), actual.isSuccess(), "Success status should match");
+
+    List<ImportTaskResult> expectedRecords = expected.getRecords();
+    List<ImportTaskResult> actualRecords = actual.getRecords();
+    assertEquals(expectedRecords.size(), actualRecords.size(), "Number of records should match");
+    for (int j = 0; j < expectedRecords.size(); j++) {
+      ImportTaskResult expectedRecord = expectedRecords.get(j);
+      ImportTaskResult actualRecord = actualRecords.get(j);
+      assertEquals(
+          expectedRecord.getRowNumber(), actualRecord.getRowNumber(), "Row number should match");
+      assertEquals(
+          expectedRecord.getRawRecord(), actualRecord.getRawRecord(), "Raw record should match");
+      assertEquals(expectedRecord.getTargets(), actualRecord.getTargets(), "Targets should match");
+    }
+  }
+
+  @Test
+  void onDataChunkCompleted_NoErrors_ShouldWriteToSummaryLogFile() throws IOException {
+    testDataChunkCompleted(false);
+  }
+
+  @Test
+  void onDataChunkCompleted_HasErrors_ShouldWriteToSummaryLogFile() throws IOException {
+    testDataChunkCompleted(true);
+  }
+
+  private void testDataChunkCompleted(boolean hasErrors) throws IOException {
+    ImportLoggerConfig config =
+        ImportLoggerConfig.builder()
+            .logDirectoryPath(tempDir.toString() + "/")
+            .logRawSourceRecords(true)
+            .logSuccessRecords(true)
+            .build();
+    SingleFileImportLogger importLogger = new SingleFileImportLogger(config, logWriterFactory);
+
+    List<ImportDataChunkStatus> dataChunkStatuses =
+        Stream.of(1, 2)
+            .map(id -> createDataChunkStatus(id, hasErrors))
+            .collect(Collectors.toList());
+
+    dataChunkStatuses.forEach(importLogger::onDataChunkCompleted);
+    importLogger.onAllDataChunksCompleted();
+
+    assertDataChunkStatusLog(SingleFileImportLogger.SUMMARY_LOG_FILE_NAME, dataChunkStatuses);
+  }
+
+  private ImportDataChunkStatus createDataChunkStatus(int dataChunkId, boolean hasErrors) {
+    return ImportDataChunkStatus.builder()
+        .dataChunkId(dataChunkId)
+        .startTime(Instant.now())
+        .endTime(Instant.now())
+        .totalRecords(100)
+        .successCount(hasErrors ? 90 : 100)
+        .failureCount(hasErrors ? 10 : 0)
+        .batchCount(5)
+        .totalDurationInMilliSeconds(1000)
+        .build();
+  }
+
+  private void assertDataChunkStatusLog(
+      String logFilePattern, List<ImportDataChunkStatus> dataChunkStatuses) throws IOException {
+    assertSingleFileLog(tempDir, logFilePattern, dataChunkStatuses);
+  }
+
+  private void assertSingleFileLog(
+      Path tempDir, String logFileName, List<ImportDataChunkStatus> dataChunkStatuses)
+      throws IOException {
+    Path summaryLogFile = tempDir.resolve(logFileName);
+    assertTrue(Files.exists(summaryLogFile));
+
+    String logContent = Files.readString(summaryLogFile);
+    DataLoaderObjectMapper objectMapper = new DataLoaderObjectMapper();
+    List<ImportDataChunkStatus> logEntries =
+        objectMapper.readValue(logContent, new TypeReference<List<ImportDataChunkStatus>>() {});
+
+    assertEquals(dataChunkStatuses.size(), logEntries.size());
+    for (int i = 0; i < dataChunkStatuses.size(); i++) {
+      assertDataChunkStatusEquals(dataChunkStatuses.get(i), logEntries.get(i));
+    }
+  }
+
+  private void assertDataChunkStatusEquals(
+      ImportDataChunkStatus expected, ImportDataChunkStatus actual) {
+    assertEquals(expected.getDataChunkId(), actual.getDataChunkId());
+    assertEquals(expected.getStartTime(), actual.getStartTime());
+    assertEquals(expected.getEndTime(), actual.getEndTime());
+    assertEquals(expected.getTotalRecords(), actual.getTotalRecords());
+    assertEquals(expected.getSuccessCount(), actual.getSuccessCount());
+    assertEquals(expected.getFailureCount(), actual.getFailureCount());
+    assertEquals(expected.getBatchCount(), actual.getBatchCount());
+    assertEquals(
+        expected.getTotalDurationInMilliSeconds(), actual.getTotalDurationInMilliSeconds());
+  }
+}
diff --git a/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/SplitByDataChunkImportLoggerTest.java b/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/SplitByDataChunkImportLoggerTest.java
new file mode 100644
index 0000000000..70e2937936
--- /dev/null
+++ b/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/SplitByDataChunkImportLoggerTest.java
@@ -0,0 +1,239 @@
+package com.scalar.db.dataloader.core.dataimport.log;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.scalar.db.dataloader.core.DataLoaderObjectMapper;
+import com.scalar.db.dataloader.core.dataimport.datachunk.ImportDataChunkStatus;
+import com.scalar.db.dataloader.core.dataimport.log.writer.DefaultLogWriterFactory;
+import com.scalar.db.dataloader.core.dataimport.log.writer.LogWriterFactory;
+import com.scalar.db.dataloader.core.dataimport.log.writer.LogWriterFactoryConfig;
+import com.scalar.db.dataloader.core.dataimport.task.result.ImportTaskResult;
+import com.scalar.db.dataloader.core.dataimport.transactionbatch.ImportTransactionBatchResult;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+
+class SplitByDataChunkImportLoggerTest {
+
+  private static final DataLoaderObjectMapper OBJECT_MAPPER = new DataLoaderObjectMapper();
+
+  @TempDir Path tempDir;
+
+  private LogWriterFactory logWriterFactory;
+
+  @BeforeEach
+  void setUp() {
+    LogWriterFactoryConfig logWriterFactoryConfig =
+        LogWriterFactoryConfig.builder()
+            .logStorageLocation(LogStorageLocation.LOCAL_FILE_STORAGE)
+            .build();
+    ImportLoggerConfig importLoggerConfig =
+        ImportLoggerConfig.builder()
+            .prettyPrint(false)
+            .logSuccessRecords(false)
+            .logRawSourceRecords(false)
+            .logDirectoryPath("path")
+            .build();
+    logWriterFactory = new DefaultLogWriterFactory(logWriterFactoryConfig, importLoggerConfig);
+  }
+
+  @Test
+  void onTransactionBatchCompleted_NoErrors_ShouldWriteToDataChunkSuccessFiles()
+      throws IOException {
+    testTransactionBatchCompleted(true, true);
+  }
+
+  @Test
+  void onTransactionBatchCompleted_HasErrors_ShouldWriteToDataChunkFailureFiles()
+      throws IOException {
+    testTransactionBatchCompleted(false, true);
+  }
+
+  @Test
+  void onTransactionBatchCompleted_NoErrorsAndNoSuccessFileLogging_ShouldNotWriteToSuccessFiles()
+      throws IOException {
+    testTransactionBatchCompleted(true, false);
+  }
+
+  private void testTransactionBatchCompleted(boolean success, boolean logSuccessRecords)
+      throws IOException {
+    // Arrange
+    ImportLoggerConfig config =
+        ImportLoggerConfig.builder()
+            .logDirectoryPath(tempDir.toString() + "/")
+            .logRawSourceRecords(true)
+            .logSuccessRecords(logSuccessRecords)
+            .build();
+    SplitByDataChunkImportLogger importLogger =
+        new SplitByDataChunkImportLogger(config, logWriterFactory);
+
+    List<ImportTransactionBatchResult> batchResults = new ArrayList<>();
+
+    for (int i = 1; i <= 3; i++) {
+      List<ImportTaskResult> records =
+          Collections.singletonList(
+              ImportTaskResult.builder()
+                  .rowNumber(i)
+                  .targets(Collections.EMPTY_LIST)
+                  .rawRecord(OBJECT_MAPPER.createObjectNode())
+                  .build());
+      ImportTransactionBatchResult result =
+          ImportTransactionBatchResult.builder()
+              .dataChunkId(i)
+              .transactionBatchId(1)
+              .records(records)
+              .success(success)
+              .build();
+      batchResults.add(result);
+    }
+
+    // Act
+    for (ImportTransactionBatchResult batchResult : batchResults) {
+      importLogger.onTransactionBatchCompleted(batchResult);
+      importLogger.onDataChunkCompleted(
+          ImportDataChunkStatus.builder().dataChunkId(batchResult.getDataChunkId()).build());
+    }
+    importLogger.onAllDataChunksCompleted();
+
+    // Assert
+    for (int i = 0; i < batchResults.size(); i++) {
+      ImportTransactionBatchResult batchResult = batchResults.get(i);
+      String logFileNameFormat =
+          success
+              ? SplitByDataChunkImportLogger.SUCCESS_LOG_FILE_NAME_FORMAT
+              : SplitByDataChunkImportLogger.FAILURE_LOG_FILE_NAME_FORMAT;
+      Path dataChunkLogFileName = tempDir.resolve(String.format(logFileNameFormat, i + 1));
+
+      if (success && logSuccessRecords) {
+        assertTrue(Files.exists(dataChunkLogFileName), "Data chunk success log file should exist");
+        assertTransactionBatchResult(batchResult, dataChunkLogFileName);
+      } else if (!success) {
+        assertTrue(Files.exists(dataChunkLogFileName), "Data chunk failure log file should exist");
+        assertTransactionBatchResult(batchResult, dataChunkLogFileName);
+      } else {
+        assertFalse(
+            Files.exists(dataChunkLogFileName), "Data chunk success log file should not exist");
+      }
+    }
+  }
+
+  private void assertTransactionBatchResult(
+      ImportTransactionBatchResult expected, Path dataChunkLogFileName) throws IOException {
+    String logContent = Files.readString(dataChunkLogFileName);
+    DataLoaderObjectMapper objectMapper = new DataLoaderObjectMapper();
+    List<ImportTransactionBatchResult> logEntries =
+        objectMapper.readValue(
+            logContent, new TypeReference<List<ImportTransactionBatchResult>>() {});
+    ImportTransactionBatchResult actual = logEntries.get(0);
+
+    assertEquals(expected.getDataChunkId(), actual.getDataChunkId(), "Data chunk ID should match");
+    assertEquals(
+        expected.getTransactionBatchId(),
+        actual.getTransactionBatchId(),
+        "Transaction batch ID should match");
+    assertEquals(
+        expected.getTransactionId(), actual.getTransactionId(), "Transaction ID should match");
+    assertEquals(expected.isSuccess(), actual.isSuccess(), "Success status should match");
+
+    List<ImportTaskResult> expectedRecords = expected.getRecords();
+    List<ImportTaskResult> actualRecords = actual.getRecords();
+    assertEquals(expectedRecords.size(), actualRecords.size(), "Number of records should match");
+    for (int j = 0; j < expectedRecords.size(); j++) {
+      ImportTaskResult expectedRecord = expectedRecords.get(j);
+      ImportTaskResult actualRecord = actualRecords.get(j);
+      assertEquals(
+          expectedRecord.getRowNumber(), actualRecord.getRowNumber(), "Row number should match");
+      assertEquals(
+          expectedRecord.getRawRecord(), actualRecord.getRawRecord(), "Raw record should match");
+      assertEquals(expectedRecord.getTargets(), actualRecord.getTargets(), "Targets should match");
+    }
+  }
+
+  @Test
+  void onDataChunkCompleted_NoErrors_ShouldWriteToSummaryLogFile() throws IOException {
+    testDataChunkCompleted(
+        String.format(SplitByDataChunkImportLogger.SUMMARY_LOG_FILE_NAME_FORMAT, "%d"), false);
+  }
+
+  @Test
+  void onDataChunkCompleted_HasErrors_ShouldWriteToSummaryLogFile() throws IOException {
+    testDataChunkCompleted(
+        String.format(SplitByDataChunkImportLogger.SUMMARY_LOG_FILE_NAME_FORMAT, "%d"), true);
+  }
+
+  private void testDataChunkCompleted(String logFilePattern, boolean hasErrors) throws IOException {
+    ImportLoggerConfig config =
+        ImportLoggerConfig.builder()
+            .logDirectoryPath(tempDir.toString() + "/")
+            .logRawSourceRecords(true)
+            .logSuccessRecords(true)
+            .build();
+    SplitByDataChunkImportLogger importLogger =
+        new SplitByDataChunkImportLogger(config, logWriterFactory);
+
+    List<ImportDataChunkStatus> dataChunkStatuses =
+        IntStream.rangeClosed(1, 2)
+            .mapToObj(id -> createDataChunkStatus(id, hasErrors))
+            .collect(Collectors.toList());
+
+    dataChunkStatuses.forEach(importLogger::onDataChunkCompleted);
+    importLogger.onAllDataChunksCompleted();
+
+    assertDataChunkStatusLog(logFilePattern, dataChunkStatuses);
+  }
+
+  private ImportDataChunkStatus createDataChunkStatus(int dataChunkId, boolean hasErrors) {
+    return ImportDataChunkStatus.builder()
+        .dataChunkId(dataChunkId)
+        .startTime(Instant.now())
+        .endTime(Instant.now())
+        .totalRecords(100)
+        .successCount(hasErrors ? 90 : 100)
+        .failureCount(hasErrors ? 10 : 0)
+        .batchCount(5)
+        .totalDurationInMilliSeconds(1000)
+        .build();
+  }
+
+  private void assertDataChunkStatusLog(
+      String logFilePattern, List<ImportDataChunkStatus> dataChunkStatuses) throws IOException {
+    for (ImportDataChunkStatus dataChunkStatus : dataChunkStatuses) {
+      String logFileName = String.format(logFilePattern, dataChunkStatus.getDataChunkId());
+      Path dataChunkLogFile = tempDir.resolve(logFileName);
+      assertTrue(Files.exists(dataChunkLogFile), "Data chunk summary log file should exist");
+
+      String logContent = Files.readString(dataChunkLogFile);
+      DataLoaderObjectMapper objectMapper = new DataLoaderObjectMapper();
+      List<ImportDataChunkStatus> logEntries =
+          objectMapper.readValue(logContent, new TypeReference<List<ImportDataChunkStatus>>() {});
+
+      assertEquals(1, logEntries.size());
+      assertDataChunkStatusEquals(dataChunkStatus, logEntries.get(0));
+    }
+  }
+
+  private void assertDataChunkStatusEquals(
+      ImportDataChunkStatus expected, ImportDataChunkStatus actual) {
+    assertEquals(expected.getDataChunkId(), actual.getDataChunkId());
+    assertEquals(expected.getStartTime(), actual.getStartTime());
+    assertEquals(expected.getEndTime(), actual.getEndTime());
+    assertEquals(expected.getTotalRecords(), actual.getTotalRecords());
+    assertEquals(expected.getSuccessCount(), actual.getSuccessCount());
+    assertEquals(expected.getFailureCount(), actual.getFailureCount());
+    assertEquals(expected.getBatchCount(), actual.getBatchCount());
+    assertEquals(
+        expected.getTotalDurationInMilliSeconds(), actual.getTotalDurationInMilliSeconds());
+  }
+}
diff --git a/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/writer/DefaultLogWriterFactoryTest.java b/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/writer/DefaultLogWriterFactoryTest.java
new file mode 100644
index 0000000000..e9102bca61
--- /dev/null
+++ b/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/writer/DefaultLogWriterFactoryTest.java
@@ -0,0 +1,66 @@
+package com.scalar.db.dataloader.core.dataimport.log.writer;
+
+import com.scalar.db.dataloader.core.dataimport.log.ImportLoggerConfig;
+import com.scalar.db.dataloader.core.dataimport.log.LogStorageLocation;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Paths;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.mockito.Mockito;
+import software.amazon.awssdk.services.s3.S3AsyncClient;
+
+class DefaultLogWriterFactoryTest {
+
+  String filePath = Paths.get("").toAbsolutePath() + "/sample.log";
+  DefaultLogWriterFactory defaultLogWriterFactory;
+
+  @AfterEach
+  void removeFileIfCreated() {
+    File file = new File(filePath);
+    if (file.exists()) {
+      file.deleteOnExit();
+    }
+  }
+
+  @Test
+  void createLogWriter_withValidLocalLogFilePath_shouldReturnLocalFileLogWriterObject()
+      throws IOException {
+    defaultLogWriterFactory =
+        new DefaultLogWriterFactory(
+            LogWriterFactoryConfig.builder()
+                .logStorageLocation(LogStorageLocation.LOCAL_FILE_STORAGE)
+                .build(),
+            ImportLoggerConfig.builder()
+                .prettyPrint(false)
+                .logSuccessRecords(false)
+                .logRawSourceRecords(false)
+                .logDirectoryPath("path")
+                .build());
+    LogWriter logWriter =
        defaultLogWriterFactory.createLogWriter(filePath);
+    Assertions.assertEquals(LocalFileLogWriter.class, logWriter.getClass());
+    logWriter.close();
+  }
+
+  @Test
+  void createLogWriter_withValidFilePath_shouldReturnLogWriterObject() throws IOException {
+    defaultLogWriterFactory =
+        new DefaultLogWriterFactory(
+            LogWriterFactoryConfig.builder()
+                .logStorageLocation(LogStorageLocation.AWS_S3)
+                .bucketName("bucket")
+                .objectKey("ObjectKey")
+                .s3AsyncClient(Mockito.mock(S3AsyncClient.class))
+                .build(),
+            ImportLoggerConfig.builder()
+                .prettyPrint(false)
+                .logSuccessRecords(false)
+                .logRawSourceRecords(false)
+                .logDirectoryPath("path")
+                .build());
+    LogWriter logWriter = defaultLogWriterFactory.createLogWriter(filePath);
+    Assertions.assertEquals(AwsS3LogWriter.class, logWriter.getClass());
+    logWriter.close();
+  }
+}
From 10409a82f78f1ebbcbb766f2ab043e30528cbb5c Mon Sep 17 00:00:00 2001
From: Jishnu J
Date: Tue, 14 Jan 2025 16:57:08 +0530
Subject: [PATCH 09/18] Change code to be compatible with java 8

---
 .../core/dataimport/log/writer/LocalFileLogWriter.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LocalFileLogWriter.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LocalFileLogWriter.java
index d251a99538..b29395e8ec 100644
--- a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LocalFileLogWriter.java
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/writer/LocalFileLogWriter.java
@@ -7,6 +7,7 @@
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.Paths;
 import java.nio.file.StandardOpenOption;
 
 public class LocalFileLogWriter implements LogWriter {
@@ -21,7 +22,7 @@ public class LocalFileLogWriter implements LogWriter {
    */
   public LocalFileLogWriter(String filePath, ImportLoggerConfig importLoggerConfig)
       throws IOException {
-    Path path = Path.of(filePath);
+    Path path = Paths.get(filePath);
     this.objectMapper = new DataLoaderObjectMapper();
     this.logWriter =
         objectMapper
From 0180a37364a191c91105f9ff0f7621138f3d7a0b Mon Sep 17 00:00:00 2001
From: Jishnu J
Date: Tue, 14 Jan 2025 17:15:41 +0530
Subject: [PATCH 10/18] Change code to be compatible with java 8 -2

---
 .../core/dataimport/log/SplitByDataChunkImportLogger.java | 3 ---
 .../core/dataimport/log/SingleFileImportLoggerTest.java   | 6 ++++--
 .../dataimport/log/SplitByDataChunkImportLoggerTest.java  | 8 ++++++--
 3 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/SplitByDataChunkImportLogger.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/SplitByDataChunkImportLogger.java
index ff775ea778..bec306ef9b 100644
--- a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/SplitByDataChunkImportLogger.java
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/log/SplitByDataChunkImportLogger.java
@@ -148,8 +148,6 @@ private String getLogFilePath(long batchId, LogFileType logFileType) {
       default:
         logfilePath = "";
     }
-    ;
-
     return logfilePath;
   }
 
@@ -181,7 +179,6 @@ private Map<Integer, LogWriter> getLogWriters(LogFileType logFileType) {
         logWriterMap = summaryLogWriters;
         break;
     }
-    ;
     return logWriterMap;
   }
 }
diff --git a/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/SingleFileImportLoggerTest.java b/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/SingleFileImportLoggerTest.java
index 0a9fa0d244..98e58109e7 100644
--- a/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/SingleFileImportLoggerTest.java
+++ b/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/SingleFileImportLoggerTest.java
@@ -13,6 +13,7 @@
 import com.scalar.db.dataloader.core.dataimport.task.result.ImportTaskResult;
 import com.scalar.db.dataloader.core.dataimport.transactionbatch.ImportTransactionBatchResult;
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.time.Instant;
@@ -145,7 +146,8 @@ private void assertTransactionBatchResults(
     if (logSuccessRecords || !success) {
       assertTrue(Files.exists(logFileName), "Log file should exist");
 
-      String logContent = Files.readString(logFileName);
+      String logContent = new String(Files.readAllBytes(logFileName), StandardCharsets.UTF_8);
+
       List<ImportTransactionBatchResult> logEntries =
           objectMapper.readValue(
               logContent, new TypeReference<List<ImportTransactionBatchResult>>() {});
@@ -242,7 +244,7 @@ private void assertSingleFileLog(
     Path summaryLogFile = tempDir.resolve(logFileName);
     assertTrue(Files.exists(summaryLogFile));
 
-    String logContent = Files.readString(summaryLogFile);
+    String logContent = new String(Files.readAllBytes(summaryLogFile), StandardCharsets.UTF_8);
     DataLoaderObjectMapper objectMapper = new DataLoaderObjectMapper();
     List<ImportDataChunkStatus> logEntries =
         objectMapper.readValue(logContent, new TypeReference<List<ImportDataChunkStatus>>() {});
diff --git a/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/SplitByDataChunkImportLoggerTest.java b/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/SplitByDataChunkImportLoggerTest.java
index 70e2937936..800ae4e97a 100644
--- a/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/SplitByDataChunkImportLoggerTest.java
+++ b/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/log/SplitByDataChunkImportLoggerTest.java
@@ -13,6 +13,7 @@
 import com.scalar.db.dataloader.core.dataimport.task.result.ImportTaskResult;
 import com.scalar.db.dataloader.core.dataimport.transactionbatch.ImportTransactionBatchResult;
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.time.Instant;
@@ -131,7 +132,9 @@ private void testTransactionBatchCompleted(boolean success, boolean logSuccessRe
 
   private void assertTransactionBatchResult(
       ImportTransactionBatchResult expected, Path dataChunkLogFileName) throws IOException {
-    String logContent = Files.readString(dataChunkLogFileName);
+    // String logContent = Files.readString(dataChunkLogFileName);
+    String logContent =
+        new String(Files.readAllBytes(dataChunkLogFileName), StandardCharsets.UTF_8);
     DataLoaderObjectMapper objectMapper = new DataLoaderObjectMapper();
     List<ImportTransactionBatchResult> logEntries =
         objectMapper.readValue(
@@ -214,7 +217,8 @@ private void assertDataChunkStatusLog(
       Path dataChunkLogFile = tempDir.resolve(logFileName);
       assertTrue(Files.exists(dataChunkLogFile), "Data chunk summary log file should exist");
 
-      String logContent = Files.readString(dataChunkLogFile);
+      // String logContent = Files.readString(dataChunkLogFile);
+      String logContent = new String(Files.readAllBytes(dataChunkLogFile), StandardCharsets.UTF_8);
       DataLoaderObjectMapper objectMapper = new DataLoaderObjectMapper();
       List<ImportDataChunkStatus> logEntries =
           objectMapper.readValue(logContent, new TypeReference<List<ImportDataChunkStatus>>() {});
From 8c96c1aacb3adc8b18c1fee271bf925e64e8bb78 Mon Sep 17 00:00:00 2001
From: Vincent Guilpain
Date: Mon, 20 Jan 2025 13:41:10 +0900
Subject: [PATCH 11/18] Fix missing unit tests for MariaDB RdbEngine (#2465)

---
 .../com/scalar/db/storage/jdbc/JdbcAdminTest.java |  6 +++++-
 .../com/scalar/db/storage/jdbc/RdbEngine.java     |  5 ++++-
 .../db/storage/jdbc/query/QueryBuilderTest.java   | 15 +++++++++++++++
 3 files changed, 24 insertions(+), 2 deletions(-)

diff --git a/core/src/test/java/com/scalar/db/storage/jdbc/JdbcAdminTest.java b/core/src/test/java/com/scalar/db/storage/jdbc/JdbcAdminTest.java
index 1a89820777..51c5ca5193 100644
--- a/core/src/test/java/com/scalar/db/storage/jdbc/JdbcAdminTest.java
+++ b/core/src/test/java/com/scalar/db/storage/jdbc/JdbcAdminTest.java
@@ -83,7 +83,9 @@ public class JdbcAdminTest {
           RdbEngine.SQLITE,
           new RdbEngineSqlite(),
           RdbEngine.YUGABYTE,
-          new RdbEngineYugabyte());
+          new RdbEngineYugabyte(),
+          RdbEngine.MARIADB,
+          new RdbEngineMariaDB());
 
   @Mock private BasicDataSource dataSource;
   @Mock private Connection connection;
@@ -119,6 +121,7 @@ private JdbcAdmin createJdbcAdminFor(RdbEngineStrategy rdbEngineStrategy) {
   private void mockUndefinedTableError(RdbEngine rdbEngine, SQLException sqlException) {
     switch (rdbEngine) {
       case MYSQL:
+      case MARIADB:
        when(sqlException.getErrorCode()).thenReturn(1049);
        break;
      case POSTGRESQL:
@@ -3450,6 +3453,7 @@ private SQLException prepareDuplicatedKeyException(RdbEngine rdbEngine) {
       case SQL_SERVER:
       case MYSQL:
       case ORACLE:
+      case MARIADB:
         duplicateKeyException = mock(SQLException.class);
         when(duplicateKeyException.getSQLState()).thenReturn("23000");
         break;
diff --git a/core/src/test/java/com/scalar/db/storage/jdbc/RdbEngine.java b/core/src/test/java/com/scalar/db/storage/jdbc/RdbEngine.java
index 7b5dde067c..daa66ad107 100644
--- a/core/src/test/java/com/scalar/db/storage/jdbc/RdbEngine.java
+++ b/core/src/test/java/com/scalar/db/storage/jdbc/RdbEngine.java
@@ -11,7 +11,8 @@ public enum RdbEngine {
   ORACLE,
   SQL_SERVER,
   SQLITE,
-  YUGABYTE;
+  YUGABYTE,
+  MARIADB;
 
   public static RdbEngineStrategy createRdbEngineStrategy(RdbEngine rdbEngine) {
     switch (rdbEngine) {
@@ -27,6 +28,8 @@ public static RdbEngineStrategy createRdbEngineStrategy(RdbEngine rdbEngine) {
         return new RdbEngineSqlite();
       case YUGABYTE:
         return new RdbEngineYugabyte();
+      case MARIADB:
+        return new RdbEngineMariaDB();
       default:
         throw new AssertionError();
     }
diff --git a/core/src/test/java/com/scalar/db/storage/jdbc/query/QueryBuilderTest.java b/core/src/test/java/com/scalar/db/storage/jdbc/query/QueryBuilderTest.java
index 1a0171329a..eae50a6278 100644
--- a/core/src/test/java/com/scalar/db/storage/jdbc/query/QueryBuilderTest.java
+++ b/core/src/test/java/com/scalar/db/storage/jdbc/query/QueryBuilderTest.java
@@ -334,6 +334,7 @@ public void selectQueryTest(RdbEngine rdbEngineType) throws SQLException {
       case MYSQL:
       case POSTGRESQL:
       case YUGABYTE:
+      case MARIADB:
        expectedQuery = "SELECT c1,c2 FROM n1.t1 WHERE p1=? AND c1>=? AND c1<=?
" + "ORDER BY c1 ASC,c2 DESC LIMIT 10"; @@ -1300,6 +1301,7 @@ public void upsertQueryTest(RdbEngine rdbEngineType) throws SQLException { preparedStatement = mock(PreparedStatement.class); switch (rdbEngineType) { case MYSQL: + case MARIADB: expectedQuery = "INSERT INTO n1.t1 (p1,v1,v2,v3) VALUES (?,?,?,?)" + " ON DUPLICATE KEY UPDATE v1=?,v2=?,v3=?"; @@ -1340,6 +1342,7 @@ public void upsertQueryTest(RdbEngine rdbEngineType) throws SQLException { case POSTGRESQL: case SQLITE: case YUGABYTE: + case MARIADB: verify(preparedStatement).setString(1, "p1Value"); verify(preparedStatement).setString(2, "v1Value"); verify(preparedStatement).setString(3, "v2Value"); @@ -1364,6 +1367,7 @@ public void upsertQueryTest(RdbEngine rdbEngineType) throws SQLException { preparedStatement = mock(PreparedStatement.class); switch (rdbEngineType) { case MYSQL: + case MARIADB: expectedQuery = "INSERT INTO n1.t1 (p1,c1,v1,v2,v3) VALUES (?,?,?,?,?)" + " ON DUPLICATE KEY UPDATE v1=?,v2=?,v3=?"; @@ -1406,6 +1410,7 @@ public void upsertQueryTest(RdbEngine rdbEngineType) throws SQLException { case POSTGRESQL: case SQLITE: case YUGABYTE: + case MARIADB: verify(preparedStatement).setString(1, "p1Value"); verify(preparedStatement).setString(2, "c1Value"); verify(preparedStatement).setString(3, "v1Value"); @@ -1434,6 +1439,7 @@ public void upsertQueryTest(RdbEngine rdbEngineType) throws SQLException { preparedStatement = mock(PreparedStatement.class); switch (rdbEngineType) { case MYSQL: + case MARIADB: expectedQuery = "INSERT INTO n1.t1 (p1,p2,c1,c2,v1,v2,v3,v4) VALUES (?,?,?,?,?,?,?,?)" + " ON DUPLICATE KEY UPDATE v1=?,v2=?,v3=?,v4=?"; @@ -1480,6 +1486,7 @@ public void upsertQueryTest(RdbEngine rdbEngineType) throws SQLException { case POSTGRESQL: case SQLITE: case YUGABYTE: + case MARIADB: verify(preparedStatement).setString(1, "p1Value"); verify(preparedStatement).setString(2, "p2Value"); verify(preparedStatement).setString(3, "c1Value"); @@ -1518,6 +1525,7 @@ public void upsertQueryTest(RdbEngine rdbEngineType) throws SQLException { preparedStatement = mock(PreparedStatement.class); switch (rdbEngineType) { case MYSQL: + case MARIADB: expectedQuery = "INSERT INTO n1.t1 (p1,p2,c1,c2,v1,v2,v3,v4,v5) VALUES (?,?,?,?,?,?,?,?,?)" + " ON DUPLICATE KEY UPDATE v1=?,v2=?,v3=?,v4=?,v5=?"; @@ -1565,6 +1573,7 @@ public void upsertQueryTest(RdbEngine rdbEngineType) throws SQLException { case POSTGRESQL: case SQLITE: case YUGABYTE: + case MARIADB: verify(preparedStatement).setString(1, "p1Value"); verify(preparedStatement).setString(2, "p2Value"); verify(preparedStatement).setString(3, "c1Value"); @@ -1617,6 +1626,7 @@ public void upsertQueryWithoutValuesTest(RdbEngine rdbEngineType) throws SQLExce preparedStatement = mock(PreparedStatement.class); switch (rdbEngineType) { case MYSQL: + case MARIADB: expectedQuery = "INSERT IGNORE INTO n1.t1 (p1) VALUES (?)"; break; case POSTGRESQL: @@ -1649,6 +1659,7 @@ public void upsertQueryWithoutValuesTest(RdbEngine rdbEngineType) throws SQLExce case POSTGRESQL: case SQLITE: case YUGABYTE: + case MARIADB: verify(preparedStatement).setString(1, "p1Value"); break; case ORACLE: @@ -1661,6 +1672,7 @@ public void upsertQueryWithoutValuesTest(RdbEngine rdbEngineType) throws SQLExce preparedStatement = mock(PreparedStatement.class); switch (rdbEngineType) { case MYSQL: + case MARIADB: expectedQuery = "INSERT IGNORE INTO n1.t1 (p1,c1) VALUES (?,?)"; break; case POSTGRESQL: @@ -1698,6 +1710,7 @@ public void upsertQueryWithoutValuesTest(RdbEngine rdbEngineType) throws SQLExce case POSTGRESQL: case 
SQLITE: case YUGABYTE: + case MARIADB: verify(preparedStatement).setString(1, "p1Value"); verify(preparedStatement).setString(2, "p2Value"); verify(preparedStatement).setString(3, "c1Value");
From c021b2614cff8b521b06895ee59e8a0e63317c1a Mon Sep 17 00:00:00 2001
From: Toshihiro Suzuki
Date: Mon, 20 Jan 2025 15:32:04 +0900
Subject: [PATCH 12/18] Add name to NamespacePolicy and TablePolicy (#2466)

---
 .../java/com/scalar/db/api/AbacAdmin.java     | 76 ++++++++++---------
 .../DecoratedDistributedTransactionAdmin.java | 41 +++++-----
 2 files changed, 60 insertions(+), 57 deletions(-)

diff --git a/core/src/main/java/com/scalar/db/api/AbacAdmin.java b/core/src/main/java/com/scalar/db/api/AbacAdmin.java
index 4ae41b7cfa..287b82c724 100644
--- a/core/src/main/java/com/scalar/db/api/AbacAdmin.java
+++ b/core/src/main/java/com/scalar/db/api/AbacAdmin.java
@@ -339,51 +339,48 @@ default Optional<UserTagInfo> getUserTagInfo(String policyName, String username)
   }
 
   /**
-   * Applies the given policy to the given namespace.
+   * Creates a namespace policy that applies the given policy to the given namespace.
    *
+   * @param namespacePolicyName the namespace policy name
    * @param policyName the policy name
    * @param namespaceName the namespace name
    * @throws ExecutionException if the operation fails
    */
-  default void applyPolicyToNamespace(String policyName, String namespaceName)
+  default void createNamespacePolicy(
+      String namespacePolicyName, String policyName, String namespaceName)
       throws ExecutionException {
     throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage());
   }
 
   /**
-   * Enables the given policy for the given namespace.
+   * Enables a namespace policy that has the given name.
    *
-   * @param policyName the policy name
-   * @param namespaceName the namespace name
+   * @param namespacePolicyName the namespace policy name
    * @throws ExecutionException if the operation fails
    */
-  default void enableNamespacePolicy(String policyName, String namespaceName)
-      throws ExecutionException {
+  default void enableNamespacePolicy(String namespacePolicyName) throws ExecutionException {
     throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage());
   }
 
   /**
-   * Disables the given policy for the given namespace.
+   * Disables a namespace policy that has the given name.
    *
-   * @param policyName the policy name
-   * @param namespaceName the namespace name
+   * @param namespacePolicyName the namespace policy name
    * @throws ExecutionException if the operation fails
    */
-  default void disableNamespacePolicy(String policyName, String namespaceName)
-      throws ExecutionException {
+  default void disableNamespacePolicy(String namespacePolicyName) throws ExecutionException {
     throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage());
   }
 
   /**
-   * Retrieves the namespace policy for the given namespace.
+   * Retrieves a namespace policy that has the given name.
    *
-   * @param policyName the policy name
-   * @param namespaceName the namespace name
+   * @param namespacePolicyName the namespace policy name
    * @return the namespace policy. If the namespace policy does not exist, returns an empty
    *     optional
    * @throws ExecutionException if the operation fails
    */
-  default Optional<NamespacePolicy> getNamespacePolicy(String policyName, String namespaceName)
+  default Optional<NamespacePolicy> getNamespacePolicy(String namespacePolicyName)
       throws ExecutionException {
     throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage());
   }
 
@@ -399,55 +396,48 @@ default List<NamespacePolicy> getNamespacePolicies() throws ExecutionException {
   }
 
   /**
-   * Applies the given policy to the given table of the given namespace.
+   * Creates a table policy that applies the given policy to the given table.
    *
+   * @param tablePolicyName the table policy name
    * @param policyName the policy name
    * @param namespaceName the namespace name
    * @param tableName the table name
    * @throws ExecutionException if the operation fails
    */
-  default void applyPolicyToTable(String policyName, String namespaceName, String tableName)
+  default void createTablePolicy(
+      String tablePolicyName, String policyName, String namespaceName, String tableName)
       throws ExecutionException {
     throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage());
   }
 
   /**
-   * Enables the given policy of the given table of the given namespace.
+   * Enables a table policy that has the given name.
    *
-   * @param policyName the policy name
-   * @param namespaceName the namespace name
-   * @param tableName the table name
+   * @param tablePolicyName the table policy name
    * @throws ExecutionException if the operation fails
    */
-  default void enableTablePolicy(String policyName, String namespaceName, String tableName)
-      throws ExecutionException {
+  default void enableTablePolicy(String tablePolicyName) throws ExecutionException {
     throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage());
   }
 
   /**
-   * Disables the given policy of the given table of the given namespace.
+   * Disables a table policy that has the given name.
    *
-   * @param policyName the policy name
-   * @param namespaceName the namespace name
-   * @param tableName the table name
+   * @param tablePolicyName the table policy name
    * @throws ExecutionException if the operation fails
    */
-  default void disableTablePolicy(String policyName, String namespaceName, String tableName)
-      throws ExecutionException {
+  default void disableTablePolicy(String tablePolicyName) throws ExecutionException {
     throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage());
   }
 
   /**
-   * Retrieves the table policy for the given table of the given namespace.
+   * Retrieves a table policy that has the given name.
    *
-   * @param policyName the policy name
-   * @param namespaceName the namespace name
-   * @param tableName the table name
+   * @param tablePolicyName the table policy name
   * @return the table policy. If the table policy does not exist, returns an empty optional
    * @throws ExecutionException if the operation fails
    */
-  default Optional<TablePolicy> getTablePolicy(
-      String policyName, String namespaceName, String tableName) throws ExecutionException {
+  default Optional<TablePolicy> getTablePolicy(String tablePolicyName) throws ExecutionException {
     throw new UnsupportedOperationException(CoreError.ABAC_NOT_ENABLED.buildMessage());
   }
 
@@ -731,6 +721,13 @@ interface GroupInfo {
   /** The namespace policy. */
   interface NamespacePolicy {
 
+    /**
+     * Returns the namespace policy name.
+     *
+     * @return the namespace policy name
+     */
+    String getName();
+
     /**
      * Returns the policy name.
      *
@@ -755,6 +752,13 @@ interface NamespacePolicy {
   /** The table policy. */
   interface TablePolicy {
 
+    /**
+     * Returns the table policy name.
+     *
+     * @return the table policy name
+     */
+    String getName();
+
     /**
      * Returns the policy name.
      *
diff --git a/core/src/main/java/com/scalar/db/common/DecoratedDistributedTransactionAdmin.java b/core/src/main/java/com/scalar/db/common/DecoratedDistributedTransactionAdmin.java
index 518fd40549..cf3657b419 100644
--- a/core/src/main/java/com/scalar/db/common/DecoratedDistributedTransactionAdmin.java
+++ b/core/src/main/java/com/scalar/db/common/DecoratedDistributedTransactionAdmin.java
@@ -492,27 +492,27 @@ public Optional<UserTagInfo> getUserTagInfo(String policyName, String username)
   }
 
   @Override
-  public void applyPolicyToNamespace(String policyName, String namespaceName)
+  public void createNamespacePolicy(
+      String namespacePolicyName, String policyName, String namespaceName)
       throws ExecutionException {
-    distributedTransactionAdmin.applyPolicyToNamespace(policyName, namespaceName);
+    distributedTransactionAdmin.createNamespacePolicy(
+        namespacePolicyName, policyName, namespaceName);
   }
 
   @Override
-  public void enableNamespacePolicy(String policyName, String namespaceName)
-      throws ExecutionException {
-    distributedTransactionAdmin.enableNamespacePolicy(policyName, namespaceName);
+  public void enableNamespacePolicy(String namespacePolicyName) throws ExecutionException {
+    distributedTransactionAdmin.enableNamespacePolicy(namespacePolicyName);
   }
 
   @Override
-  public void disableNamespacePolicy(String policyName, String namespaceName)
-      throws ExecutionException {
-    distributedTransactionAdmin.disableNamespacePolicy(policyName, namespaceName);
+  public void disableNamespacePolicy(String namespacePolicyName) throws ExecutionException {
+    distributedTransactionAdmin.disableNamespacePolicy(namespacePolicyName);
   }
 
   @Override
-  public Optional<NamespacePolicy> getNamespacePolicy(String policyName, String namespaceName)
+  public Optional<NamespacePolicy> getNamespacePolicy(String namespacePolicyName)
       throws ExecutionException {
-    return distributedTransactionAdmin.getNamespacePolicy(policyName, namespaceName);
+    return distributedTransactionAdmin.getNamespacePolicy(namespacePolicyName);
   }
 
   @Override
@@ -521,27 +521,26 @@ public List<NamespacePolicy> getNamespacePolicies() throws ExecutionException {
   }
 
   @Override
-  public void applyPolicyToTable(String policyName, String namespaceName, String tableName)
+  public void createTablePolicy(
+      String tablePolicyName, String policyName, String namespaceName, String tableName)
       throws ExecutionException {
-    distributedTransactionAdmin.applyPolicyToTable(policyName, namespaceName, tableName);
+    distributedTransactionAdmin.createTablePolicy(
+        tablePolicyName, policyName, namespaceName, tableName);
   }
 
   @Override
-  public void enableTablePolicy(String policyName, String namespaceName, String tableName)
-      throws ExecutionException {
+  public void enableTablePolicy(String tablePolicyName) throws ExecutionException {
     distributedTransactionAdmin.enableTablePolicy(tablePolicyName);
   }
 
   @Override
-  public void disableTablePolicy(String policyName, String namespaceName, String tableName)
-      throws ExecutionException {
+  public void disableTablePolicy(String tablePolicyName) throws ExecutionException {
    distributedTransactionAdmin.disableTablePolicy(tablePolicyName);
   }
 
   @Override
-  public Optional<TablePolicy> getTablePolicy(
-      String policyName, String namespaceName, String tableName) throws ExecutionException {
-    return distributedTransactionAdmin.getTablePolicy(policyName, namespaceName, tableName);
+  public Optional<TablePolicy> getTablePolicy(String tablePolicyName) throws ExecutionException {
+    return distributedTransactionAdmin.getTablePolicy(tablePolicyName);
   }
 
   @Override
From 029ad093058acaed24a9d2f2636480fb8c4f1d1d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 21 Jan 2025 14:18:50 +0900
Subject: [PATCH 13/18] Bump the dependencies group across 1 directory with 5 updates (#2472)

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 build.gradle | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/build.gradle b/build.gradle
index 069cd7e345..7ae4996483 100644
--- a/build.gradle
+++ b/build.gradle
@@ -25,22 +25,22 @@ subprojects {
     guavaVersion = '32.1.3-jre'
     slf4jVersion = '1.7.36'
     cassandraDriverVersion = '3.11.5'
-    azureCosmosVersion = '4.65.0'
+    azureCosmosVersion = '4.66.0'
     jooqVersion = '3.14.16'
-    awssdkVersion = '2.29.1'
+    awssdkVersion = '2.30.2'
     commonsDbcp2Version = '2.13.0'
     mysqlDriverVersion = '8.4.0'
-    postgresqlDriverVersion = '42.7.4'
+    postgresqlDriverVersion = '42.7.5'
     oracleDriverVersion = '21.16.0.0'
     sqlserverDriverVersion = '11.2.3.jre8'
-    sqliteDriverVersion = '3.47.2.0'
+    sqliteDriverVersion = '3.48.0.0'
     yugabyteDriverVersion = '42.7.3-yb-2'
     mariadDbDriverVersion = '3.5.1'
     picocliVersion = '4.7.6'
     commonsTextVersion = '1.13.0'
     junitVersion = '5.11.4'
     commonsLangVersion = '3.17.0'
-    assertjVersion = '3.27.2'
+    assertjVersion = '3.27.3'
     mockitoVersion = '4.11.0'
     spotbugsVersion = '4.8.6'
     errorproneVersion = '2.10.0'
From 86c26d3416cd4bd44d87ddd32eac897ad1321a6a Mon Sep 17 00:00:00 2001
From: Vincent Guilpain
Date: Tue, 21 Jan 2025 16:53:11 +0900
Subject: [PATCH 14/18] Add time related types (#2468)

---
 build.gradle | 4 +-
 .../CassandraAdminIntegrationTest.java | 5 +
 .../CassandraAdminRepairIntegrationTest.java | 5 +
 .../CassandraColumnValueIntegrationTest.java | 5 +
 ...draConditionalMutationIntegrationTest.java | 5 +
 ...ndraCrossPartitionScanIntegrationTest.java | 5 +
 ...tipleClusteringKeyScanIntegrationTest.java | 10 +
 ...raMultiplePartitionKeyIntegrationTest.java | 10 +
 .../CassandraSchemaLoaderIntegrationTest.java | 5 +
 ...assandraSecondaryIndexIntegrationTest.java | 10 +
 ...ingleClusteringKeyScanIntegrationTest.java | 10 +
 ...ndraSinglePartitionKeyIntegrationTest.java | 10 +
 ...mmitAdminIntegrationTestWithCassandra.java | 5 +
 ...minRepairIntegrationTestWithCassandra.java | 5 +
 ...susCommitIntegrationTestWithCassandra.java | 5 +
 ...tionAdminIntegrationTestWithCassandra.java | 5 +
 ...ansactionIntegrationTestWithCassandra.java | 5 +
 ...susCommitIntegrationTestWithCassandra.java | 5 +
 ...tAdminRepairIntegrationTestWithCosmos.java | 2 +-
 .../CosmosAdminRepairIntegrationTest.java | 2 +-
 ...tipleClusteringKeyScanIntegrationTest.java | 22 +-
 ...ingleClusteringKeyScanIntegrationTest.java | 8 +-
 ...tipleClusteringKeyScanIntegrationTest.java | 22 +-
 ...ingleClusteringKeyScanIntegrationTest.java | 8 +-
 .../db/storage/dynamo/DynamoTestUtils.java | 3 +-
 ...tTableIntegrationTestWithJdbcDatabase.java | 7 +-
 .../JdbcAdminImportTableIntegrationTest.java | 6 +-
 .../jdbc/JdbcAdminImportTestUtils.java | 540 ++++++++++----
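The rename in PATCH 12 above makes namespace and table policies first-class named objects: a policy binding is created once with its own name, a policy, and a target, and is afterwards enabled, disabled, and looked up purely by that name. A minimal sketch of the intended lifecycle against the new AbacAdmin signatures; the policy and namespace names here are illustrative, and a real call needs an implementation with ABAC enabled, since the default methods above just throw UnsupportedOperationException:

import com.scalar.db.api.AbacAdmin;
import com.scalar.db.exception.storage.ExecutionException;

public class NamespacePolicyLifecycleSketch {
  // Creates "ns_policy1", which binds policy "policy1" to namespace "ns",
  // enables it, and reads it back by its own name.
  public static void run(AbacAdmin admin) throws ExecutionException {
    admin.createNamespacePolicy("ns_policy1", "policy1", "ns");
    admin.enableNamespacePolicy("ns_policy1");
    admin
        .getNamespacePolicy("ns_policy1")
        .ifPresent(policy -> System.out.println("Applied: " + policy.getName()));
  }
}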
...baseCrossPartitionScanIntegrationTest.java | 26 + ...tipleClusteringKeyScanIntegrationTest.java | 12 + ...seMultiplePartitionKeyIntegrationTest.java | 25 +- ...ingleClusteringKeyScanIntegrationTest.java | 12 + ...baseSinglePartitionKeyIntegrationTest.java | 19 +- ...JdbcSchemaLoaderImportIntegrationTest.java | 120 +++- .../scalar/db/storage/jdbc/JdbcTestUtils.java | 35 +- ...ltiStorageSchemaLoaderIntegrationTest.java | 5 + .../main/java/com/scalar/db/api/Admin.java | 23 +- .../com/scalar/db/api/ConditionBuilder.java | 314 ++++++++ .../scalar/db/api/ConditionalExpression.java | 40 ++ .../db/api/DistributedStorageAdmin.java | 23 +- .../java/com/scalar/db/api/InsertBuilder.java | 56 ++ .../com/scalar/db/api/OperationBuilder.java | 40 ++ core/src/main/java/com/scalar/db/api/Put.java | 56 ++ .../java/com/scalar/db/api/PutBuilder.java | 56 ++ .../main/java/com/scalar/db/api/Result.java | 43 ++ .../java/com/scalar/db/api/UpdateBuilder.java | 56 ++ .../java/com/scalar/db/api/UpsertBuilder.java | 56 ++ .../com/scalar/db/common/AbstractResult.java | 22 +- .../CheckedDistributedStorageAdmin.java | 13 +- .../DecoratedDistributedTransactionAdmin.java | 8 +- .../com/scalar/db/common/ProjectedResult.java | 32 + .../java/com/scalar/db/common/ResultImpl.java | 40 ++ .../db/common/checker/ColumnChecker.java | 60 ++ .../com/scalar/db/common/error/CoreError.java | 42 ++ .../main/java/com/scalar/db/io/Column.java | 49 +- .../java/com/scalar/db/io/ColumnVisitor.java | 8 + .../main/java/com/scalar/db/io/DataType.java | 6 +- .../java/com/scalar/db/io/DateColumn.java | 129 ++++ core/src/main/java/com/scalar/db/io/Key.java | 144 ++++ .../java/com/scalar/db/io/TimeColumn.java | 133 ++++ .../com/scalar/db/io/TimestampColumn.java | 138 ++++ .../com/scalar/db/io/TimestampTZColumn.java | 141 ++++ .../com/scalar/db/service/AdminService.java | 13 +- .../db/storage/cassandra/CassandraAdmin.java | 31 +- .../cassandra/DeleteStatementHandler.java | 8 +- .../cassandra/InsertStatementHandler.java | 9 +- .../storage/cassandra/ResultInterpreter.java | 20 + .../cassandra/SelectStatementHandler.java | 4 +- .../cassandra/UpdateStatementHandler.java | 8 +- .../db/storage/cassandra/ValueBinder.java | 45 ++ .../storage/cosmos/ConcatenationVisitor.java | 29 + .../scalar/db/storage/cosmos/CosmosAdmin.java | 17 +- .../cosmos/CosmosOperationChecker.java | 16 + .../scalar/db/storage/cosmos/MapVisitor.java | 33 + .../db/storage/cosmos/ResultInterpreter.java | 30 + .../scalar/db/storage/cosmos/ValueBinder.java | 41 ++ .../scalar/db/storage/dynamo/DynamoAdmin.java | 21 +- .../db/storage/dynamo/ResultInterpreter.java | 27 + .../scalar/db/storage/dynamo/ValueBinder.java | 61 ++ .../dynamo/bytes/BigIntBytesEncoder.java | 12 +- .../storage/dynamo/bytes/BytesEncoders.java | 4 + .../db/storage/dynamo/bytes/BytesUtils.java | 12 + .../dynamo/bytes/DateBytesEncoder.java | 30 + .../KeyBytesEncodedLengthCalculator.java | 32 + .../storage/dynamo/bytes/KeyBytesEncoder.java | 26 + .../dynamo/bytes/TimeBytesEncoder.java | 30 + .../dynamo/bytes/TimestampBytesEncoder.java | 27 + .../dynamo/bytes/TimestampTZBytesEncoder.java | 27 + .../db/storage/jdbc/AbstractRdbEngine.java | 38 + .../com/scalar/db/storage/jdbc/JdbcAdmin.java | 19 +- .../scalar/db/storage/jdbc/JdbcConfig.java | 26 +- .../scalar/db/storage/jdbc/JdbcService.java | 7 +- .../com/scalar/db/storage/jdbc/JdbcUtils.java | 17 +- .../db/storage/jdbc/RdbEngineMariaDB.java | 13 +- .../db/storage/jdbc/RdbEngineMysql.java | 67 +- .../db/storage/jdbc/RdbEngineOracle.java | 63 +- 
.../db/storage/jdbc/RdbEnginePostgresql.java | 57 +- .../db/storage/jdbc/RdbEngineSqlServer.java | 62 +- .../db/storage/jdbc/RdbEngineSqlite.java | 62 +- .../db/storage/jdbc/RdbEngineStrategy.java | 73 +- .../storage/jdbc/RdbEngineTimeTypeMysql.java | 31 + .../storage/jdbc/RdbEngineTimeTypeOracle.java | 35 + .../jdbc/RdbEngineTimeTypePostgresql.java | 29 + .../jdbc/RdbEngineTimeTypeSqlServer.java | 38 + .../storage/jdbc/RdbEngineTimeTypeSqlite.java | 30 + .../jdbc/RdbEngineTimeTypeStrategy.java | 25 + .../db/storage/jdbc/ResultInterpreter.java | 33 +- .../db/storage/jdbc/query/DeleteQuery.java | 4 +- .../query/InsertOnConflictDoUpdateQuery.java | 8 +- .../InsertOnDuplicateKeyUpdateQuery.java | 4 +- .../db/storage/jdbc/query/InsertQuery.java | 4 +- .../db/storage/jdbc/query/MergeIntoQuery.java | 4 +- .../db/storage/jdbc/query/MergeQuery.java | 4 +- .../jdbc/query/PreparedStatementBinder.java | 61 +- .../storage/jdbc/query/SimpleSelectQuery.java | 6 +- .../db/storage/jdbc/query/UpdateQuery.java | 4 +- .../multistorage/MultiStorageAdmin.java | 13 +- .../consensuscommit/ConsensusCommitAdmin.java | 8 +- .../consensuscommit/FilteredResult.java | 32 + .../consensuscommit/MergedResult.java | 64 ++ .../consensuscommit/TransactionResult.java | 28 + .../jdbc/JdbcTransactionAdmin.java | 8 +- .../SingleCrudOperationTransactionAdmin.java | 8 +- .../com/scalar/db/util/ScalarDbUtils.java | 6 + .../util/TimeRelatedColumnEncodingUtils.java | 112 +++ .../scalar/db/api/ConditionBuilderTest.java | 405 ++++++++++- .../db/api/ConditionalExpressionTest.java | 34 + .../com/scalar/db/api/InsertBuilderTest.java | 56 +- .../com/scalar/db/api/PutBuilderTest.java | 79 ++- .../test/java/com/scalar/db/api/PutTest.java | 129 ++++ .../com/scalar/db/api/UpdateBuilderTest.java | 62 +- .../com/scalar/db/api/UpsertBuilderTest.java | 58 +- .../com/scalar/db/common/ResultImplTest.java | 78 +- .../com/scalar/db/io/BigIntColumnTest.java | 15 + .../java/com/scalar/db/io/BlobColumnTest.java | 20 + .../com/scalar/db/io/BooleanColumnTest.java | 15 + .../java/com/scalar/db/io/DateColumnTest.java | 137 ++++ .../com/scalar/db/io/DoubleColumnTest.java | 15 + .../com/scalar/db/io/FloatColumnTest.java | 15 + .../java/com/scalar/db/io/IntColumnTest.java | 15 + .../test/java/com/scalar/db/io/KeyTest.java | 162 ++++- .../java/com/scalar/db/io/TextColumnTest.java | 15 + .../java/com/scalar/db/io/TimeColumnTest.java | 155 ++++ .../com/scalar/db/io/TimestampColumnTest.java | 170 +++++ .../scalar/db/io/TimestampTZColumnTest.java | 165 +++++ .../TimeRelatedColumnEncodingUtilsTest.java | 296 ++++++++ .../storage/cassandra/CassandraAdminTest.java | 30 +- .../cassandra/ResultInterpreterTest.java | 297 +++++--- .../db/storage/cassandra/ValueBinderTest.java | 118 +++ .../cosmos/ConcatenationVisitorTest.java | 68 +- .../db/storage/cosmos/CosmosAdminTest.java | 58 +- .../db/storage/cosmos/MapVisitorTest.java | 100 +++ .../storage/cosmos/ResultInterpreterTest.java | 283 +++++--- .../db/storage/cosmos/ValueBinderTest.java | 319 +++++++++ .../storage/dynamo/DynamoAdminTestBase.java | 25 +- .../storage/dynamo/ResultInterpreterTest.java | 300 +++++--- .../db/storage/dynamo/ValueBinderTest.java | 403 +++++++++++ .../scalar/db/storage/jdbc/JdbcAdminTest.java | 150 +++- .../db/storage/jdbc/JdbcConfigTest.java | 7 + .../scalar/db/storage/jdbc/JdbcUtilsTest.java | 45 ++ .../scalar/db/storage/jdbc/RdbEngineTest.java | 132 +++- .../storage/jdbc/ResultInterpreterTest.java | 297 +++++--- .../multistorage/MultiStorageAdminTest.java | 30 + 
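The diffstat above lists the four new column classes (DateColumn, TimeColumn, TimestampColumn, TimestampTZColumn) that back the DATE, TIME, TIMESTAMP, and TIMESTAMPTZ data types. A rough sketch of how they would be constructed, assuming each class exposes of(...) factories mirroring the existing column classes such as IntColumn, with the first three mapping to the java.time local types and TIMESTAMPTZ to a point in time; the column names and values are made up for illustration:

import com.scalar.db.io.DateColumn;
import com.scalar.db.io.TimeColumn;
import com.scalar.db.io.TimestampColumn;
import com.scalar.db.io.TimestampTZColumn;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;

public class TimeRelatedColumnsSketch {
  public static void main(String[] args) {
    // One column per new data type; values use java.time throughout.
    DateColumn date = DateColumn.of("release_date", LocalDate.of(2025, 1, 21));
    TimeColumn time = TimeColumn.of("opening_time", LocalTime.of(9, 30));
    TimestampColumn createdAt =
        TimestampColumn.of("created_at", LocalDateTime.of(2025, 1, 21, 16, 53));
    TimestampTZColumn updatedAt =
        TimestampTZColumn.of("updated_at", Instant.parse("2025-01-21T07:53:11Z"));
    System.out.println(
        date.getName() + ", " + time.getName() + ", " + createdAt.getName() + ", " + updatedAt.getName());
  }
}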
.../ConsensusCommitAdminTestBase.java | 8 +- .../jdbc/JdbcTransactionAdminTest.java | 4 +- ...ngleCrudOperationTransactionAdminTest.java | 6 +- ...geAdminImportTableIntegrationTestBase.java | 131 +++- ...ibutedStorageAdminIntegrationTestBase.java | 114 ++- ...StorageAdminRepairIntegrationTestBase.java | 82 ++- ...StorageColumnValueIntegrationTestBase.java | 412 ++++++++++- ...onditionalMutationIntegrationTestBase.java | 384 +++++++--- ...CrossPartitionScanIntegrationTestBase.java | 94 ++- ...eClusteringKeyScanIntegrationTestBase.java | 65 +- ...ltiplePartitionKeyIntegrationTestBase.java | 22 +- ...rageSecondaryIndexIntegrationTestBase.java | 8 +- ...eClusteringKeyScanIntegrationTestBase.java | 6 +- ...SinglePartitionKeyIntegrationTestBase.java | 14 +- ...onAdminImportTableIntegrationTestBase.java | 109 ++- ...edTransactionAdminIntegrationTestBase.java | 113 ++- ...sactionAdminRepairIntegrationTestBase.java | 82 ++- ...ributedTransactionIntegrationTestBase.java | 637 +++++++++++------ ...eCommitTransactionIntegrationTestBase.java | 671 +++++++++++------- ...SchemaLoaderImportIntegrationTestBase.java | 17 +- .../SchemaLoaderIntegrationTestBase.java | 109 +-- ...erationTransactionIntegrationTestBase.java | 11 +- .../java/com/scalar/db/util/TestUtils.java | 109 ++- .../sample/import_schema_sample.json | 6 +- .../db/schemaloader/ImportTableSchema.java | 38 +- .../db/schemaloader/SchemaOperator.java | 8 +- .../scalar/db/schemaloader/TableSchema.java | 6 +- .../schemaloader/ImportTableSchemaTest.java | 78 +- .../db/schemaloader/SchemaOperatorTest.java | 8 +- .../db/schemaloader/TableSchemaTest.java | 16 +- 193 files changed, 10740 insertions(+), 1818 deletions(-) create mode 100644 core/src/main/java/com/scalar/db/io/DateColumn.java create mode 100644 core/src/main/java/com/scalar/db/io/TimeColumn.java create mode 100644 core/src/main/java/com/scalar/db/io/TimestampColumn.java create mode 100644 core/src/main/java/com/scalar/db/io/TimestampTZColumn.java create mode 100644 core/src/main/java/com/scalar/db/storage/dynamo/bytes/DateBytesEncoder.java create mode 100644 core/src/main/java/com/scalar/db/storage/dynamo/bytes/TimeBytesEncoder.java create mode 100644 core/src/main/java/com/scalar/db/storage/dynamo/bytes/TimestampBytesEncoder.java create mode 100644 core/src/main/java/com/scalar/db/storage/dynamo/bytes/TimestampTZBytesEncoder.java create mode 100644 core/src/main/java/com/scalar/db/storage/jdbc/AbstractRdbEngine.java create mode 100644 core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeMysql.java create mode 100644 core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeOracle.java create mode 100644 core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypePostgresql.java create mode 100644 core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeSqlServer.java create mode 100644 core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeSqlite.java create mode 100644 core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeStrategy.java create mode 100644 core/src/main/java/com/scalar/db/util/TimeRelatedColumnEncodingUtils.java create mode 100644 core/src/test/java/com/scalar/db/io/DateColumnTest.java create mode 100644 core/src/test/java/com/scalar/db/io/TimeColumnTest.java create mode 100644 core/src/test/java/com/scalar/db/io/TimestampColumnTest.java create mode 100644 core/src/test/java/com/scalar/db/io/TimestampTZColumnTest.java create mode 100644 core/src/test/java/com/scalar/db/storage/TimeRelatedColumnEncodingUtilsTest.java create mode 
100644 core/src/test/java/com/scalar/db/storage/cosmos/ValueBinderTest.java create mode 100644 core/src/test/java/com/scalar/db/storage/dynamo/ValueBinderTest.java diff --git a/build.gradle b/build.gradle index 7ae4996483..1b78bd940f 100644 --- a/build.gradle +++ b/build.gradle @@ -31,8 +31,8 @@ subprojects { commonsDbcp2Version = '2.13.0' mysqlDriverVersion = '8.4.0' postgresqlDriverVersion = '42.7.5' - oracleDriverVersion = '21.16.0.0' - sqlserverDriverVersion = '11.2.3.jre8' + oracleDriverVersion = '23.6.0.24.10' + sqlserverDriverVersion = '12.8.1.jre8' sqliteDriverVersion = '3.48.0.0' yugabyteDriverVersion = '42.7.3-yb-2' mariadDbDriverVersion = '3.5.1' diff --git a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraAdminIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraAdminIntegrationTest.java index f271bd47c2..119b2ca2a3 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraAdminIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraAdminIntegrationTest.java @@ -21,4 +21,9 @@ protected Map getCreationOptions() { protected AdminTestUtils getAdminTestUtils(String testName) { return new CassandraAdminTestUtils(getProperties(testName)); } + + @Override + protected boolean isTimestampTypeSupported() { + return false; + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraAdminRepairIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraAdminRepairIntegrationTest.java index 99514b2b1c..5246591bb9 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraAdminRepairIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraAdminRepairIntegrationTest.java @@ -28,4 +28,9 @@ protected void initialize(String testName) { admin = new CassandraAdmin(clusterManager, new DatabaseConfig(properties)); adminTestUtils = new CassandraAdminTestUtils(properties, clusterManager); } + + @Override + protected boolean isTimestampTypeSupported() { + return false; + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraColumnValueIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraColumnValueIntegrationTest.java index 63acda2800..ff76c606bb 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraColumnValueIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraColumnValueIntegrationTest.java @@ -16,4 +16,9 @@ protected Properties getProperties(String testName) { protected Map getCreationOptions() { return Collections.singletonMap(CassandraAdmin.REPLICATION_FACTOR, "1"); } + + @Override + protected boolean isTimestampTypeSupported() { + return false; + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraConditionalMutationIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraConditionalMutationIntegrationTest.java index 2399eede81..7db0db8f24 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraConditionalMutationIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraConditionalMutationIntegrationTest.java @@ -32,4 +32,9 @@ protected boolean shouldMutate( return super.shouldMutate(initialColumn, columnToCompare, operator); } } + + 
@Override + protected boolean isTimestampTypeSupported() { + return false; + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraCrossPartitionScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraCrossPartitionScanIntegrationTest.java index d2ace88450..5b169fb1fe 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraCrossPartitionScanIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraCrossPartitionScanIntegrationTest.java @@ -24,4 +24,9 @@ protected Map getCreationOptions() { @Override @Disabled("Cross partition scan with ordering is not supported in Cassandra") public void scan_WithOrderingForNonPrimaryColumns_ShouldReturnProperResult() {} + + @Override + protected boolean isTimestampTypeSupported() { + return false; + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraMultipleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraMultipleClusteringKeyScanIntegrationTest.java index 40a03c3f3c..f52826e0a0 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraMultipleClusteringKeyScanIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraMultipleClusteringKeyScanIntegrationTest.java @@ -1,9 +1,12 @@ package com.scalar.db.storage.cassandra; import com.scalar.db.api.DistributedStorageMultipleClusteringKeyScanIntegrationTestBase; +import com.scalar.db.io.DataType; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.stream.Collectors; public class CassandraMultipleClusteringKeyScanIntegrationTest extends DistributedStorageMultipleClusteringKeyScanIntegrationTestBase { @@ -17,4 +20,11 @@ protected Properties getProperties(String testName) { protected Map getCreationOptions() { return Collections.singletonMap(CassandraAdmin.REPLICATION_FACTOR, "1"); } + + @Override + protected List getDataTypes() { + return super.getDataTypes().stream() + .filter(type -> type != DataType.TIMESTAMP) + .collect(Collectors.toList()); + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraMultiplePartitionKeyIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraMultiplePartitionKeyIntegrationTest.java index 1ba990c543..49f2c87244 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraMultiplePartitionKeyIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraMultiplePartitionKeyIntegrationTest.java @@ -1,9 +1,12 @@ package com.scalar.db.storage.cassandra; import com.scalar.db.api.DistributedStorageMultiplePartitionKeyIntegrationTestBase; +import com.scalar.db.io.DataType; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.stream.Collectors; public class CassandraMultiplePartitionKeyIntegrationTest extends DistributedStorageMultiplePartitionKeyIntegrationTestBase { @@ -16,4 +19,11 @@ protected Properties getProperties(String testName) { protected Map getCreationOptions() { return Collections.singletonMap(CassandraAdmin.REPLICATION_FACTOR, "1"); } + + @Override + protected List getDataTypes() { + return super.getDataTypes().stream() + .filter(type -> type != DataType.TIMESTAMP) + 
.collect(Collectors.toList()); + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraSchemaLoaderIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraSchemaLoaderIntegrationTest.java index 2b3a76c0a7..29315b5e81 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraSchemaLoaderIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraSchemaLoaderIntegrationTest.java @@ -54,4 +54,9 @@ protected List getCommandArgsForUpgrade(Path configFilePath) { .add("--replication-factor=1") .build(); } + + @Override + protected boolean isTimestampTypeSupported() { + return false; + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraSecondaryIndexIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraSecondaryIndexIntegrationTest.java index 6666ff0b83..19e640a8a1 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraSecondaryIndexIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraSecondaryIndexIntegrationTest.java @@ -1,9 +1,12 @@ package com.scalar.db.storage.cassandra; import com.scalar.db.api.DistributedStorageSecondaryIndexIntegrationTestBase; +import com.scalar.db.io.DataType; import java.util.Collections; import java.util.Map; import java.util.Properties; +import java.util.Set; +import java.util.stream.Collectors; public class CassandraSecondaryIndexIntegrationTest extends DistributedStorageSecondaryIndexIntegrationTestBase { @@ -16,4 +19,11 @@ protected Properties getProperties(String testName) { protected Map getCreationOptions() { return Collections.singletonMap(CassandraAdmin.REPLICATION_FACTOR, "1"); } + + @Override + protected Set getSecondaryIndexTypes() { + return super.getSecondaryIndexTypes().stream() + .filter(type -> type != DataType.TIMESTAMP) + .collect(Collectors.toSet()); + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraSingleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraSingleClusteringKeyScanIntegrationTest.java index 8dfce2a7b3..d8daa8b6bd 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraSingleClusteringKeyScanIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraSingleClusteringKeyScanIntegrationTest.java @@ -1,9 +1,12 @@ package com.scalar.db.storage.cassandra; import com.scalar.db.api.DistributedStorageSingleClusteringKeyScanIntegrationTestBase; +import com.scalar.db.io.DataType; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.stream.Collectors; public class CassandraSingleClusteringKeyScanIntegrationTest extends DistributedStorageSingleClusteringKeyScanIntegrationTestBase { @@ -16,4 +19,11 @@ protected Properties getProperties(String testName) { protected Map getCreationOptions() { return Collections.singletonMap(CassandraAdmin.REPLICATION_FACTOR, "1"); } + + @Override + protected List getClusteringKeyTypes() { + return super.getClusteringKeyTypes().stream() + .filter(type -> type != DataType.TIMESTAMP) + .collect(Collectors.toList()); + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraSinglePartitionKeyIntegrationTest.java 
b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraSinglePartitionKeyIntegrationTest.java index d5c0389610..4d58e47186 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraSinglePartitionKeyIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cassandra/CassandraSinglePartitionKeyIntegrationTest.java @@ -1,9 +1,12 @@ package com.scalar.db.storage.cassandra; import com.scalar.db.api.DistributedStorageSinglePartitionKeyIntegrationTestBase; +import com.scalar.db.io.DataType; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.stream.Collectors; public class CassandraSinglePartitionKeyIntegrationTest extends DistributedStorageSinglePartitionKeyIntegrationTestBase { @@ -16,4 +19,11 @@ protected Properties getProperties(String testName) { protected Map getCreationOptions() { return Collections.singletonMap(CassandraAdmin.REPLICATION_FACTOR, "1"); } + + @Override + protected List getPartitionKeyTypes() { + return super.getPartitionKeyTypes().stream() + .filter(type -> type != DataType.TIMESTAMP) + .collect(Collectors.toList()); + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/cassandra/ConsensusCommitAdminIntegrationTestWithCassandra.java b/core/src/integration-test/java/com/scalar/db/storage/cassandra/ConsensusCommitAdminIntegrationTestWithCassandra.java index 25b02a4636..b0eb1c4bab 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cassandra/ConsensusCommitAdminIntegrationTestWithCassandra.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cassandra/ConsensusCommitAdminIntegrationTestWithCassandra.java @@ -22,4 +22,9 @@ protected Map getCreationOptions() { protected AdminTestUtils getAdminTestUtils(String testName) { return new CassandraAdminTestUtils(getProperties(testName)); } + + @Override + protected boolean isTimestampTypeSupported() { + return false; + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/cassandra/ConsensusCommitAdminRepairIntegrationTestWithCassandra.java b/core/src/integration-test/java/com/scalar/db/storage/cassandra/ConsensusCommitAdminRepairIntegrationTestWithCassandra.java index 6ac91b5b4c..5a7ad0e31d 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cassandra/ConsensusCommitAdminRepairIntegrationTestWithCassandra.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cassandra/ConsensusCommitAdminRepairIntegrationTestWithCassandra.java @@ -30,4 +30,9 @@ protected void initialize(String testName) { admin = new ConsensusCommitAdmin(storageAdmin, new DatabaseConfig(properties)); adminTestUtils = new CassandraAdminTestUtils(getProperties(testName), clusterManager); } + + @Override + protected boolean isTimestampTypeSupported() { + return false; + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/cassandra/ConsensusCommitIntegrationTestWithCassandra.java b/core/src/integration-test/java/com/scalar/db/storage/cassandra/ConsensusCommitIntegrationTestWithCassandra.java index fc81a0b1dd..8c6eba8cd2 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cassandra/ConsensusCommitIntegrationTestWithCassandra.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cassandra/ConsensusCommitIntegrationTestWithCassandra.java @@ -16,4 +16,9 @@ protected Properties getProps(String testName) { protected Map getCreationOptions() { return Collections.singletonMap(CassandraAdmin.REPLICATION_FACTOR, "1"); } + 
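The Cassandra test overrides in this patch all follow one pattern: the backend cannot store the new TIMESTAMP type, so each suite either reports isTimestampTypeSupported() as false or filters DataType.TIMESTAMP out of its type matrix. A hypothetical condensed form of that pattern; the base class here is invented for illustration, while the real test bases live in com.scalar.db.api:

import com.scalar.db.io.DataType;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

abstract class TypeMatrixTestBase {
  // Backends that cannot store TIMESTAMP override this to return false.
  protected boolean isTimestampTypeSupported() {
    return true;
  }

  // The suite derives the data types to exercise from the flag above.
  protected List<DataType> getDataTypes() {
    return Arrays.stream(DataType.values())
        .filter(type -> isTimestampTypeSupported() || type != DataType.TIMESTAMP)
        .collect(Collectors.toList());
  }
}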
+ @Override + protected boolean isTimestampTypeSupported() { + return false; + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/cassandra/SingleCrudOperationTransactionAdminIntegrationTestWithCassandra.java b/core/src/integration-test/java/com/scalar/db/storage/cassandra/SingleCrudOperationTransactionAdminIntegrationTestWithCassandra.java index aa46a16da0..4669045a58 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cassandra/SingleCrudOperationTransactionAdminIntegrationTestWithCassandra.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cassandra/SingleCrudOperationTransactionAdminIntegrationTestWithCassandra.java @@ -17,4 +17,9 @@ protected Properties getProps(String testName) { protected Map getCreationOptions() { return Collections.singletonMap(CassandraAdmin.REPLICATION_FACTOR, "1"); } + + @Override + protected boolean isTimestampTypeSupported() { + return false; + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/cassandra/SingleCrudOperationTransactionIntegrationTestWithCassandra.java b/core/src/integration-test/java/com/scalar/db/storage/cassandra/SingleCrudOperationTransactionIntegrationTestWithCassandra.java index 141f8e6c24..6afc1e7748 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cassandra/SingleCrudOperationTransactionIntegrationTestWithCassandra.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cassandra/SingleCrudOperationTransactionIntegrationTestWithCassandra.java @@ -17,4 +17,9 @@ protected Properties getProps(String testName) { protected Map getCreationOptions() { return Collections.singletonMap(CassandraAdmin.REPLICATION_FACTOR, "1"); } + + @Override + protected boolean isTimestampTypeSupported() { + return false; + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/cassandra/TwoPhaseConsensusCommitIntegrationTestWithCassandra.java b/core/src/integration-test/java/com/scalar/db/storage/cassandra/TwoPhaseConsensusCommitIntegrationTestWithCassandra.java index 2e86b7697c..b73b4179cb 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cassandra/TwoPhaseConsensusCommitIntegrationTestWithCassandra.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cassandra/TwoPhaseConsensusCommitIntegrationTestWithCassandra.java @@ -16,4 +16,9 @@ protected Properties getProps1(String testName) { protected Map getCreationOptions() { return Collections.singletonMap(CassandraAdmin.REPLICATION_FACTOR, "1"); } + + @Override + protected boolean isTimestampTypeSupported() { + return false; + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/cosmos/ConsensusCommitAdminRepairIntegrationTestWithCosmos.java b/core/src/integration-test/java/com/scalar/db/storage/cosmos/ConsensusCommitAdminRepairIntegrationTestWithCosmos.java index 4831239269..11d722ee33 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cosmos/ConsensusCommitAdminRepairIntegrationTestWithCosmos.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cosmos/ConsensusCommitAdminRepairIntegrationTestWithCosmos.java @@ -35,7 +35,7 @@ public void repairTable_ForTableWithoutStoredProcedure_ShouldCreateStoredProcedu cosmosAdminTestUtils.getTableStoredProcedure(getNamespace(), getTable()).delete(); // Act - admin.repairTable(getNamespace(), getTable(), TABLE_METADATA, getCreationOptions()); + admin.repairTable(getNamespace(), getTable(), getTableMetadata(), getCreationOptions()); // Assert assertThatCode( diff --git 
a/core/src/integration-test/java/com/scalar/db/storage/cosmos/CosmosAdminRepairIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/cosmos/CosmosAdminRepairIntegrationTest.java index 039f934aa2..fc1426f2ee 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cosmos/CosmosAdminRepairIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cosmos/CosmosAdminRepairIntegrationTest.java @@ -35,7 +35,7 @@ public void repairTable_ForTableWithoutStoredProcedure_ShouldCreateStoredProcedu cosmosAdminTestUtils.getTableStoredProcedure(getNamespace(), getTable()).delete(); // Act - admin.repairTable(getNamespace(), getTable(), TABLE_METADATA, getCreationOptions()); + admin.repairTable(getNamespace(), getTable(), getTableMetadata(), getCreationOptions()); // Assert assertThatCode( diff --git a/core/src/integration-test/java/com/scalar/db/storage/cosmos/CosmosMultipleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/cosmos/CosmosMultipleClusteringKeyScanIntegrationTest.java index ad97f1a7c3..882deee3b9 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cosmos/CosmosMultipleClusteringKeyScanIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cosmos/CosmosMultipleClusteringKeyScanIntegrationTest.java @@ -1,11 +1,11 @@ package com.scalar.db.storage.cosmos; -import com.google.common.collect.ArrayListMultimap; -import com.google.common.collect.ListMultimap; import com.scalar.db.api.DistributedStorageMultipleClusteringKeyScanIntegrationTestBase; import com.scalar.db.io.DataType; +import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.stream.Collectors; public class CosmosMultipleClusteringKeyScanIntegrationTest extends DistributedStorageMultipleClusteringKeyScanIntegrationTestBase { @@ -16,21 +16,11 @@ protected Properties getProperties(String testName) { } @Override - protected ListMultimap getClusteringKeyTypes() { + protected List getDataTypes() { // Return types without BLOB because blob is not supported for clustering key for now - ListMultimap clusteringKeyTypes = ArrayListMultimap.create(); - for (DataType firstClusteringKeyType : DataType.values()) { - if (firstClusteringKeyType == DataType.BLOB) { - continue; - } - for (DataType secondClusteringKeyType : DataType.values()) { - if (secondClusteringKeyType == DataType.BLOB) { - continue; - } - clusteringKeyTypes.put(firstClusteringKeyType, secondClusteringKeyType); - } - } - return clusteringKeyTypes; + return super.getDataTypes().stream() + .filter(type -> type != DataType.BLOB) + .collect(Collectors.toList()); } @Override diff --git a/core/src/integration-test/java/com/scalar/db/storage/cosmos/CosmosSingleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/cosmos/CosmosSingleClusteringKeyScanIntegrationTest.java index c3e77f4c1a..0e035c1db2 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/cosmos/CosmosSingleClusteringKeyScanIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/cosmos/CosmosSingleClusteringKeyScanIntegrationTest.java @@ -2,10 +2,10 @@ import com.scalar.db.api.DistributedStorageSingleClusteringKeyScanIntegrationTestBase; import com.scalar.db.io.DataType; -import java.util.HashSet; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import java.util.Properties; -import java.util.Set; public class CosmosSingleClusteringKeyScanIntegrationTest extends 
DistributedStorageSingleClusteringKeyScanIntegrationTestBase { @@ -15,9 +15,9 @@ protected Properties getProperties(String testName) { } @Override - protected Set getClusteringKeyTypes() { + protected List getClusteringKeyTypes() { // Return types without BLOB because blob is not supported for clustering key for now - Set clusteringKeyTypes = new HashSet<>(); + List clusteringKeyTypes = new ArrayList<>(); for (DataType dataType : DataType.values()) { if (dataType == DataType.BLOB) { continue; diff --git a/core/src/integration-test/java/com/scalar/db/storage/dynamo/DynamoMultipleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/dynamo/DynamoMultipleClusteringKeyScanIntegrationTest.java index 1b714d5a97..6a47db15ee 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/dynamo/DynamoMultipleClusteringKeyScanIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/dynamo/DynamoMultipleClusteringKeyScanIntegrationTest.java @@ -1,13 +1,13 @@ package com.scalar.db.storage.dynamo; -import com.google.common.collect.ArrayListMultimap; -import com.google.common.collect.ListMultimap; import com.scalar.db.api.DistributedStorageMultipleClusteringKeyScanIntegrationTestBase; import com.scalar.db.io.Column; import com.scalar.db.io.DataType; +import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Random; +import java.util.stream.Collectors; public class DynamoMultipleClusteringKeyScanIntegrationTest extends DistributedStorageMultipleClusteringKeyScanIntegrationTestBase { @@ -18,21 +18,11 @@ protected Properties getProperties(String testName) { } @Override - protected ListMultimap getClusteringKeyTypes() { + protected List getDataTypes() { // Return types without BLOB because blob is not supported for clustering key for now - ListMultimap clusteringKeyTypes = ArrayListMultimap.create(); - for (DataType firstClusteringKeyType : DataType.values()) { - if (firstClusteringKeyType == DataType.BLOB) { - continue; - } - for (DataType secondClusteringKeyType : DataType.values()) { - if (secondClusteringKeyType == DataType.BLOB) { - continue; - } - clusteringKeyTypes.put(firstClusteringKeyType, secondClusteringKeyType); - } - } - return clusteringKeyTypes; + return super.getDataTypes().stream() + .filter(type -> type != DataType.BLOB) + .collect(Collectors.toList()); } @Override diff --git a/core/src/integration-test/java/com/scalar/db/storage/dynamo/DynamoSingleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/dynamo/DynamoSingleClusteringKeyScanIntegrationTest.java index 55a0f21f67..378b115af8 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/dynamo/DynamoSingleClusteringKeyScanIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/dynamo/DynamoSingleClusteringKeyScanIntegrationTest.java @@ -3,11 +3,11 @@ import com.scalar.db.api.DistributedStorageSingleClusteringKeyScanIntegrationTestBase; import com.scalar.db.io.Column; import com.scalar.db.io.DataType; -import java.util.HashSet; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Random; -import java.util.Set; public class DynamoSingleClusteringKeyScanIntegrationTest extends DistributedStorageSingleClusteringKeyScanIntegrationTestBase { @@ -18,9 +18,9 @@ protected Properties getProperties(String testName) { } @Override - protected Set getClusteringKeyTypes() { + protected List getClusteringKeyTypes() 
{ // Return types without BLOB because blob is not supported for clustering key for now - Set clusteringKeyTypes = new HashSet<>(); + List clusteringKeyTypes = new ArrayList<>(); for (DataType dataType : DataType.values()) { if (dataType == DataType.BLOB) { continue; diff --git a/core/src/integration-test/java/com/scalar/db/storage/dynamo/DynamoTestUtils.java b/core/src/integration-test/java/com/scalar/db/storage/dynamo/DynamoTestUtils.java index e3148e136e..cc84fa30a2 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/dynamo/DynamoTestUtils.java +++ b/core/src/integration-test/java/com/scalar/db/storage/dynamo/DynamoTestUtils.java @@ -20,8 +20,7 @@ public static DoubleColumn getRandomDynamoDoubleColumn(Random random, String col public static double nextDynamoDouble(Random random) { return random - .doubles(MIN_DYNAMO_DOUBLE_VALUE, MAX_DYNAMO_DOUBLE_VALUE) - .limit(1) + .doubles(1, MIN_DYNAMO_DOUBLE_VALUE, MAX_DYNAMO_DOUBLE_VALUE) .findFirst() .orElse(0.0d); } diff --git a/core/src/integration-test/java/com/scalar/db/storage/jdbc/ConsensusCommitAdminImportTableIntegrationTestWithJdbcDatabase.java b/core/src/integration-test/java/com/scalar/db/storage/jdbc/ConsensusCommitAdminImportTableIntegrationTestWithJdbcDatabase.java index 925f667014..d589c140cc 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/jdbc/ConsensusCommitAdminImportTableIntegrationTestWithJdbcDatabase.java +++ b/core/src/integration-test/java/com/scalar/db/storage/jdbc/ConsensusCommitAdminImportTableIntegrationTestWithJdbcDatabase.java @@ -1,10 +1,10 @@ package com.scalar.db.storage.jdbc; -import com.scalar.db.api.TableMetadata; +import com.scalar.db.api.DistributedStorageAdminImportTableIntegrationTestBase.TestData; import com.scalar.db.exception.storage.ExecutionException; import com.scalar.db.transaction.consensuscommit.ConsensusCommitAdminImportTableIntegrationTestBase; import java.sql.SQLException; -import java.util.Map; +import java.util.List; import java.util.Properties; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.condition.DisabledIf; @@ -44,8 +44,7 @@ public void afterAll() { } @Override - protected Map createExistingDatabaseWithAllDataTypes() - throws SQLException { + protected List createExistingDatabaseWithAllDataTypes() throws SQLException { return testUtils.createExistingDatabaseWithAllDataTypes(getNamespace()); } diff --git a/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcAdminImportTableIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcAdminImportTableIntegrationTest.java index 3055572108..3a27e8e633 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcAdminImportTableIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcAdminImportTableIntegrationTest.java @@ -1,10 +1,9 @@ package com.scalar.db.storage.jdbc; import com.scalar.db.api.DistributedStorageAdminImportTableIntegrationTestBase; -import com.scalar.db.api.TableMetadata; import com.scalar.db.exception.storage.ExecutionException; import java.sql.SQLException; -import java.util.Map; +import java.util.List; import java.util.Properties; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.condition.DisabledIf; @@ -44,8 +43,7 @@ public void afterAll() { } @Override - protected Map createExistingDatabaseWithAllDataTypes() - throws SQLException { + protected List createExistingDatabaseWithAllDataTypes() throws SQLException { return 
testUtils.createExistingDatabaseWithAllDataTypes(getNamespace()); } diff --git a/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcAdminImportTestUtils.java b/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcAdminImportTestUtils.java index 237c7304e7..cba5eb9b75 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcAdminImportTestUtils.java +++ b/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcAdminImportTestUtils.java @@ -1,14 +1,39 @@ package com.scalar.db.storage.jdbc; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.scalar.db.api.DistributedStorageAdminImportTableIntegrationTestBase.TestData; +import com.scalar.db.api.Get; +import com.scalar.db.api.Insert; +import com.scalar.db.api.InsertBuilder; +import com.scalar.db.api.Put; +import com.scalar.db.api.PutBuilder; import com.scalar.db.api.TableMetadata; import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.Column; import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; +import com.scalar.db.io.IntColumn; +import com.scalar.db.io.Key; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import java.nio.charset.StandardCharsets; import java.sql.Connection; import java.sql.SQLException; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.List; @@ -16,6 +41,7 @@ import java.util.Properties; import java.util.stream.Collectors; import java.util.stream.Stream; +import javax.annotation.Nullable; import org.apache.commons.dbcp2.BasicDataSource; public class JdbcAdminImportTestUtils { @@ -24,16 +50,12 @@ public class JdbcAdminImportTestUtils { Arrays.asList( "BIGINT UNSIGNED", "BIT(8)", - "DATE", - "DATETIME", "DECIMAL(8,2)", "ENUM('a','b')", "SET('a','b')", "GEOMETRY", "JSON", // we remove this for MariaDB because it is an alias of a supported type, LONGTEXT "NUMERIC", - "TIME", - "TIMESTAMP", "YEAR"); static final List UNSUPPORTED_DATA_TYPES_PGSQL = Arrays.asList( @@ -43,7 +65,6 @@ public class JdbcAdminImportTestUtils { "box", "cidr", "circle", - "date", "inet", "interval", "json", @@ -60,10 +81,7 @@ public class JdbcAdminImportTestUtils { "polygon", "serial", "smallserial", - "time", "time with time zone", - "timestamp", - "timestamp with time zone", "tsquery", "tsvector", "txid_snapshot", @@ -74,34 +92,24 @@ public class JdbcAdminImportTestUtils { static final List UNSUPPORTED_DATA_TYPES_ORACLE = Arrays.asList( "BFILE", - "DATE", "FLOAT(54)", "INT", "INTERVAL YEAR(3) TO MONTH", "INTERVAL DAY(2) TO SECOND", "NUMBER(16,0)", "ROWID", - "TIMESTAMP", - "TIMESTAMP WITH TIME ZONE", - "TIMESTAMP WITH LOCAL TIME ZONE", "UROWID"); static final List UNSUPPORTED_DATA_TYPES_ORACLE_V20_OR_LATER = Collections.singletonList("JSON"); static final List UNSUPPORTED_DATA_TYPES_MSSQL = Arrays.asList( - "date", - "datetime", - "datetime2", - "datetimeoffset", "decimal(8,2)", "hierarchyid", "money", "numeric(8,2)", "rowversion", - "smalldatetime", "smallmoney", 
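          // Note (editorial, not part of this patch): the date/time-related entries (e.g. "date",
          // "datetime", "time", "timestamp") are removed from these unsupported-type lists because
          // this patch maps them to the new DATE, TIME, TIMESTAMP, and TIMESTAMPTZ ScalarDB types,
          // so such columns are now importable.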
"sql_variant", - "time", "uniqueidentifier", "xml", "geometry", @@ -118,7 +126,7 @@ public JdbcAdminImportTestUtils(Properties properties) { majorVersion = getMajorVersion(); } - public Map createExistingDatabaseWithAllDataTypes(String namespace) + public List createExistingDatabaseWithAllDataTypes(String namespace) throws SQLException { execute(rdbEngine.createSchemaSqls(namespace)); if (JdbcTestUtils.isMysql(rdbEngine)) { @@ -175,9 +183,18 @@ private LinkedHashMap prepareColumnsForMysql() { columns.put("col18", "TINYBLOB"); columns.put("col19", "MEDIUMBLOB"); columns.put("col20", "LONGBLOB"); - columns.put("col21", "BINARY(255)"); + columns.put("col21", "BINARY(5)"); + columns.put("col22", "DATE"); + columns.put("col23", "TIME(6)"); + columns.put("col24", "DATETIME(6)"); + columns.put("col25", "DATETIME(6)"); // override to TIMESTAMPTZ + // With MySQL 5.7, if a TIMESTAMP column is not explicitly declared with the NULL attribute, it + // is declared automatically with the NOT NULL attribute and assigned a default value equal to + // the current timestamp. + // cf. the "--explicit-defaults-for-timestamp" option documentation + columns.put("col26", "TIMESTAMP(6) NULL"); if (isMariaDB()) { - columns.put("col22", "JSON"); + columns.put("col27", "JSON"); } return columns; } @@ -208,14 +225,34 @@ private TableMetadata prepareTableMetadataForMysql() { .addColumn("col19", DataType.BLOB) .addColumn("col20", DataType.BLOB) .addColumn("col21", DataType.BLOB) + .addColumn("col22", DataType.DATE) + .addColumn("col23", DataType.TIME) + .addColumn("col24", DataType.TIMESTAMP) + .addColumn("col25", DataType.TIMESTAMPTZ) + .addColumn("col26", DataType.TIMESTAMPTZ) .addPartitionKey("pk1") .addPartitionKey("pk2"); if (isMariaDB()) { - builder.addColumn("col22", DataType.TEXT); + builder.addColumn("col27", DataType.TEXT); } return builder.build(); } + private Map> prepareInsertColumnsForMysql(TableMetadata metadata) { + ImmutableList.Builder> customColumns = new ImmutableList.Builder<>(); + customColumns.add( + TimestampTZColumn.of( + "col26", LocalDateTime.of(2005, 10, 11, 8, 35).toInstant(ZoneOffset.UTC))); + if (isMariaDB()) { + customColumns.add(TextColumn.of("col27", "{\"foo\": \"bar\"}")); + } + return prepareInsertColumnsWithGenericAndCustomValues(metadata, customColumns.build()); + } + + private Map prepareOverrideColumnsTypeForMysql() { + return ImmutableMap.of("col25", DataType.TIMESTAMPTZ); + } + private LinkedHashMap prepareColumnsForPostgresql() { LinkedHashMap columns = new LinkedHashMap<>(); columns.put("pk1", "integer"); @@ -226,10 +263,14 @@ private LinkedHashMap prepareColumnsForPostgresql() { columns.put("col04", "bigint"); columns.put("col05", "real"); columns.put("col06", "double precision"); - columns.put("col07", "char(8)"); + columns.put("col07", "char(3)"); columns.put("col08", "varchar(512)"); columns.put("col09", "text"); columns.put("col10", "bytea"); + columns.put("col11", "date"); + columns.put("col12", "time"); + columns.put("col13", "timestamp"); + columns.put("col14", "timestamp with time zone"); return columns; } @@ -247,6 +288,10 @@ private TableMetadata prepareTableMetadataForPostgresql() { .addColumn("col08", DataType.TEXT) .addColumn("col09", DataType.TEXT) .addColumn("col10", DataType.BLOB) + .addColumn("col11", DataType.DATE) + .addColumn("col12", DataType.TIME) + .addColumn("col13", DataType.TIMESTAMP) + .addColumn("col14", DataType.TIMESTAMPTZ) .addPartitionKey("pk1") .addPartitionKey("pk2") .build(); @@ -254,25 +299,37 @@ private TableMetadata 
prepareTableMetadataForPostgresql() { private LinkedHashMap prepareColumnsForOracle() { LinkedHashMap columns = new LinkedHashMap<>(); - columns.put("pk1", "CHAR(8)"); - columns.put("pk2", "CHAR(8)"); + columns.put("pk1", "CHAR(3)"); + columns.put("pk2", "CHAR(3)"); columns.put("col01", "NUMERIC(15,0)"); columns.put("col02", "NUMERIC(15,2)"); columns.put("col03", "FLOAT(53)"); columns.put("col04", "BINARY_FLOAT"); columns.put("col05", "BINARY_DOUBLE"); - columns.put("col06", "CHAR(8)"); + columns.put("col06", "CHAR(3)"); columns.put("col07", "VARCHAR2(512)"); - columns.put("col08", "NCHAR(8)"); + columns.put("col08", "NCHAR(3)"); columns.put("col09", "NVARCHAR2(512)"); columns.put("col10", "CLOB"); columns.put("col11", "NCLOB"); columns.put("col12", "LONG"); columns.put("col13", "BLOB"); columns.put("col14", "RAW(1024)"); + columns.put("col15", "DATE"); + columns.put("col16", "DATE"); // override to TIME + columns.put("col17", "DATE"); // override to TIMESTAMP + columns.put("col18", "TIMESTAMP"); + columns.put("col19", "TIMESTAMP"); // override to TIME + columns.put("col20", "TIMESTAMP WITH TIME ZONE"); + columns.put("col21", "TIMESTAMP WITH LOCAL TIME ZONE"); return columns; } + private Map prepareOverrideColumnsTypeForOracle() { + return ImmutableMap.of( + "col16", DataType.TIME, "col17", DataType.TIMESTAMP, "col19", DataType.TIME); + } + private TableMetadata prepareTableMetadataForOracle() { return TableMetadata.newBuilder() .addColumn("pk1", DataType.TEXT) @@ -291,15 +348,30 @@ private TableMetadata prepareTableMetadataForOracle() { .addColumn("col12", DataType.TEXT) .addColumn("col13", DataType.BLOB) .addColumn("col14", DataType.BLOB) + .addColumn("col15", DataType.DATE) + .addColumn("col16", DataType.TIME) + .addColumn("col17", DataType.TIMESTAMP) + .addColumn("col18", DataType.TIMESTAMP) + .addColumn("col19", DataType.TIME) + .addColumn("col20", DataType.TIMESTAMPTZ) + .addColumn("col21", DataType.TIMESTAMPTZ) .addPartitionKey("pk1") .addPartitionKey("pk2") .build(); } + private Map> prepareInsertColumnsForOracle(TableMetadata metadata) { + List> customColumns = + ImmutableList.of( + TimeColumn.of("col16", LocalTime.of(11, 8, 35)), + TimestampColumn.of("col17", LocalDateTime.of(1905, 10, 11, 8, 35))); + return prepareInsertColumnsWithGenericAndCustomValues(metadata, customColumns); + } + private LinkedHashMap prepareColumnsForOracleLongRaw() { LinkedHashMap columns = new LinkedHashMap<>(); - columns.put("pk1", "CHAR(8)"); - columns.put("pk2", "CHAR(8)"); + columns.put("pk1", "CHAR(3)"); + columns.put("pk2", "CHAR(3)"); columns.put("col", "LONG RAW"); return columns; } @@ -325,15 +397,22 @@ private LinkedHashMap prepareColumnsForSqlServer() { columns.put("col05", "bigint"); columns.put("col06", "real"); columns.put("col07", "float"); - columns.put("col08", "char(8)"); + columns.put("col08", "char(3)"); columns.put("col09", "varchar(512)"); - columns.put("col10", "nchar(8)"); + columns.put("col10", "nchar(3)"); columns.put("col11", "nvarchar(512)"); columns.put("col12", "text"); columns.put("col13", "ntext"); - columns.put("col14", "binary"); - columns.put("col15", "varbinary"); + columns.put("col14", "binary(5)"); + columns.put("col15", "varbinary(5)"); columns.put("col16", "image"); + columns.put("col17", "date"); + columns.put("col18", "time"); + columns.put("col19", "datetime"); + columns.put("col20", "datetime2"); + columns.put("col21", "smalldatetime"); + columns.put("col22", "datetimeoffset"); + return columns; } @@ -357,19 +436,35 @@ private TableMetadata 
prepareTableMetadataForSqlServer() { .addColumn("col14", DataType.BLOB) .addColumn("col15", DataType.BLOB) .addColumn("col16", DataType.BLOB) + .addColumn("col17", DataType.DATE) + .addColumn("col18", DataType.TIME) + .addColumn("col19", DataType.TIMESTAMP) + .addColumn("col20", DataType.TIMESTAMP) + .addColumn("col21", DataType.TIMESTAMP) + .addColumn("col22", DataType.TIMESTAMPTZ) .addPartitionKey("pk1") .addPartitionKey("pk2") .build(); } - private Map prepareCreateNonImportableTableSql( + private Map> prepareInsertColumnsForSqlServer(TableMetadata metadata) { + List> customColumns = + ImmutableList.of( + TimestampColumn.of("col19", LocalDateTime.of(1905, 10, 11, 8, 35, 14, 123_000_000)), + TimestampColumn.of("col21", LocalDateTime.of(1905, 10, 11, 8, 35))); + return prepareInsertColumnsWithGenericAndCustomValues(metadata, customColumns); + } + + private List prepareCreateNonImportableTableSql( String namespace, List types) { - Map tables = new HashMap<>(); + ImmutableList.Builder data = new ImmutableList.Builder<>(); for (int i = 0; i < types.size(); i++) { String table = "bad_table" + i; - tables.put(table, prepareCreateNonImportableTableSql(namespace, table, types.get(i))); + data.add( + JdbcTestData.createNonImportableTable( + table, prepareCreateNonImportableTableSql(namespace, table, types.get(i)))); } - return tables; + return data.build(); } private String prepareCreateNonImportableTableSql(String namespace, String table, String type) { @@ -396,51 +491,60 @@ private String prepareCreateTableSql( + "))"; } - private Map createExistingMysqlDatabaseWithAllDataTypes(String namespace) + private List createExistingMysqlDatabaseWithAllDataTypes(String namespace) throws SQLException { + List data = new ArrayList<>(); TableMetadata tableMetadata = prepareTableMetadataForMysql(); - Map supportedTables = - Collections.singletonMap( + String sql = + prepareCreateTableSql( + namespace, + SUPPORTED_TABLE_NAME, + prepareColumnsForMysql(), + tableMetadata.getPartitionKeyNames()); + data.add( + JdbcTestData.createImportableTable( SUPPORTED_TABLE_NAME, - prepareCreateTableSql( - namespace, - SUPPORTED_TABLE_NAME, - prepareColumnsForMysql(), - tableMetadata.getPartitionKeyNames())); - Map supportedTableMetadata = - Collections.singletonMap(SUPPORTED_TABLE_NAME, tableMetadata); - - Map unsupportedTables; + sql, + tableMetadata, + prepareOverrideColumnsTypeForMysql(), + prepareInsertColumnsForMysql(tableMetadata))); + if (isMariaDB()) { - unsupportedTables = + data.addAll( prepareCreateNonImportableTableSql( namespace, UNSUPPORTED_DATA_TYPES_MYSQL.stream() .filter(type -> !type.equalsIgnoreCase("JSON")) - .collect(Collectors.toList())); + .collect(Collectors.toList()))); } else { - unsupportedTables = - prepareCreateNonImportableTableSql(namespace, UNSUPPORTED_DATA_TYPES_MYSQL); + data.addAll(prepareCreateNonImportableTableSql(namespace, UNSUPPORTED_DATA_TYPES_MYSQL)); } - return executeCreateTableSql(supportedTables, supportedTableMetadata, unsupportedTables); + executeCreateTableSql(data); + + return ImmutableList.copyOf(data); } - private Map createExistingPostgresDatabaseWithAllDataTypes( - String namespace) throws SQLException { + private List createExistingPostgresDatabaseWithAllDataTypes(String namespace) + throws SQLException { + List data = new ArrayList<>(); + TableMetadata tableMetadata = prepareTableMetadataForPostgresql(); - Map supportedTables = - Collections.singletonMap( + String sql = + prepareCreateTableSql( + namespace, + SUPPORTED_TABLE_NAME, + prepareColumnsForPostgresql(), + 
+            tableMetadata.getPartitionKeyNames());
+    data.add(
+        JdbcTestData.createImportableTable(
             SUPPORTED_TABLE_NAME,
-            prepareCreateTableSql(
-                namespace,
-                SUPPORTED_TABLE_NAME,
-                prepareColumnsForPostgresql(),
-                tableMetadata.getPartitionKeyNames()));
-    Map<String, TableMetadata> supportedTableMetadata =
-        Collections.singletonMap(SUPPORTED_TABLE_NAME, tableMetadata);
-
-    Map<String, String> unsupportedTables =
+            sql,
+            tableMetadata,
+            Collections.emptyMap(),
+            prepareInsertColumnsWithGenericValues(tableMetadata)));
+
+    data.addAll(
         prepareCreateNonImportableTableSql(
             namespace,
             majorVersion >= 13
@@ -448,38 +552,50 @@ private Map createExistingPostgresDatabaseWithAllDataType
                     UNSUPPORTED_DATA_TYPES_PGSQL.stream(),
                     UNSUPPORTED_DATA_TYPES_PGSQL_V13_OR_LATER.stream())
                 .collect(Collectors.toList())
-                : UNSUPPORTED_DATA_TYPES_PGSQL);
+                : UNSUPPORTED_DATA_TYPES_PGSQL));
+
+    executeCreateTableSql(data);
-    return executeCreateTableSql(supportedTables, supportedTableMetadata, unsupportedTables);
+    return ImmutableList.copyOf(data);
   }
 
-  private Map<String, TableMetadata> createExistingOracleDatabaseWithAllDataTypes(String namespace)
+  private List<TestData> createExistingOracleDatabaseWithAllDataTypes(String namespace)
       throws SQLException {
-    Map<String, String> supportedTables = new HashMap<>();
-    Map<String, TableMetadata> supportedTableMetadata = new HashMap<>();
+    List<TestData> data = new ArrayList<>();
     TableMetadata tableMetadata = prepareTableMetadataForOracle();
-    supportedTables.put(
-        SUPPORTED_TABLE_NAME,
+    String sql =
         prepareCreateTableSql(
             namespace,
             SUPPORTED_TABLE_NAME,
             prepareColumnsForOracle(),
-            tableMetadata.getPartitionKeyNames()));
-    supportedTableMetadata.put(SUPPORTED_TABLE_NAME, tableMetadata);
+            tableMetadata.getPartitionKeyNames());
+    data.add(
+        JdbcTestData.createImportableTable(
+            SUPPORTED_TABLE_NAME,
+            sql,
+            tableMetadata,
+            prepareOverrideColumnsTypeForOracle(),
+            prepareInsertColumnsForOracle(tableMetadata)));
 
     // LONG columns must be tested in separate tables since they cannot coexist
     TableMetadata longRawTableMetadata = prepareTableMetadataForOracleForLongRaw();
-    supportedTables.put(
-        SUPPORTED_TABLE_NAME + "_long_raw",
+    String longRawSupportedTable = SUPPORTED_TABLE_NAME + "_long_raw";
+    String longRawSql =
         prepareCreateTableSql(
             namespace,
-            SUPPORTED_TABLE_NAME + "_long_raw",
+            longRawSupportedTable,
             prepareColumnsForOracleLongRaw(),
-            longRawTableMetadata.getPartitionKeyNames()));
-    supportedTableMetadata.put(SUPPORTED_TABLE_NAME + "_long_raw", longRawTableMetadata);
-
-    Map<String, String> unsupportedTables =
+            longRawTableMetadata.getPartitionKeyNames());
+    data.add(
+        JdbcTestData.createImportableTable(
+            longRawSupportedTable,
+            longRawSql,
+            longRawTableMetadata,
+            Collections.emptyMap(),
+            prepareInsertColumnsWithGenericValues(longRawTableMetadata)));
+
+    data.addAll(
         prepareCreateNonImportableTableSql(
             namespace,
             majorVersion >= 20
@@ -487,55 +603,42 @@ private Map createExistingOracleDatabaseWithAllDataTypes(
                     UNSUPPORTED_DATA_TYPES_ORACLE.stream(),
                     UNSUPPORTED_DATA_TYPES_ORACLE_V20_OR_LATER.stream())
                 .collect(Collectors.toList())
-                : UNSUPPORTED_DATA_TYPES_ORACLE);
+                : UNSUPPORTED_DATA_TYPES_ORACLE));
-    return executeCreateTableSql(supportedTables, supportedTableMetadata, unsupportedTables);
+    executeCreateTableSql(data);
+
+    return ImmutableList.copyOf(data);
   }
 
-  private Map<String, TableMetadata> createExistingSqlServerDatabaseWithAllDataTypes(
-      String namespace) throws SQLException {
+  private List<TestData> createExistingSqlServerDatabaseWithAllDataTypes(String namespace)
+      throws SQLException {
+    List<TestData> data = new ArrayList<>();
+    TableMetadata tableMetadata = prepareTableMetadataForSqlServer();
-    Map<String, String>
supportedTables = - Collections.singletonMap( + String sql = + prepareCreateTableSql( + namespace, SUPPORTED_TABLE_NAME, - prepareCreateTableSql( - namespace, - SUPPORTED_TABLE_NAME, - prepareColumnsForSqlServer(), - tableMetadata.getPartitionKeyNames())); - Map supportedTableMetadata = - Collections.singletonMap(SUPPORTED_TABLE_NAME, tableMetadata); - - Map unsupportedTables = - prepareCreateNonImportableTableSql(namespace, UNSUPPORTED_DATA_TYPES_MSSQL); - - return executeCreateTableSql(supportedTables, supportedTableMetadata, unsupportedTables); - } + prepareColumnsForSqlServer(), + tableMetadata.getPartitionKeyNames()); + data.add( + JdbcTestData.createImportableTable( + SUPPORTED_TABLE_NAME, + sql, + tableMetadata, + Collections.emptyMap(), + prepareInsertColumnsForSqlServer(tableMetadata))); - private Map executeCreateTableSql( - Map supportedTables, - Map supportedTableMetadata, - Map unsupportedTables) - throws SQLException { - Map results = new HashMap<>(); - List sqls = new ArrayList<>(); + data.addAll(prepareCreateNonImportableTableSql(namespace, UNSUPPORTED_DATA_TYPES_MSSQL)); - // table with all supported columns - supportedTables.forEach( - (table, sql) -> { - sqls.add(sql); - results.put(table, supportedTableMetadata.get(table)); - }); + executeCreateTableSql(data); - // tables with an unsupported column - unsupportedTables.forEach( - (table, sql) -> { - sqls.add(sql); - results.put(table, null); - }); + return ImmutableList.copyOf(data); + } - execute(sqls.toArray(new String[0])); - return results; + private void executeCreateTableSql(List data) throws SQLException { + String[] sqls = data.stream().map(JdbcTestData::getCreateTableSql).toArray(String[]::new); + execute(sqls); } private boolean isMariaDB() { @@ -555,7 +658,196 @@ private int getMajorVersion() { } } + private Map> prepareInsertColumnsWithGenericAndCustomValues( + TableMetadata tableMetadata, List> customColumns) { + Map> genericColumnValuesByName = + prepareInsertColumnsWithGenericValues(tableMetadata); + customColumns.forEach(column -> genericColumnValuesByName.put(column.getName(), column)); + + return genericColumnValuesByName; + } + + private Map> prepareInsertColumnsWithGenericValues( + TableMetadata tableMetadata) { + return tableMetadata.getColumnNames().stream() + .map( + columnName -> + prepareGenericColumnValue(columnName, tableMetadata.getColumnDataType(columnName))) + .collect(Collectors.toMap(Column::getName, column -> column)); + } + + private Column prepareGenericColumnValue(String columnName, DataType columnType) { + switch (columnType) { + case INT: + return IntColumn.of(columnName, 1); + case TEXT: + return TextColumn.of(columnName, "foo"); + case BLOB: + return BlobColumn.of(columnName, "ABCDE".getBytes(StandardCharsets.UTF_8)); + case FLOAT: + return FloatColumn.of(columnName, 1.2F); + case DOUBLE: + return DoubleColumn.of(columnName, 4.23); + case BIGINT: + return BigIntColumn.of(columnName, 101); + case BOOLEAN: + return BooleanColumn.of(columnName, true); + case DATE: + return DateColumn.of(columnName, LocalDate.of(1003, 7, 14)); + case TIME: + return TimeColumn.of(columnName, LocalTime.of(5, 45, 33, 123_456_000)); + case TIMESTAMP: + return TimestampColumn.of(columnName, LocalDateTime.of(1003, 3, 2, 8, 35, 12, 123_000_000)); + case TIMESTAMPTZ: + return TimestampTZColumn.of( + columnName, + LocalDateTime.of(1003, 3, 2, 8, 35, 12, 123_000_000).toInstant(ZoneOffset.UTC)); + default: + throw new AssertionError(); + } + } + public void close() throws SQLException { dataSource.close(); } + + 
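For orientation, the following sketch (not part of this patch) shows how an import-table test base could drive the TestData abstraction defined just below; the TestData methods come from this diff, while the harness (DistributedStorageAdmin, DistributedStorage, AssertJ assertions) and the verify flow are assumptions for illustration only.

  // Hypothetical consumer of TestData (sketch only; harness names are assumed).
  // Assumed imports: com.scalar.db.api.*, com.scalar.db.exception.storage.ExecutionException,
  // java.util.*, and static org.assertj.core.api.Assertions.{assertThat, assertThatThrownBy}.
  private void importAndVerify(
      DistributedStorageAdmin admin,
      DistributedStorage storage,
      String namespace,
      List<TestData> testDataList)
      throws ExecutionException {
    for (TestData testData : testDataList) {
      String table = testData.getTableName();
      if (!testData.isImportableTable()) {
        // Tables created with an unsupported column type must be rejected on import
        assertThatThrownBy(
                () ->
                    admin.importTable(
                        namespace, table, Collections.emptyMap(), Collections.emptyMap()))
            .isInstanceOf(IllegalArgumentException.class);
        continue;
      }
      // Import with the per-column type overrides carried by the test data
      admin.importTable(namespace, table, Collections.emptyMap(), testData.getOverrideColumnsType());
      assertThat(admin.getTableMetadata(namespace, table)).isEqualTo(testData.getTableMetadata());
      // Round-trip a row through the imported table
      storage.put(testData.getPut(namespace, table));
      assertThat(storage.get(testData.getGet(namespace, table))).isPresent();
    }
  }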
+  @SuppressWarnings("UseCorrectAssertInTests")
+  public static class JdbcTestData implements TestData {
+
+    private final String tableName;
+    private final String createTableSql;
+    private final @Nullable Map<String, DataType> overrideColumnsType;
+    private final @Nullable TableMetadata tableMetadata;
+    private final @Nullable Map<String, Column<?>> columns;
+
+    private JdbcTestData(
+        String tableName,
+        String createTableSql,
+        @Nullable Map<String, DataType> overrideColumnsType,
+        @Nullable TableMetadata tableMetadata,
+        @Nullable Map<String, Column<?>> columns) {
+      this.tableName = tableName;
+      this.createTableSql = createTableSql;
+      this.tableMetadata = tableMetadata;
+      this.overrideColumnsType = overrideColumnsType;
+      this.columns = columns;
+    }
+
+    /**
+     * Creates test data for a table that can be imported.
+     *
+     * @param tableName the table name
+     * @param createTableSql the SQL statement to create the table
+     * @param tableMetadata the expected table metadata of the table once imported
+     * @param overrideColumnsType the columns for which the default data type mapping should be
+     *     overridden when importing the table
+     * @param columns the column values used to perform write-read operations
+     * @return the test data
+     */
+    public static JdbcTestData createImportableTable(
+        String tableName,
+        String createTableSql,
+        TableMetadata tableMetadata,
+        Map<String, DataType> overrideColumnsType,
+        Map<String, Column<?>> columns) {
+      return new JdbcTestData(
+          tableName, createTableSql, overrideColumnsType, tableMetadata, columns);
+    }
+
+    /**
+     * Creates test data for a table that cannot be imported.
+     *
+     * @param tableName the table name
+     * @param createTableSql the SQL statement to create the table
+     * @return the test data
+     */
+    public static JdbcTestData createNonImportableTable(String tableName, String createTableSql) {
+      return new JdbcTestData(tableName, createTableSql, null, null, null);
+    }
+
+    @Override
+    public boolean isImportableTable() {
+      return tableMetadata != null;
+    }
+
+    @Override
+    public String getTableName() {
+      return tableName;
+    }
+
+    private String getCreateTableSql() {
+      return createTableSql;
+    }
+
+    @Override
+    public Map<String, DataType> getOverrideColumnsType() {
+      assert overrideColumnsType != null;
+      return ImmutableMap.copyOf(overrideColumnsType);
+    }
+
+    @Override
+    public TableMetadata getTableMetadata() {
+      assert tableMetadata != null;
+      return tableMetadata;
+    }
+
+    @Override
+    public Insert getInsert(String namespace, String table) {
+      assert columns != null;
+      assert tableMetadata != null;
+
+      InsertBuilder.Buildable insert =
+          Insert.newBuilder().namespace(namespace).table(table).partitionKey(preparePartitionKey());
+      columns.forEach(
+          (name, column) -> {
+            if (!tableMetadata.getPartitionKeyNames().contains(name)) {
+              insert.value(column);
+            }
+          });
+
+      return insert.build();
+    }
+
+    @Override
+    public Put getPut(String namespace, String table) {
+      assert columns != null;
+      assert tableMetadata != null;
+
+      PutBuilder.Buildable put =
+          Put.newBuilder().namespace(namespace).table(table).partitionKey(preparePartitionKey());
+      columns.forEach(
+          (name, column) -> {
+            if (!tableMetadata.getPartitionKeyNames().contains(name)) {
+              put.value(column);
+            }
+          });
+
+      return put.build();
+    }
+
+    @Override
+    public Get getGet(String namespace, String table) {
+      assert columns != null;
+
+      return Get.newBuilder()
+          .namespace(namespace)
+          .table(table)
+          .partitionKey(preparePartitionKey())
+          .build();
+    }
+
+    private Key preparePartitionKey() {
+      assert tableMetadata != null;
+      Key.Builder key = Key.newBuilder();
+      tableMetadata
+          .getPartitionKeyNames()
+          .forEach(
+              col -> {
+                assert columns != null;
+                key.add(columns.get(col));
+              });
+
+      return key.build();
+    }
+  }
 }
diff --git a/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseCrossPartitionScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseCrossPartitionScanIntegrationTest.java
index ae3a57ef4a..18a19dbcbc 100644
--- a/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseCrossPartitionScanIntegrationTest.java
+++ b/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseCrossPartitionScanIntegrationTest.java
@@ -4,8 +4,13 @@
 import com.scalar.db.config.DatabaseConfig;
 import com.scalar.db.io.Column;
 import com.scalar.db.io.DataType;
+import java.util.Collections;
+import java.util.List;
 import java.util.Properties;
 import java.util.Random;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.junit.jupiter.params.provider.Arguments;
 
 public class JdbcDatabaseCrossPartitionScanIntegrationTest
     extends DistributedStorageCrossPartitionScanIntegrationTestBase {
@@ -45,4 +50,25 @@ protected boolean isParallelDdlSupported() {
     }
     return super.isParallelDdlSupported();
   }
+
+  @Override
+  protected Stream<Arguments> provideColumnsForCNFConditionsTest() {
+    List<String> allColumnNames =
+        prepareNonKeyColumns(0).stream().map(Column::getName).collect(Collectors.toList());
+
+    if ((JdbcTestUtils.isOracle(rdbEngine)
+        || JdbcTestUtils.isSqlServer(rdbEngine)
+        || JdbcTestUtils.isSqlite(rdbEngine))) {
+      // Oracle, SQL Server, and SQLite do not support queries with too many conditions in CNF
+      // because the conditions are internally converted to DNF, which can become too large for
+      // the storage to process.
+      // So we randomly split the columns into two halves and run the test in two executions.
+      Collections.shuffle(allColumnNames, random.get());
+      return Stream.of(
+          Arguments.of(allColumnNames.subList(0, allColumnNames.size() / 2)),
+          Arguments.of(allColumnNames.subList(allColumnNames.size() / 2, allColumnNames.size())));
+    } else {
+      return Stream.of(Arguments.of(allColumnNames));
+    }
+  }
 }
diff --git a/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseMultipleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseMultipleClusteringKeyScanIntegrationTest.java
index 51041370cd..1af1ea0eae 100644
--- a/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseMultipleClusteringKeyScanIntegrationTest.java
+++ b/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseMultipleClusteringKeyScanIntegrationTest.java
@@ -1,10 +1,13 @@
 package com.scalar.db.storage.jdbc;
 
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
 import com.google.errorprone.annotations.concurrent.LazyInit;
 import com.scalar.db.api.DistributedStorageMultipleClusteringKeyScanIntegrationTestBase;
 import com.scalar.db.config.DatabaseConfig;
 import com.scalar.db.io.Column;
 import com.scalar.db.io.DataType;
+import java.util.List;
 import java.util.Properties;
 import java.util.Random;
 
@@ -79,4 +82,13 @@ protected Column getColumnWithMaxValue(String columnName, DataType dataType)
     }
     return super.getColumnWithMaxValue(columnName, dataType);
   }
+
+  @Override
+  protected List<DataType> getDataTypes() {
+    // TIMESTAMP WITH TIME ZONE type cannot be used as a primary key in Oracle
+    return JdbcTestUtils.filterDataTypes(
+        super.getDataTypes(),
+        rdbEngine,
+        ImmutableMap.of(RdbEngineOracle.class, ImmutableList.of(DataType.TIMESTAMPTZ)));
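+    // Illustrative note (editorial, not part of this patch): given the exclusion map above, with
+    // the Oracle engine filterDataTypes(...) returns every DataType from super.getDataTypes()
+    // except TIMESTAMPTZ; for all other engines the list comes back unchanged.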
+ } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseMultiplePartitionKeyIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseMultiplePartitionKeyIntegrationTest.java index a0ed3a1250..7672b18074 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseMultiplePartitionKeyIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseMultiplePartitionKeyIntegrationTest.java @@ -1,10 +1,13 @@ package com.scalar.db.storage.jdbc; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import com.google.errorprone.annotations.concurrent.LazyInit; import com.scalar.db.api.DistributedStorageMultiplePartitionKeyIntegrationTestBase; import com.scalar.db.config.DatabaseConfig; import com.scalar.db.io.Column; import com.scalar.db.io.DataType; +import java.util.List; import java.util.Properties; import java.util.Random; @@ -37,14 +40,6 @@ protected boolean isParallelDdlSupported() { return super.isParallelDdlSupported(); } - @Override - protected boolean isFloatTypeKeySupported() { - if (JdbcTestUtils.isYugabyte(rdbEngine)) { - return false; - } - return super.isFloatTypeKeySupported(); - } - @Override protected Column getColumnWithRandomValue( Random random, String columnName, DataType dataType) { @@ -80,4 +75,18 @@ protected Column getColumnWithMaxValue(String columnName, DataType dataType) } return super.getColumnWithMaxValue(columnName, dataType); } + + @Override + protected List getDataTypes() { + // TIMESTAMP WITH TIME ZONE type cannot be used as a primary key in Oracle + // FLOAT and DOUBLE types cannot be used as partition key in Yugabyte + return JdbcTestUtils.filterDataTypes( + super.getDataTypes(), + rdbEngine, + ImmutableMap.of( + RdbEngineOracle.class, + ImmutableList.of(DataType.TIMESTAMPTZ), + RdbEngineYugabyte.class, + ImmutableList.of(DataType.FLOAT, DataType.DOUBLE))); + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseSingleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseSingleClusteringKeyScanIntegrationTest.java index 4a7e9eda95..01f3407a2e 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseSingleClusteringKeyScanIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseSingleClusteringKeyScanIntegrationTest.java @@ -1,9 +1,12 @@ package com.scalar.db.storage.jdbc; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import com.scalar.db.api.DistributedStorageSingleClusteringKeyScanIntegrationTestBase; import com.scalar.db.config.DatabaseConfig; import com.scalar.db.io.Column; import com.scalar.db.io.DataType; +import java.util.List; import java.util.Properties; import java.util.Random; @@ -55,4 +58,13 @@ protected Column getColumnWithMaxValue(String columnName, DataType dataType) } return super.getColumnWithMaxValue(columnName, dataType); } + + @Override + protected List getClusteringKeyTypes() { + // TIMESTAMP WITH TIME ZONE type cannot be used as a primary key in Oracle + return JdbcTestUtils.filterDataTypes( + super.getClusteringKeyTypes(), + rdbEngine, + ImmutableMap.of(RdbEngineOracle.class, ImmutableList.of(DataType.TIMESTAMPTZ))); + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseSinglePartitionKeyIntegrationTest.java 
b/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseSinglePartitionKeyIntegrationTest.java index 6e70963e7a..4945551873 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseSinglePartitionKeyIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcDatabaseSinglePartitionKeyIntegrationTest.java @@ -1,9 +1,12 @@ package com.scalar.db.storage.jdbc; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import com.scalar.db.api.DistributedStorageSinglePartitionKeyIntegrationTestBase; import com.scalar.db.config.DatabaseConfig; import com.scalar.db.io.Column; import com.scalar.db.io.DataType; +import java.util.List; import java.util.Properties; import java.util.Random; @@ -57,10 +60,16 @@ protected Column getColumnWithMaxValue(String columnName, DataType dataType) } @Override - protected boolean isFloatTypeKeySupported() { - if (JdbcTestUtils.isYugabyte(rdbEngine)) { - return false; - } - return super.isFloatTypeKeySupported(); + protected List getPartitionKeyTypes() { + // TIMESTAMP WITH TIME ZONE type cannot be used as a primary key in Oracle + // FLOAT and DOUBLE types cannot be used as partition key in Yugabyte + return JdbcTestUtils.filterDataTypes( + super.getPartitionKeyTypes(), + rdbEngine, + ImmutableMap.of( + RdbEngineOracle.class, + ImmutableList.of(DataType.TIMESTAMPTZ), + RdbEngineYugabyte.class, + ImmutableList.of(DataType.FLOAT, DataType.DOUBLE))); } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcSchemaLoaderImportIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcSchemaLoaderImportIntegrationTest.java index 77dda54b58..f767813231 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcSchemaLoaderImportIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcSchemaLoaderImportIntegrationTest.java @@ -1,9 +1,14 @@ package com.scalar.db.storage.jdbc; +import com.google.common.collect.ImmutableMap; import com.google.common.util.concurrent.Uninterruptibles; +import com.scalar.db.api.TableMetadata; import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.io.DataType; import com.scalar.db.schemaloader.SchemaLoaderImportIntegrationTestBase; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.util.Collections; +import java.util.Map; import java.util.Properties; import java.util.concurrent.TimeUnit; import org.junit.jupiter.api.Test; @@ -12,6 +17,7 @@ import org.slf4j.LoggerFactory; public class JdbcSchemaLoaderImportIntegrationTest extends SchemaLoaderImportIntegrationTestBase { + private static final Logger logger = LoggerFactory.getLogger(JdbcSchemaLoaderImportIntegrationTest.class); @@ -31,21 +37,111 @@ protected Properties getProperties(String testName) { @SuppressFBWarnings("SQL_NONCONSTANT_STRING_PASSED_TO_EXECUTE") @Override protected void createImportableTable(String namespace, String table) throws Exception { - testUtils.execute( - "CREATE TABLE " - + rdbEngine.encloseFullTableName(namespace, table) - + "(" - + rdbEngine.enclose("pk") - + " CHAR(8)," - + rdbEngine.enclose("col") - + " CHAR(8), PRIMARY KEY(" - + rdbEngine.enclose("pk") - + "))"); + String sql; + + if (JdbcTestUtils.isMysql(rdbEngine)) { + sql = + "CREATE TABLE " + + rdbEngine.encloseFullTableName(namespace, table) + + "(" + + rdbEngine.enclose("pk") + + " CHAR(8)," + + rdbEngine.enclose("col1") + + " CHAR(8)," + + rdbEngine.enclose("col2") + + " 
DATETIME," + + "PRIMARY KEY(" + + rdbEngine.enclose("pk") + + "))"; + } else if (JdbcTestUtils.isOracle(rdbEngine)) { + sql = + "CREATE TABLE " + + rdbEngine.encloseFullTableName(namespace, table) + + "(" + + rdbEngine.enclose("pk") + + " CHAR(8)," + + rdbEngine.enclose("col1") + + " CHAR(8)," + + rdbEngine.enclose("col2") + + " DATE," + + "PRIMARY KEY(" + + rdbEngine.enclose("pk") + + "))"; + } else if (JdbcTestUtils.isPostgresql(rdbEngine) || JdbcTestUtils.isSqlServer(rdbEngine)) { + sql = + "CREATE TABLE " + + rdbEngine.encloseFullTableName(namespace, table) + + "(" + + rdbEngine.enclose("pk") + + " CHAR(8)," + + rdbEngine.enclose("col1") + + " CHAR(8)," + + "PRIMARY KEY(" + + rdbEngine.enclose("pk") + + "))"; + } else { + throw new AssertionError(); + } + + testUtils.execute(sql); + } + + @Override + protected Map getImportableTableOverrideColumnsType() { + // col1 type override confirms overriding with the default data type mapping does not fail + // col2 really performs a type override + if (JdbcTestUtils.isMysql(rdbEngine)) { + return ImmutableMap.of("col1", DataType.TEXT, "col2", DataType.TIMESTAMPTZ); + } else if (JdbcTestUtils.isOracle(rdbEngine)) { + return ImmutableMap.of("col1", DataType.TEXT, "col2", DataType.TIMESTAMP); + } else if (JdbcTestUtils.isPostgresql(rdbEngine) || JdbcTestUtils.isSqlServer(rdbEngine)) { + return ImmutableMap.of("col1", DataType.TEXT); + } else if (JdbcTestUtils.isSqlite(rdbEngine)) { + return Collections.emptyMap(); + } else { + throw new AssertionError(); + } + } + + @Override + protected TableMetadata getImportableTableMetadata(boolean hasTypeOverride) { + TableMetadata.Builder metadata = TableMetadata.newBuilder(); + metadata.addPartitionKey("pk"); + metadata.addColumn("pk", DataType.TEXT); + metadata.addColumn("col1", DataType.TEXT); + + if (JdbcTestUtils.isMysql(rdbEngine)) { + return metadata + .addColumn("col2", hasTypeOverride ? DataType.TIMESTAMPTZ : DataType.TIMESTAMP) + .build(); + } else if (JdbcTestUtils.isOracle(rdbEngine)) { + return metadata + .addColumn("col2", hasTypeOverride ? 
DataType.TIMESTAMP : DataType.DATE) + .build(); + } else if (JdbcTestUtils.isPostgresql(rdbEngine) || JdbcTestUtils.isSqlServer(rdbEngine)) { + return metadata.build(); + } else { + throw new AssertionError(); + } } @SuppressFBWarnings("SQL_NONCONSTANT_STRING_PASSED_TO_EXECUTE") @Override protected void createNonImportableTable(String namespace, String table) throws Exception { + String nonImportableDataType; + if (JdbcTestUtils.isMysql(rdbEngine)) { + nonImportableDataType = "YEAR"; + } else if (JdbcTestUtils.isPostgresql(rdbEngine)) { + nonImportableDataType = "INTERVAL"; + } else if (JdbcTestUtils.isOracle(rdbEngine)) { + nonImportableDataType = "INT"; + } else if (JdbcTestUtils.isSqlServer(rdbEngine)) { + nonImportableDataType = "MONEY"; + } else if (JdbcTestUtils.isSqlite(rdbEngine)) { + nonImportableDataType = "TEXT"; + } else { + throw new AssertionError(); + } testUtils.execute( "CREATE TABLE " + rdbEngine.encloseFullTableName(namespace, table) @@ -53,7 +149,9 @@ protected void createNonImportableTable(String namespace, String table) throws E + rdbEngine.enclose("pk") + " CHAR(8)," + rdbEngine.enclose("col") - + " DATE, PRIMARY KEY(" + + " " + + nonImportableDataType + + ", PRIMARY KEY(" + rdbEngine.enclose("pk") + "))"); } diff --git a/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcTestUtils.java b/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcTestUtils.java index ef0dd3b7d7..52da297d3e 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcTestUtils.java +++ b/core/src/integration-test/java/com/scalar/db/storage/jdbc/JdbcTestUtils.java @@ -1,10 +1,15 @@ package com.scalar.db.storage.jdbc; import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.TextColumn; import com.scalar.db.util.TestUtils; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; import java.util.Random; +import java.util.stream.Collectors; import java.util.stream.IntStream; public final class JdbcTestUtils { @@ -24,8 +29,7 @@ public static Column getRandomOracleDoubleColumn(Random random, String column public static double nextOracleDouble(Random random) { return random - .doubles(MIN_ORACLE_DOUBLE_VALUE, MAX_ORACLE_DOUBLE_VALUE) - .limit(1) + .doubles(1, MIN_ORACLE_DOUBLE_VALUE, MAX_ORACLE_DOUBLE_VALUE) .findFirst() .orElse(0.0d); } @@ -68,4 +72,31 @@ public static boolean isSqlite(RdbEngineStrategy rdbEngine) { public static boolean isYugabyte(RdbEngineStrategy rdbEngine) { return rdbEngine instanceof RdbEngineYugabyte; } + + /** + * Filters the data types based on the RDB engine and the excluded data types. 
+ * + * @param typesToBeFiltered the data types to be filtered + * @param currentRdbEngine the current {@code RdbEngine} used in the test + * @param excludedDataTypesByRdbEngine a list of data types to be excluded by {@code RdbEngine} + * @return the data types that are not excluded + */ + public static List filterDataTypes( + List typesToBeFiltered, + RdbEngineStrategy currentRdbEngine, + Map, List> excludedDataTypesByRdbEngine) { + return typesToBeFiltered.stream() + .filter( + type -> { + for (Entry, List> excludedTypesByEngine : + excludedDataTypesByRdbEngine.entrySet()) { + if (excludedTypesByEngine.getKey().equals(currentRdbEngine.getClass()) + && excludedTypesByEngine.getValue().contains(type)) { + return false; + } + } + return true; + }) + .collect(Collectors.toList()); + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/multistorage/MultiStorageSchemaLoaderIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/multistorage/MultiStorageSchemaLoaderIntegrationTest.java index 0fa2ab2293..6d9ccc111e 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/multistorage/MultiStorageSchemaLoaderIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/multistorage/MultiStorageSchemaLoaderIntegrationTest.java @@ -110,4 +110,9 @@ protected void waitForCreationIfNecessary() { // one session to the other, so we need to wait Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); } + + @Override + protected boolean isTimestampTypeSupported() { + return false; + } } diff --git a/core/src/main/java/com/scalar/db/api/Admin.java b/core/src/main/java/com/scalar/db/api/Admin.java index 0bff210a17..4d7a088500 100644 --- a/core/src/main/java/com/scalar/db/api/Admin.java +++ b/core/src/main/java/com/scalar/db/api/Admin.java @@ -451,7 +451,28 @@ default void addNewColumnToTable( * table does not exist, or if the table does not meet the requirement of ScalarDB table * @throws ExecutionException if the operation fails */ - void importTable(String namespace, String table, Map options) + default void importTable(String namespace, String table, Map options) + throws ExecutionException { + importTable(namespace, table, options, Collections.emptyMap()); + } + + /** + * Imports an existing table that is not managed by ScalarDB. + * + * @param namespace an existing namespace + * @param table an existing table + * @param options options to import + * @param overrideColumnsType a map of column data type by column name. Only set the column for + * which you want to override the default data type mapping. 
+ * @throws IllegalArgumentException if the table is already managed by ScalarDB, if the target + * table does not exist, or if the table does not meet the requirement of ScalarDB table + * @throws ExecutionException if the operation fails + */ + void importTable( + String namespace, + String table, + Map options, + Map overrideColumnsType) throws ExecutionException; /** diff --git a/core/src/main/java/com/scalar/db/api/ConditionBuilder.java b/core/src/main/java/com/scalar/db/api/ConditionBuilder.java index 15d494bf88..6a8d0c4933 100644 --- a/core/src/main/java/com/scalar/db/api/ConditionBuilder.java +++ b/core/src/main/java/com/scalar/db/api/ConditionBuilder.java @@ -6,11 +6,19 @@ import com.scalar.db.io.BlobColumn; import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.Column; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.ArrayList; import java.util.List; @@ -225,6 +233,46 @@ public ConditionalExpression isEqualToBlob(ByteBuffer value) { return new ConditionalExpression(columnName, value, Operator.EQ); } + /** + * Creates an 'equal' conditional expression for a DATE value. + * + * @param value a DATE value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isEqualToDate(LocalDate value) { + return new ConditionalExpression(DateColumn.of(columnName, value), Operator.EQ); + } + + /** + * Creates an 'equal' conditional expression for a TIME value. + * + * @param value a TIME value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isEqualToTime(LocalTime value) { + return new ConditionalExpression(TimeColumn.of(columnName, value), Operator.EQ); + } + + /** + * Creates an 'equal' conditional expression for a TIMESTAMP value. + * + * @param value a TIMESTAMP value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isEqualToTimestamp(LocalDateTime value) { + return new ConditionalExpression(TimestampColumn.of(columnName, value), Operator.EQ); + } + + /** + * Creates an 'equal' conditional expression for a TIMESTAMPTZ value. + * + * @param value a TIMESTAMPTZ value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isEqualToTimestampTZ(Instant value) { + return new ConditionalExpression(TimestampTZColumn.of(columnName, value), Operator.EQ); + } + /** * Creates a 'not equal' conditional expression for a BOOLEAN value. * @@ -304,7 +352,45 @@ public ConditionalExpression isNotEqualToBlob(byte[] value) { public ConditionalExpression isNotEqualToBlob(ByteBuffer value) { return new ConditionalExpression(columnName, value, Operator.NE); } + /** + * Creates a 'not equal' conditional expression for a DATE value. 
+ * + * @param value a DATE value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isNotEqualToDate(LocalDate value) { + return new ConditionalExpression(DateColumn.of(columnName, value), Operator.NE); + } + + /** + * Creates a 'not equal' conditional expression for a TIME value. + * + * @param value a TIME value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isNotEqualToTime(LocalTime value) { + return new ConditionalExpression(TimeColumn.of(columnName, value), Operator.NE); + } + /** + * Creates a 'not equal' conditional expression for a TIMESTAMP value. + * + * @param value a TIMESTAMP value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isNotEqualToTimestamp(LocalDateTime value) { + return new ConditionalExpression(TimestampColumn.of(columnName, value), Operator.NE); + } + + /** + * Creates a 'not equal' conditional expression for a TIMESTAMPTZ value. + * + * @param value a TIMESTAMPTZ value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isNotEqualToTimestampTZ(Instant value) { + return new ConditionalExpression(TimestampTZColumn.of(columnName, value), Operator.NE); + } /** * Creates a 'greater than' conditional expression for a BOOLEAN value. * @@ -385,6 +471,46 @@ public ConditionalExpression isGreaterThanBlob(ByteBuffer value) { return new ConditionalExpression(columnName, value, Operator.GT); } + /** + * Creates a 'greater than' conditional expression for a DATE value. + * + * @param value a DATE value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isGreaterThanDate(LocalDate value) { + return new ConditionalExpression(DateColumn.of(columnName, value), Operator.GT); + } + + /** + * Creates a 'greater than' conditional expression for a TIME value. + * + * @param value a TIME value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isGreaterThanTime(LocalTime value) { + return new ConditionalExpression(TimeColumn.of(columnName, value), Operator.GT); + } + + /** + * Creates a 'greater than' conditional expression for a TIMESTAMP value. + * + * @param value a TIMESTAMP value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isGreaterThanTimestamp(LocalDateTime value) { + return new ConditionalExpression(TimestampColumn.of(columnName, value), Operator.GT); + } + + /** + * Creates a 'greater than' conditional expression for a TIMESTAMPTZ value. + * + * @param value a TIMESTAMPTZ value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isGreaterThanTimestampTZ(Instant value) { + return new ConditionalExpression(TimestampTZColumn.of(columnName, value), Operator.GT); + } + /** * Creates a 'greater than or equal' conditional expression for a BOOLEAN value. * @@ -464,6 +590,45 @@ public ConditionalExpression isGreaterThanOrEqualToBlob(byte[] value) { public ConditionalExpression isGreaterThanOrEqualToBlob(ByteBuffer value) { return new ConditionalExpression(columnName, value, Operator.GTE); } + /** + * Creates a 'greater than or equal' conditional expression for a DATE value. 
+ * + * @param value a DATE value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isGreaterThanOrEqualToDate(LocalDate value) { + return new ConditionalExpression(DateColumn.of(columnName, value), Operator.GTE); + } + + /** + * Creates a 'greater than or equal' conditional expression for a TIME value. + * + * @param value a TIME value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isGreaterThanOrEqualToTime(LocalTime value) { + return new ConditionalExpression(TimeColumn.of(columnName, value), Operator.GTE); + } + + /** + * Creates a 'greater than or equal' conditional expression for a TIMESTAMP value. + * + * @param value a TIMESTAMP value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isGreaterThanOrEqualToTimestamp(LocalDateTime value) { + return new ConditionalExpression(TimestampColumn.of(columnName, value), Operator.GTE); + } + + /** + * Creates a 'greater than or equal' conditional expression for a TIMESTAMPTZ value. + * + * @param value a TIMESTAMPTZ value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isGreaterThanOrEqualToTimestampTZ(Instant value) { + return new ConditionalExpression(TimestampTZColumn.of(columnName, value), Operator.GTE); + } /** * Creates a 'less than' conditional expression for a BOOLEAN value. @@ -544,7 +709,45 @@ public ConditionalExpression isLessThanBlob(byte[] value) { public ConditionalExpression isLessThanBlob(ByteBuffer value) { return new ConditionalExpression(columnName, value, Operator.LT); } + /** + * Creates a 'less than' conditional expression for a DATE value. + * + * @param value a DATE value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isLessThanDate(LocalDate value) { + return new ConditionalExpression(DateColumn.of(columnName, value), Operator.LT); + } + + /** + * Creates a 'less than' conditional expression for a TIME value. + * + * @param value a TIME value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isLessThanTime(LocalTime value) { + return new ConditionalExpression(TimeColumn.of(columnName, value), Operator.LT); + } + + /** + * Creates a 'less than' conditional expression for a TIMESTAMP value. + * + * @param value a TIMESTAMP value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isLessThanTimestamp(LocalDateTime value) { + return new ConditionalExpression(TimestampColumn.of(columnName, value), Operator.LT); + } + /** + * Creates a 'less than' conditional expression for a TIMESTAMPTZ value. + * + * @param value a TIMESTAMPTZ value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isLessThanTimestampTZ(Instant value) { + return new ConditionalExpression(TimestampTZColumn.of(columnName, value), Operator.LT); + } /** * Creates a 'less than or equal' conditional expression for a BOOLEAN value. * @@ -625,6 +828,46 @@ public ConditionalExpression isLessThanOrEqualToBlob(ByteBuffer value) { return new ConditionalExpression(columnName, value, Operator.LTE); } + /** + * Creates a 'less than or equal' conditional expression for a DATE value. 
+ * + * @param value a DATE value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isLessThanOrEqualToDate(LocalDate value) { + return new ConditionalExpression(DateColumn.of(columnName, value), Operator.LTE); + } + + /** + * Creates a 'less than or equal' conditional expression for a TIME value. + * + * @param value a TIME value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isLessThanOrEqualToTime(LocalTime value) { + return new ConditionalExpression(TimeColumn.of(columnName, value), Operator.LTE); + } + + /** + * Creates a 'less than or equal' conditional expression for a TIMESTAMP value. + * + * @param value a TIMESTAMP value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isLessThanOrEqualToTimestamp(LocalDateTime value) { + return new ConditionalExpression(TimestampColumn.of(columnName, value), Operator.LTE); + } + + /** + * Creates a 'less than or equal' conditional expression for a TIMESTAMPTZ value. + * + * @param value a TIMESTAMPTZ value used to compare with the target column + * @return a conditional expression + */ + public ConditionalExpression isLessThanOrEqualToTimestampTZ(Instant value) { + return new ConditionalExpression(TimestampTZColumn.of(columnName, value), Operator.LTE); + } + /** * Creates a 'is null' conditional expression for a BOOLEAN value. * @@ -688,6 +931,42 @@ public ConditionalExpression isNullBlob() { return new ConditionalExpression(BlobColumn.ofNull(columnName), Operator.IS_NULL); } + /** + * Creates an 'is null' conditional expression for a DATE value. + * + * @return a conditional expression + */ + public ConditionalExpression isNullDate() { + return new ConditionalExpression(DateColumn.ofNull(columnName), Operator.IS_NULL); + } + + /** + * Creates an 'is null' conditional expression for a TIME value. + * + * @return a conditional expression + */ + public ConditionalExpression isNullTime() { + return new ConditionalExpression(TimeColumn.ofNull(columnName), Operator.IS_NULL); + } + + /** + * Creates an 'is null' conditional expression for a TIMESTAMP value. + * + * @return a conditional expression + */ + public ConditionalExpression isNullTimestamp() { + return new ConditionalExpression(TimestampColumn.ofNull(columnName), Operator.IS_NULL); + } + + /** + * Creates an 'is null' conditional expression for a TIMESTAMPTZ value. + * + * @return a conditional expression + */ + public ConditionalExpression isNullTimestampTZ() { + return new ConditionalExpression(TimestampTZColumn.ofNull(columnName), Operator.IS_NULL); + } + /** * Creates a 'is not null' conditional expression for a BOOLEAN value. * @@ -750,6 +1029,41 @@ public ConditionalExpression isNotNullText() { public ConditionalExpression isNotNullBlob() { return new ConditionalExpression(BlobColumn.ofNull(columnName), Operator.IS_NOT_NULL); } + /** + * Creates an 'is not null' conditional expression for a DATE value. + * + * @return a conditional expression + */ + public ConditionalExpression isNotNullDate() { + return new ConditionalExpression(DateColumn.ofNull(columnName), Operator.IS_NOT_NULL); + } + + /** + * Creates an 'is not null' conditional expression for a TIME value. 
+ * + * @return a conditional expression + */ + public ConditionalExpression isNotNullTime() { + return new ConditionalExpression(TimeColumn.ofNull(columnName), Operator.IS_NOT_NULL); + } + + /** + * Creates an 'is not null' conditional expression for a TIMESTAMP value. + * + * @return a conditional expression + */ + public ConditionalExpression isNotNullTimestamp() { + return new ConditionalExpression(TimestampColumn.ofNull(columnName), Operator.IS_NOT_NULL); + } + + /** + * Creates an 'is not null' conditional expression for a TIMESTAMPTZ value. + * + * @return a conditional expression + */ + public ConditionalExpression isNotNullTimestampTZ() { + return new ConditionalExpression(TimestampTZColumn.ofNull(columnName), Operator.IS_NOT_NULL); + } /** * Creates a 'like' conditional expression for a TEXT value. For the escape character, the diff --git a/core/src/main/java/com/scalar/db/api/ConditionalExpression.java b/core/src/main/java/com/scalar/db/api/ConditionalExpression.java index 7970aaf8ef..9892dc9e24 100644 --- a/core/src/main/java/com/scalar/db/api/ConditionalExpression.java +++ b/core/src/main/java/com/scalar/db/api/ConditionalExpression.java @@ -12,6 +12,10 @@ import com.scalar.db.io.Value; import com.scalar.db.util.ScalarDbUtils; import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.Objects; import javax.annotation.concurrent.Immutable; @@ -280,6 +284,42 @@ public byte[] getBlobValueAsBytes() { return column.getBlobValueAsBytes(); } + /** + * Returns the DATE value to compare with the target column. + * + * @return the DATE value to compare with the target column + */ + public LocalDate getDateValue() { + return column.getDateValue(); + } + + /** + * Returns the TIME value to compare with the target column. + * + * @return the TIME value to compare with the target column + */ + public LocalTime getTimeValue() { + return column.getTimeValue(); + } + + /** + * Returns the TIMESTAMP value to compare with the target column. + * + * @return the TIMESTAMP value to compare with the target column + */ + public LocalDateTime getTimestampValue() { + return column.getTimestampValue(); + } + + /** + * Returns the TIMESTAMPTZ value to compare with the target column. + * + * @return the TIMESTAMPTZ value to compare with the target column + */ + public Instant getTimestampTZValue() { + return column.getTimestampTZValue(); + } + /** * Returns the value to compare with the target column as an Object type. * diff --git a/core/src/main/java/com/scalar/db/api/DistributedStorageAdmin.java b/core/src/main/java/com/scalar/db/api/DistributedStorageAdmin.java index d7296a573f..2643cb4f57 100644 --- a/core/src/main/java/com/scalar/db/api/DistributedStorageAdmin.java +++ b/core/src/main/java/com/scalar/db/api/DistributedStorageAdmin.java @@ -2,6 +2,8 @@ import com.scalar.db.exception.storage.ExecutionException; import com.scalar.db.io.DataType; +import java.util.Collections; +import java.util.Map; /** * An administrative interface for distributed storage implementations. 
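// Editor's sketch (not part of this patch): reading a temporal value back
// through the ConditionalExpression getters added above. It assumes a matching
// isEqualToDate(...) variant exists earlier in ConditionBuilder, outside the
// hunks shown here; the column name is illustrative.
import com.scalar.db.api.ConditionBuilder;
import com.scalar.db.api.ConditionalExpression;
import java.time.LocalDate;

class ConditionValueRoundTripSketch {
  static LocalDate extractDate() {
    ConditionalExpression condition =
        ConditionBuilder.column("birthday").isEqualToDate(LocalDate.of(1990, 5, 17));
    return condition.getDateValue(); // returns 1990-05-17
  }
}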
The user can execute @@ -52,7 +54,26 @@ public interface DistributedStorageAdmin extends Admin, AutoCloseable { * @throws ExecutionException if the operation fails * @return import table metadata in the ScalarDB format */ - TableMetadata getImportTableMetadata(String namespace, String table) throws ExecutionException; + default TableMetadata getImportTableMetadata(String namespace, String table) + throws ExecutionException { + return getImportTableMetadata(namespace, table, Collections.emptyMap()); + } + + /** + * Get import table metadata in the ScalarDB format. + * + * @param namespace namespace name of import table + * @param table import table name + * @param overrideColumnsType a map of column data type by column name. Only set the column for + * which you want to override the default data type mapping. + * @throws IllegalArgumentException if the table does not exist + * @throws IllegalStateException if the table does not meet the requirement of ScalarDB table + * @throws ExecutionException if the operation fails + * @return import table metadata in the ScalarDB format + */ + TableMetadata getImportTableMetadata( + String namespace, String table, Map overrideColumnsType) + throws ExecutionException; /** * Add a column in the table without updating the metadata table in ScalarDB. diff --git a/core/src/main/java/com/scalar/db/api/InsertBuilder.java b/core/src/main/java/com/scalar/db/api/InsertBuilder.java index 137f408c09..c48406054a 100644 --- a/core/src/main/java/com/scalar/db/api/InsertBuilder.java +++ b/core/src/main/java/com/scalar/db/api/InsertBuilder.java @@ -16,12 +16,20 @@ import com.scalar.db.io.BlobColumn; import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.Column; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.Key; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; @@ -198,6 +206,30 @@ public Buildable blobValue(String columnName, @Nullable ByteBuffer value) { return this; } + @Override + public Buildable dateValue(String columnName, @Nullable LocalDate value) { + columns.put(columnName, DateColumn.of(columnName, value)); + return this; + } + + @Override + public Buildable timeValue(String columnName, @Nullable LocalTime value) { + columns.put(columnName, TimeColumn.of(columnName, value)); + return this; + } + + @Override + public Buildable timestampValue(String columnName, @Nullable LocalDateTime value) { + columns.put(columnName, TimestampColumn.of(columnName, value)); + return this; + } + + @Override + public Buildable timestampTZValue(String columnName, @Nullable Instant value) { + columns.put(columnName, TimestampTZColumn.of(columnName, value)); + return this; + } + @Override public Buildable value(Column column) { columns.put(column.getName(), column); @@ -352,6 +384,30 @@ public BuildableFromExisting blobValue(String columnName, @Nullable ByteBuffer v return this; } + @Override + public BuildableFromExisting dateValue(String columnName, @Nullable LocalDate value) { + super.dateValue(columnName, value); + return this; + } + + @Override + public BuildableFromExisting timeValue(String columnName, @Nullable 
LocalTime value) { + super.timeValue(columnName, value); + return this; + } + + @Override + public BuildableFromExisting timestampValue(String columnName, @Nullable LocalDateTime value) { + super.timestampValue(columnName, value); + return this; + } + + @Override + public BuildableFromExisting timestampTZValue(String columnName, @Nullable Instant value) { + super.timestampTZValue(columnName, value); + return this; + } + @Override public BuildableFromExisting value(Column column) { super.value(column); diff --git a/core/src/main/java/com/scalar/db/api/OperationBuilder.java b/core/src/main/java/com/scalar/db/api/OperationBuilder.java index 6838222025..b1ad47cf0d 100644 --- a/core/src/main/java/com/scalar/db/api/OperationBuilder.java +++ b/core/src/main/java/com/scalar/db/api/OperationBuilder.java @@ -3,6 +3,10 @@ import com.scalar.db.io.Column; import com.scalar.db.io.Key; import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.Collection; import java.util.Map; import java.util.Set; @@ -254,6 +258,42 @@ public interface Values { */ T blobValue(String columnName, @Nullable ByteBuffer value); + /** + * Adds the specified DATE value as a LocalDate to the list of put values. + * + * @param columnName a column name of the value + * @param value a DATE value to put as LocalDate type + * @return the Put operation builder + */ + T dateValue(String columnName, @Nullable LocalDate value); + + /** + * Adds the specified TIME value as a LocalTime to the list of put values. + * + * @param columnName a column name of the value + * @param value a TIME value to put as LocalTime type + * @return the Put operation builder + */ + T timeValue(String columnName, @Nullable LocalTime value); + + /** + * Adds the specified TIMESTAMP value as a LocalDateTime value to the list of put values. + * + * @param columnName a column name of the value + * @param value a TIMESTAMP value to put as LocalDateTime type + * @return the Put operation builder + */ + T timestampValue(String columnName, @Nullable LocalDateTime value); + + /** + * Adds the specified TIMESTAMPTZ value as an Instant to the list of put values. + * + * @param columnName a column name of the value + * @param value a TIMESTAMPTZ value to put as Instant type + * @return the Put operation builder + */ + T timestampTZValue(String columnName, @Nullable Instant value); + /** * Adds a column to the list of put values. * diff --git a/core/src/main/java/com/scalar/db/api/Put.java b/core/src/main/java/com/scalar/db/api/Put.java index 1f6c373376..cf14bbfc1a 100644 --- a/core/src/main/java/com/scalar/db/api/Put.java +++ b/core/src/main/java/com/scalar/db/api/Put.java @@ -22,6 +22,10 @@ import com.scalar.db.transaction.consensuscommit.ConsensusCommitOperationAttributes; import com.scalar.db.util.ScalarDbUtils; import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.Collection; import java.util.HashMap; import java.util.LinkedHashMap; @@ -679,6 +683,58 @@ public byte[] getBlobValueAsBytes(String columnName) { return columns.get(columnName).getBlobValueAsBytes(); } + /** + * Returns the DATE value of the specified column added to the list of put values as a LocalDate + * type. + * + * @param columnName a column name of the value + * @return the DATE value of the specified column. 
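// Editor's sketch (not part of this patch): populating the new temporal Values
// methods above through the Insert builder, assuming the standard
// Insert.newBuilder() entry point. Namespace, table, and column names are
// illustrative.
import com.scalar.db.api.Insert;
import com.scalar.db.io.Key;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalTime;

class TemporalInsertSketch {
  static Insert buildInsert() {
    return Insert.newBuilder()
        .namespace("ns")
        .table("events")
        .partitionKey(Key.ofInt("event_id", 1))
        .dateValue("event_date", LocalDate.of(2024, 3, 15))
        .timeValue("doors_open", LocalTime.of(18, 30))
        .timestampTZValue("created_at", Instant.parse("2024-03-15T09:00:00Z"))
        .build();
  }
}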
If the value is NULL, null + */ + @Nullable + public LocalDate getDateValue(String columnName) { + checkIfExists(columnName); + return columns.get(columnName).getDateValue(); + } + + /** + * Returns the TIME value of the specified column added to the list of put values as a LocalTime + * type. + * + * @param columnName a column name of the value + * @return the TIME value of the specified column. If the value is NULL, null + */ + @Nullable + public LocalTime getTimeValue(String columnName) { + checkIfExists(columnName); + return columns.get(columnName).getTimeValue(); + } + + /** + * Returns the TIMESTAMP value of the specified column added to the list of put values as a + * LocalDateTime type. + * + * @param columnName a column name of the value + * @return the TIMESTAMP value of the specified column. If the value is NULL, null + */ + @Nullable + public LocalDateTime getTimestampValue(String columnName) { + checkIfExists(columnName); + return columns.get(columnName).getTimestampValue(); + } + + /** + * Returns the TIMESTAMPTZ value of the specified column added to the list of put values as an + * Instant type. + * + * @param columnName a column name of the value + * @return the TIMESTAMPTZ value of the specified column. If the value is NULL, null + */ + @Nullable + public Instant getTimestampTZValue(String columnName) { + checkIfExists(columnName); + return columns.get(columnName).getTimestampTZValue(); + } + /** * Returns the value of the specified column added to the list of put values as an Object type. * diff --git a/core/src/main/java/com/scalar/db/api/PutBuilder.java b/core/src/main/java/com/scalar/db/api/PutBuilder.java index 3575ec763b..227a5acf9d 100644 --- a/core/src/main/java/com/scalar/db/api/PutBuilder.java +++ b/core/src/main/java/com/scalar/db/api/PutBuilder.java @@ -21,13 +21,21 @@ import com.scalar.db.io.BlobColumn; import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.Column; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.Key; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import com.scalar.db.transaction.consensuscommit.ConsensusCommitOperationAttributes; import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; @@ -226,6 +234,30 @@ public Buildable blobValue(String columnName, @Nullable ByteBuffer value) { return this; } + @Override + public Buildable dateValue(String columnName, @Nullable LocalDate value) { + columns.put(columnName, DateColumn.of(columnName, value)); + return this; + } + + @Override + public Buildable timeValue(String columnName, @Nullable LocalTime value) { + columns.put(columnName, TimeColumn.of(columnName, value)); + return this; + } + + @Override + public Buildable timestampValue(String columnName, @Nullable LocalDateTime value) { + columns.put(columnName, TimestampColumn.of(columnName, value)); + return this; + } + + @Override + public Buildable timestampTZValue(String columnName, @Nullable Instant value) { + columns.put(columnName, TimestampTZColumn.of(columnName, value)); + return this; + } + @Override public Buildable value(Column column) { columns.put(column.getName(), column); @@ -438,6 +470,30 @@ public BuildableFromExisting 
blobValue(String columnName, @Nullable ByteBuffer v return this; } + @Override + public BuildableFromExisting dateValue(String columnName, @Nullable LocalDate value) { + super.dateValue(columnName, value); + return this; + } + + @Override + public BuildableFromExisting timeValue(String columnName, @Nullable LocalTime value) { + super.timeValue(columnName, value); + return this; + } + + @Override + public BuildableFromExisting timestampValue(String columnName, @Nullable LocalDateTime value) { + super.timestampValue(columnName, value); + return this; + } + + @Override + public BuildableFromExisting timestampTZValue(String columnName, @Nullable Instant value) { + super.timestampTZValue(columnName, value); + return this; + } + @Override public BuildableFromExisting value(Column column) { super.value(column); diff --git a/core/src/main/java/com/scalar/db/api/Result.java b/core/src/main/java/com/scalar/db/api/Result.java index 3305571444..0a84745c05 100644 --- a/core/src/main/java/com/scalar/db/api/Result.java +++ b/core/src/main/java/com/scalar/db/api/Result.java @@ -4,6 +4,10 @@ import com.scalar.db.io.Key; import com.scalar.db.io.Value; import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.Map; import java.util.Optional; import java.util.Set; @@ -168,6 +172,45 @@ default ByteBuffer getBlob(String columnName) { @Nullable byte[] getBlobAsBytes(String columnName); + /** + * Returns the DATE value of the specified column as a Java LocalDate type. + * + * @param columnName a column name of the value + * @return the DATE value of the specified column as a Java LocalDate type. If the value is NULL, + * null + */ + @Nullable + LocalDate getDate(String columnName); + + /** + * Returns the TIME value of the specified column as a Java LocalTime type. + * + * @param columnName a column name of the value + * @return the TIME value of the specified column as a Java LocalTime type. If the value is NULL, + * null + */ + @Nullable + LocalTime getTime(String columnName); + + /** + * Returns the TIMESTAMP value of the specified column as a Java LocalDateTime type. + * + * @param columnName a column name of the value + * @return the TIMESTAMP value of the specified column as a Java LocalDateTime type. If the value + * is NULL, null + */ + @Nullable + LocalDateTime getTimestamp(String columnName); + /** + * Returns the TIMESTAMPTZ value of the specified column as a Java Instant type. + * + * @param columnName a column name of the value + * @return the TIMESTAMPTZ value of the specified column as a Java Instant type. If the value is + * NULL, null + */ + @Nullable + Instant getTimestampTZ(String columnName); + /** * Returns the value of the specified column as a Java Object type. 
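// Editor's sketch (not part of this patch): consuming the Result getters
// declared above; the surrounding read operation is elided and the column
// names are illustrative.
import com.scalar.db.api.Result;
import java.time.Instant;
import java.time.LocalDate;

class TemporalResultSketch {
  static String describe(Result result) {
    LocalDate eventDate = result.getDate("event_date"); // null if the column is NULL
    Instant createdAt = result.getTimestampTZ("created_at");
    return eventDate + " / " + createdAt;
  }
}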
* diff --git a/core/src/main/java/com/scalar/db/api/UpdateBuilder.java b/core/src/main/java/com/scalar/db/api/UpdateBuilder.java index 69975a81eb..4a5640a131 100644 --- a/core/src/main/java/com/scalar/db/api/UpdateBuilder.java +++ b/core/src/main/java/com/scalar/db/api/UpdateBuilder.java @@ -22,12 +22,20 @@ import com.scalar.db.io.BlobColumn; import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.Column; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.Key; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; @@ -233,6 +241,30 @@ public Buildable blobValue(String columnName, @Nullable ByteBuffer value) { return this; } + @Override + public Buildable dateValue(String columnName, @Nullable LocalDate value) { + columns.put(columnName, DateColumn.of(columnName, value)); + return this; + } + + @Override + public Buildable timeValue(String columnName, @Nullable LocalTime value) { + columns.put(columnName, TimeColumn.of(columnName, value)); + return this; + } + + @Override + public Buildable timestampValue(String columnName, @Nullable LocalDateTime value) { + columns.put(columnName, TimestampColumn.of(columnName, value)); + return this; + } + + @Override + public Buildable timestampTZValue(String columnName, @Nullable Instant value) { + columns.put(columnName, TimestampTZColumn.of(columnName, value)); + return this; + } + @Override public Buildable value(Column column) { columns.put(column.getName(), column); @@ -410,6 +442,30 @@ public BuildableFromExisting blobValue(String columnName, @Nullable ByteBuffer v return this; } + @Override + public BuildableFromExisting dateValue(String columnName, @Nullable LocalDate value) { + super.dateValue(columnName, value); + return this; + } + + @Override + public BuildableFromExisting timeValue(String columnName, @Nullable LocalTime value) { + super.timeValue(columnName, value); + return this; + } + + @Override + public BuildableFromExisting timestampValue(String columnName, @Nullable LocalDateTime value) { + super.timestampValue(columnName, value); + return this; + } + + @Override + public BuildableFromExisting timestampTZValue(String columnName, @Nullable Instant value) { + super.timestampTZValue(columnName, value); + return this; + } + @Override public BuildableFromExisting value(Column column) { super.value(column); diff --git a/core/src/main/java/com/scalar/db/api/UpsertBuilder.java b/core/src/main/java/com/scalar/db/api/UpsertBuilder.java index f30f5dff24..f80c88e44a 100644 --- a/core/src/main/java/com/scalar/db/api/UpsertBuilder.java +++ b/core/src/main/java/com/scalar/db/api/UpsertBuilder.java @@ -20,12 +20,20 @@ import com.scalar.db.io.BlobColumn; import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.Column; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.Key; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import java.nio.ByteBuffer; +import 
java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; @@ -222,6 +230,30 @@ public Buildable blobValue(String columnName, @Nullable ByteBuffer value) { return this; } + @Override + public Buildable dateValue(String columnName, @Nullable LocalDate value) { + columns.put(columnName, DateColumn.of(columnName, value)); + return this; + } + + @Override + public Buildable timeValue(String columnName, @Nullable LocalTime value) { + columns.put(columnName, TimeColumn.of(columnName, value)); + return this; + } + + @Override + public Buildable timestampValue(String columnName, @Nullable LocalDateTime value) { + columns.put(columnName, TimestampColumn.of(columnName, value)); + return this; + } + + @Override + public Buildable timestampTZValue(String columnName, @Nullable Instant value) { + columns.put(columnName, TimestampTZColumn.of(columnName, value)); + return this; + } + @Override public Buildable value(Column column) { columns.put(column.getName(), column); @@ -390,6 +422,30 @@ public BuildableFromExisting blobValue(String columnName, @Nullable ByteBuffer v return this; } + @Override + public BuildableFromExisting dateValue(String columnName, @Nullable LocalDate value) { + super.dateValue(columnName, value); + return this; + } + + @Override + public BuildableFromExisting timeValue(String columnName, @Nullable LocalTime value) { + super.timeValue(columnName, value); + return this; + } + + @Override + public BuildableFromExisting timestampValue(String columnName, @Nullable LocalDateTime value) { + super.timestampValue(columnName, value); + return this; + } + + @Override + public BuildableFromExisting timestampTZValue(String columnName, @Nullable Instant value) { + super.timestampTZValue(columnName, value); + return this; + } + @Override public BuildableFromExisting value(Column column) { super.value(column); diff --git a/core/src/main/java/com/scalar/db/common/AbstractResult.java b/core/src/main/java/com/scalar/db/common/AbstractResult.java index ec9b79b542..0976f49813 100644 --- a/core/src/main/java/com/scalar/db/common/AbstractResult.java +++ b/core/src/main/java/com/scalar/db/common/AbstractResult.java @@ -3,9 +3,9 @@ import com.google.common.base.MoreObjects; import com.google.common.base.MoreObjects.ToStringHelper; import com.google.common.base.Suppliers; -import com.google.common.collect.ImmutableMap; import com.scalar.db.api.Result; import com.scalar.db.common.error.CoreError; +import com.scalar.db.io.Column; import com.scalar.db.io.Value; import com.scalar.db.util.ScalarDbUtils; import java.util.ArrayList; @@ -20,19 +20,9 @@ public abstract class AbstractResult implements Result { - private final Supplier>> valuesWithDefaultValues; private final Supplier hashCode; public AbstractResult() { - valuesWithDefaultValues = - Suppliers.memoize( - () -> - ImmutableMap.copyOf( - getColumns().entrySet().stream() - .collect( - Collectors.toMap( - Entry::getKey, e -> ScalarDbUtils.toValue(e.getValue()))))); - hashCode = Suppliers.memoize( () -> { @@ -56,14 +46,20 @@ protected void checkIfExists(String name) { @Deprecated @Override public Optional> getValue(String columnName) { - return Optional.ofNullable(valuesWithDefaultValues.get().get(columnName)); + Column column = getColumns().get(columnName); + if (column == null) { + return Optional.empty(); + } else { + return Optional.of(ScalarDbUtils.toValue(column)); + } } /** @deprecated As of release 3.6.0. 
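// Editor's sketch (not part of this patch): the same temporal setters on the
// Upsert builder shown above, assuming the standard Upsert.newBuilder() entry
// point. Names are illustrative.
import com.scalar.db.api.Upsert;
import com.scalar.db.io.Key;
import java.time.LocalDateTime;

class TemporalUpsertSketch {
  static Upsert touchLastSeen(int userId) {
    return Upsert.newBuilder()
        .namespace("ns")
        .table("users")
        .partitionKey(Key.ofInt("user_id", userId))
        .timestampValue("last_seen", LocalDateTime.of(2024, 3, 15, 9, 0))
        .build();
  }
}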
Will be removed in release 5.0.0 */ @Deprecated @Override public Map> getValues() { - return valuesWithDefaultValues.get(); + return getColumns().entrySet().stream() + .collect(Collectors.toMap(Entry::getKey, e -> ScalarDbUtils.toValue(e.getValue()))); } @Override diff --git a/core/src/main/java/com/scalar/db/common/CheckedDistributedStorageAdmin.java b/core/src/main/java/com/scalar/db/common/CheckedDistributedStorageAdmin.java index 1ebb2c25dc..462d02132d 100644 --- a/core/src/main/java/com/scalar/db/common/CheckedDistributedStorageAdmin.java +++ b/core/src/main/java/com/scalar/db/common/CheckedDistributedStorageAdmin.java @@ -313,10 +313,11 @@ public Set getNamespaceNames() throws ExecutionException { } @Override - public TableMetadata getImportTableMetadata(String namespace, String table) + public TableMetadata getImportTableMetadata( + String namespace, String table, Map overrideColumnsType) throws ExecutionException { try { - return admin.getImportTableMetadata(namespace, table); + return admin.getImportTableMetadata(namespace, table, overrideColumnsType); } catch (ExecutionException e) { throw new ExecutionException( CoreError.GETTING_IMPORT_TABLE_METADATA_FAILED.buildMessage( @@ -326,7 +327,11 @@ public TableMetadata getImportTableMetadata(String namespace, String table) } @Override - public void importTable(String namespace, String table, Map options) + public void importTable( + String namespace, + String table, + Map options, + Map overrideColumnsType) throws ExecutionException { TableMetadata tableMetadata = getTableMetadata(namespace, table); if (tableMetadata != null) { @@ -336,7 +341,7 @@ public void importTable(String namespace, String table, Map opti } try { - admin.importTable(namespace, table, options); + admin.importTable(namespace, table, options, overrideColumnsType); } catch (ExecutionException e) { throw new ExecutionException( CoreError.IMPORTING_TABLE_FAILED.buildMessage( diff --git a/core/src/main/java/com/scalar/db/common/DecoratedDistributedTransactionAdmin.java b/core/src/main/java/com/scalar/db/common/DecoratedDistributedTransactionAdmin.java index cf3657b419..588ec40a77 100644 --- a/core/src/main/java/com/scalar/db/common/DecoratedDistributedTransactionAdmin.java +++ b/core/src/main/java/com/scalar/db/common/DecoratedDistributedTransactionAdmin.java @@ -196,9 +196,13 @@ public void addNewColumnToTable( } @Override - public void importTable(String namespace, String table, Map options) + public void importTable( + String namespace, + String table, + Map options, + Map overrideColumnsType) throws ExecutionException { - distributedTransactionAdmin.importTable(namespace, table, options); + distributedTransactionAdmin.importTable(namespace, table, options, overrideColumnsType); } @Override diff --git a/core/src/main/java/com/scalar/db/common/ProjectedResult.java b/core/src/main/java/com/scalar/db/common/ProjectedResult.java index 2247d9e55d..8d93238199 100644 --- a/core/src/main/java/com/scalar/db/common/ProjectedResult.java +++ b/core/src/main/java/com/scalar/db/common/ProjectedResult.java @@ -6,6 +6,10 @@ import com.scalar.db.io.Column; import com.scalar.db.io.Key; import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.List; import java.util.Map; import java.util.Map.Entry; @@ -119,6 +123,34 @@ public byte[] getBlobAsBytes(String columnName) { return original.getBlobAsBytes(columnName); } + @Nullable + @Override + public LocalDate getDate(String columnName) 
{ + checkIfExists(columnName); + return original.getDate(columnName); + } + + @Nullable + @Override + public LocalTime getTime(String columnName) { + checkIfExists(columnName); + return original.getTime(columnName); + } + + @Nullable + @Override + public LocalDateTime getTimestamp(String columnName) { + checkIfExists(columnName); + return original.getTimestamp(columnName); + } + + @Nullable + @Override + public Instant getTimestampTZ(String columnName) { + checkIfExists(columnName); + return original.getTimestampTZ(columnName); + } + @Nullable @Override public Object getAsObject(String columnName) { diff --git a/core/src/main/java/com/scalar/db/common/ResultImpl.java b/core/src/main/java/com/scalar/db/common/ResultImpl.java index 0b5872aa28..df6cc528fd 100644 --- a/core/src/main/java/com/scalar/db/common/ResultImpl.java +++ b/core/src/main/java/com/scalar/db/common/ResultImpl.java @@ -6,6 +6,10 @@ import com.scalar.db.io.Key; import com.scalar.db.util.ScalarDbUtils; import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -113,6 +117,42 @@ public byte[] getBlobAsBytes(String columnName) { return column.getBlobValueAsBytes(); } + @Nullable + @Override + public LocalDate getDate(String columnName) { + checkIfExists(columnName); + Column column = columns.get(columnName); + assert column != null; + return column.getDateValue(); + } + + @Nullable + @Override + public LocalTime getTime(String columnName) { + checkIfExists(columnName); + Column column = columns.get(columnName); + assert column != null; + return column.getTimeValue(); + } + + @Nullable + @Override + public LocalDateTime getTimestamp(String columnName) { + checkIfExists(columnName); + Column column = columns.get(columnName); + assert column != null; + return column.getTimestampValue(); + } + + @Nullable + @Override + public Instant getTimestampTZ(String columnName) { + checkIfExists(columnName); + Column column = columns.get(columnName); + assert column != null; + return column.getTimestampTZValue(); + } + @Nullable @Override public Object getAsObject(String columnName) { diff --git a/core/src/main/java/com/scalar/db/common/checker/ColumnChecker.java b/core/src/main/java/com/scalar/db/common/checker/ColumnChecker.java index a7c33cf31d..02c383575a 100644 --- a/core/src/main/java/com/scalar/db/common/checker/ColumnChecker.java +++ b/core/src/main/java/com/scalar/db/common/checker/ColumnChecker.java @@ -7,10 +7,14 @@ import com.scalar.db.io.Column; import com.scalar.db.io.ColumnVisitor; import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import javax.annotation.concurrent.NotThreadSafe; /** A checker for the columns of a table for the storage abstraction. 
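// Editor's sketch (not part of this patch) of the validation pattern the new
// visit(...) overloads below follow: a column is valid only if its nullability
// matches what the operation requires and its runtime type equals the type
// declared in TableMetadata. Standalone equivalent for DATE:
import com.scalar.db.api.TableMetadata;
import com.scalar.db.io.DataType;
import com.scalar.db.io.DateColumn;

class DateColumnCheckSketch {
  static boolean isValid(
      DateColumn column, TableMetadata metadata, boolean requireNotNull, boolean requireNull) {
    if (requireNotNull && column.hasNullValue()) {
      return false;
    }
    if (requireNull && !column.hasNullValue()) {
      return false;
    }
    return metadata.getColumnDataType(column.getName()) == DataType.DATE;
  }
}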
 */
@@ -159,4 +163,60 @@ public void visit(BlobColumn column) {
     }
     isValid = tableMetadata.getColumnDataType(column.getName()) == DataType.BLOB;
   }
+
+  @Override
+  public void visit(DateColumn column) {
+    if (requireNotNull && column.hasNullValue()) {
+      isValid = false;
+      return;
+    }
+    if (requireNull && !column.hasNullValue()) {
+      isValid = false;
+      return;
+    }
+
+    isValid = tableMetadata.getColumnDataType(column.getName()) == DataType.DATE;
+  }
+
+  @Override
+  public void visit(TimeColumn column) {
+    if (requireNotNull && column.hasNullValue()) {
+      isValid = false;
+      return;
+    }
+    if (requireNull && !column.hasNullValue()) {
+      isValid = false;
+      return;
+    }
+
+    isValid = tableMetadata.getColumnDataType(column.getName()) == DataType.TIME;
+  }
+
+  @Override
+  public void visit(TimestampColumn column) {
+    if (requireNotNull && column.hasNullValue()) {
+      isValid = false;
+      return;
+    }
+    if (requireNull && !column.hasNullValue()) {
+      isValid = false;
+      return;
+    }
+
+    isValid = tableMetadata.getColumnDataType(column.getName()) == DataType.TIMESTAMP;
+  }
+
+  @Override
+  public void visit(TimestampTZColumn column) {
+    if (requireNotNull && column.hasNullValue()) {
+      isValid = false;
+      return;
+    }
+    if (requireNull && !column.hasNullValue()) {
+      isValid = false;
+      return;
+    }
+
+    isValid = tableMetadata.getColumnDataType(column.getName()) == DataType.TIMESTAMPTZ;
+  }
 }
diff --git a/core/src/main/java/com/scalar/db/common/error/CoreError.java b/core/src/main/java/com/scalar/db/common/error/CoreError.java
index 397ac6ac01..a75905d15c 100644
--- a/core/src/main/java/com/scalar/db/common/error/CoreError.java
+++ b/core/src/main/java/com/scalar/db/common/error/CoreError.java
@@ -718,6 +718,48 @@ public enum CoreError implements ScalarDbError {
       "The provided partition key order does not match the table schema. Required order: %s",
       "",
       ""),
+  OUT_OF_RANGE_COLUMN_VALUE_FOR_DATE(
+      Category.USER_ERROR,
+      "0158",
+      "This DATE column value is out of the valid range. It must be between 1000-01-01 and 9999-12-31. Value: %s",
+      "",
+      ""),
+  SUBMICROSECOND_PRECISION_NOT_SUPPORTED_FOR_TIME(
+      Category.USER_ERROR,
+      "0159",
+      "This TIME column value precision cannot be shorter than one microsecond. Value: %s",
+      "",
+      ""),
+  OUT_OF_RANGE_COLUMN_VALUE_FOR_TIMESTAMP(
+      Category.USER_ERROR,
+      "0160",
+      "This TIMESTAMP column value is out of the valid range. It must be between 1000-01-01T00:00:00.000 and 9999-12-31T23:59:59.999. Value: %s",
+      "",
+      ""),
+  SUBMILLISECOND_PRECISION_NOT_SUPPORTED_FOR_TIMESTAMP(
+      Category.USER_ERROR,
+      "0161",
+      "This TIMESTAMP column value precision cannot be shorter than one millisecond. Value: %s",
+      "",
+      ""),
+  OUT_OF_RANGE_COLUMN_VALUE_FOR_TIMESTAMPTZ(
+      Category.USER_ERROR,
+      "0162",
+      "This TIMESTAMPTZ column value is out of the valid range. It must be between 1000-01-01T00:00:00.000Z and 9999-12-31T23:59:59.999Z. Value: %s",
+      "",
+      ""),
+  SUBMILLISECOND_PRECISION_NOT_SUPPORTED_FOR_TIMESTAMPTZ(
+      Category.USER_ERROR,
+      "0163",
+      "This TIMESTAMPTZ column value precision cannot be shorter than one millisecond. 
Value: %s", + "", + ""), + JDBC_IMPORT_DATA_TYPE_OVERRIDE_NOT_SUPPORTED( + Category.USER_ERROR, + "0164", + "The underlying-storage data type %s is not supported as the ScalarDB %s data type: %s", + "", + ""), // // Errors for the concurrency error category diff --git a/core/src/main/java/com/scalar/db/io/Column.java b/core/src/main/java/com/scalar/db/io/Column.java index e46a364f44..80b3c702c8 100644 --- a/core/src/main/java/com/scalar/db/io/Column.java +++ b/core/src/main/java/com/scalar/db/io/Column.java @@ -1,6 +1,10 @@ package com.scalar.db.io; import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.Optional; import javax.annotation.Nullable; @@ -153,6 +157,46 @@ default byte[] getBlobValueAsBytes() { throw new UnsupportedOperationException("The data type of this column is " + getDataType()); } + /** + * Returns the DATE value of this {@code Column} as a Java LocalDate type. + * + * @return the value of this {@code Column}. if the value is NULL, null + */ + @Nullable + default LocalDate getDateValue() { + throw new UnsupportedOperationException("The data type of this column is " + getDataType()); + } + + /** + * Returns the TIME value of this {@code Column} as a Java LocalTime type. + * + * @return the value of this {@code Column}. if the value is NULL, null + */ + @Nullable + default LocalTime getTimeValue() { + throw new UnsupportedOperationException("The data type of this column is " + getDataType()); + } + + /** + * Returns the TIMESTAMP value of this {@code Column} as a Java LocalDateTime type. + * + * @return the value of this {@code Column}. if the value is NULL, null + */ + @Nullable + default LocalDateTime getTimestampValue() { + throw new UnsupportedOperationException("The data type of this column is " + getDataType()); + } + + /** + * Returns the TIMESTAMPTZ value of this {@code Column} as a Java Instant type. + * + * @return the value of this {@code Column}. if the value is NULL, null + */ + @Nullable + default Instant getTimestampTZValue() { + throw new UnsupportedOperationException("The data type of this column is " + getDataType()); + } + /** * Returns the value of this {@code Column} as a Java object type. * @@ -161,7 +205,10 @@ default byte[] getBlobValueAsBytes() { * {@code LONG} object. If the columns is a FLOAT type, it returns a {@code FLOAT} object. If the * columns is a DOUBLE type, it returns a {@code DOUBLE} object. If the columns is a TEXT type, it * returns a {@code String} object. If the columns is a BLOB type, it returns a {@code ByteBuffer} - * object. + * object. If the columns is a DATE type, it returns a {@code LocalDate} object. If the columns is + * a TIME type, it returns a {@code LocalTime} object. If the columns is a TIMESTAMP type, it + * returns a {@code LocalDateTime} object. If the columns is a TIMESTAMPTZ type, it returns a + * {@code Instant} object. * * @return the value of this {@code Column}. 
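// Editor's sketch (not part of this patch): the default getters above throw
// UnsupportedOperationException unless the column actually holds that type, so
// callers should match the getter to the column's DataType.
import com.scalar.db.io.Column;
import com.scalar.db.io.DateColumn;
import java.time.LocalDate;

class ColumnAccessSketch {
  static LocalDate readDate() {
    Column<LocalDate> column = DateColumn.of("d", LocalDate.of(2024, 1, 2));
    // column.getTextValue() would throw UnsupportedOperationException here.
    return column.getDateValue(); // matches the column's type, so this succeeds
  }
}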
if the value is NULL, null */ diff --git a/core/src/main/java/com/scalar/db/io/ColumnVisitor.java b/core/src/main/java/com/scalar/db/io/ColumnVisitor.java index 3bab51d286..53e413154e 100644 --- a/core/src/main/java/com/scalar/db/io/ColumnVisitor.java +++ b/core/src/main/java/com/scalar/db/io/ColumnVisitor.java @@ -15,4 +15,12 @@ public interface ColumnVisitor { void visit(TextColumn column); void visit(BlobColumn column); + + void visit(DateColumn column); + + void visit(TimeColumn column); + + void visit(TimestampColumn column); + + void visit(TimestampTZColumn column); } diff --git a/core/src/main/java/com/scalar/db/io/DataType.java b/core/src/main/java/com/scalar/db/io/DataType.java index 12d345efbf..e72f2d2e4c 100644 --- a/core/src/main/java/com/scalar/db/io/DataType.java +++ b/core/src/main/java/com/scalar/db/io/DataType.java @@ -7,5 +7,9 @@ public enum DataType { FLOAT, DOUBLE, TEXT, - BLOB + BLOB, + DATE, + TIME, + TIMESTAMP, + TIMESTAMPTZ } diff --git a/core/src/main/java/com/scalar/db/io/DateColumn.java b/core/src/main/java/com/scalar/db/io/DateColumn.java new file mode 100644 index 0000000000..16c7dc4430 --- /dev/null +++ b/core/src/main/java/com/scalar/db/io/DateColumn.java @@ -0,0 +1,129 @@ +package com.scalar.db.io; + +import com.google.common.base.MoreObjects; +import com.google.common.collect.ComparisonChain; +import com.scalar.db.common.error.CoreError; +import java.time.LocalDate; +import java.util.Comparator; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +/** + * A {@code Column} for a DATE type. It represents a date without a time-zone in the ISO-8601 + * calendar system, such as 2007-12-03 + */ +@Immutable +public class DateColumn implements Column { + /** The minimum DATE value is 1000-01-01 */ + public static final LocalDate MIN_VALUE = LocalDate.of(1000, 1, 1); + /** The maximum DATE value is 9999-12-31 */ + public static final LocalDate MAX_VALUE = LocalDate.of(9999, 12, 31); + + private final String name; + @Nullable private final LocalDate value; + + private DateColumn(String name, @Nullable LocalDate value) { + if (value != null && (value.isBefore(MIN_VALUE) || value.isAfter(MAX_VALUE))) { + throw new IllegalArgumentException( + CoreError.OUT_OF_RANGE_COLUMN_VALUE_FOR_DATE.buildMessage(value)); + } + + this.name = Objects.requireNonNull(name); + this.value = value; + } + + @Override + public String getName() { + return name; + } + + @Override + public Optional getValue() { + return Optional.ofNullable(value); + } + + @Nullable + @Override + public LocalDate getDateValue() { + return value; + } + + @Override + public DateColumn copyWith(String name) { + return new DateColumn(name, value); + } + + @Override + public DataType getDataType() { + return DataType.DATE; + } + + @Override + public boolean hasNullValue() { + return value == null; + } + + @Nullable + @Override + public Object getValueAsObject() { + return value; + } + + @Override + public int compareTo(Column o) { + return ComparisonChain.start() + .compare(getName(), o.getName()) + .compareTrueFirst(hasNullValue(), o.hasNullValue()) + .compare(value, o.getDateValue(), Comparator.nullsFirst(Comparator.naturalOrder())) + .result(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof DateColumn)) { + return false; + } + DateColumn that = (DateColumn) o; + return Objects.equals(name, that.name) && Objects.equals(value, that.value); + } + + @Override + public 
int hashCode() { + return Objects.hash(name, value); + } + + @Override + public void accept(ColumnVisitor visitor) { + visitor.visit(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("name", name).add("value", value).toString(); + } + /** + * Returns a Date column instance with the specified column name and value. + * + * @param columnName a column name + * @param value a column value + * @return a Date column instance with the specified column name and value + */ + public static DateColumn of(String columnName, LocalDate value) { + return new DateColumn(columnName, value); + } + + /** + * Returns a Date column instance with the specified column name and a null value. + * + * @param columnName a column name + * @return a Date column instance with the specified column name and a null value + */ + public static DateColumn ofNull(String columnName) { + return new DateColumn(columnName, null); + } +} diff --git a/core/src/main/java/com/scalar/db/io/Key.java b/core/src/main/java/com/scalar/db/io/Key.java index 27e5696610..a9f8b26f9a 100644 --- a/core/src/main/java/com/scalar/db/io/Key.java +++ b/core/src/main/java/com/scalar/db/io/Key.java @@ -8,6 +8,10 @@ import com.scalar.db.common.error.CoreError; import com.scalar.db.util.ScalarDbUtils; import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -274,6 +278,14 @@ private Column toColumn(String name, Object value) { return BlobColumn.of(name, (byte[]) value); } else if (value instanceof ByteBuffer) { return BlobColumn.of(name, (ByteBuffer) value); + } else if (value instanceof LocalDate) { + return DateColumn.of(name, (LocalDate) value); + } else if (value instanceof LocalTime) { + return TimeColumn.of(name, (LocalTime) value); + } else if (value instanceof LocalDateTime) { + return TimestampColumn.of(name, (LocalDateTime) value); + } else if (value instanceof Instant) { + return TimestampTZColumn.of(name, (Instant) value); } else { throw new IllegalArgumentException( CoreError.KEY_BUILD_ERROR_UNSUPPORTED_TYPE.buildMessage( @@ -417,6 +429,47 @@ public byte[] getBlobValueAsBytes(int i) { return columns.get(i).getBlobValueAsBytes(); } + /** + * Returns the DATE value of the i-th column which this key is composed of. + * + * @param i the position of the column which this key is composed of + * @return the DATE value of the i-th column which this key is composed of as LocalDate type + */ + public LocalDate getDateValue(int i) { + return columns.get(i).getDateValue(); + } + + /** + * Returns the TIME value of the i-th column which this key is composed of. + * + * @param i the position of the column which this key is composed of + * @return the TIME value of the i-th column which this key is composed of as LocalTime type + */ + public LocalTime getTimeValue(int i) { + return columns.get(i).getTimeValue(); + } + + /** + * Returns the TIMESTAMP value of the i-th column which this key is composed of. + * + * @param i the position of the column which this key is composed of + * @return the TIMESTAMP value of the i-th column which this key is composed of as LocalDateTime + * type + */ + public LocalDateTime getTimestampValue(int i) { + return columns.get(i).getTimestampValue(); + } + + /** + * Returns the TIMESTAMPTZ value of the i-th column which this key is composed of. 
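// Editor's sketch (not part of this patch): DateColumn (defined above)
// validates its range eagerly, so out-of-range dates fail at construction
// rather than at write time.
import com.scalar.db.io.DateColumn;
import java.time.LocalDate;

class DateColumnRangeSketch {
  static void demo() {
    DateColumn ok = DateColumn.of("d", LocalDate.of(2024, 2, 29)); // within range
    System.out.println(ok);
    try {
      DateColumn.of("d", LocalDate.of(999, 12, 31)); // before MIN_VALUE (1000-01-01)
    } catch (IllegalArgumentException e) {
      // expected: out-of-range DATE value
    }
  }
}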
+   *
+   * @param i the position of the column which this key is composed of
+   * @return the TIMESTAMPTZ value of the i-th column which this key is composed of as Instant type
+   */
+  public Instant getTimestampTZValue(int i) {
+    return columns.get(i).getTimestampTZValue();
+  }
+
   /**
    * Returns the value of the i-th column which this key is composed of as an Object type.
    *
@@ -588,6 +641,50 @@ public static Key ofBlob(String columnName, ByteBuffer value) {
     return new Key(columnName, value);
   }
 
+  /**
+   * Creates a {@code Key} object with a single column with a DATE type
+   *
+   * @param columnName a column name
+   * @param value a DATE value of the column as LocalDate type
+   * @return a {@code Key} object
+   */
+  public static Key ofDate(String columnName, LocalDate value) {
+    return new Key(Collections.singletonList(DateColumn.of(columnName, value)));
+  }
+
+  /**
+   * Creates a {@code Key} object with a single column with a TIME type
+   *
+   * @param columnName a column name
+   * @param value a TIME value of the column as LocalTime type
+   * @return a {@code Key} object
+   */
+  public static Key ofTime(String columnName, LocalTime value) {
+    return new Key(Collections.singletonList(TimeColumn.of(columnName, value)));
+  }
+
+  /**
+   * Creates a {@code Key} object with a single column with a TIMESTAMP type
+   *
+   * @param columnName a column name
+   * @param value a TIMESTAMP value of the column as LocalDateTime type
+   * @return a {@code Key} object
+   */
+  public static Key ofTimestamp(String columnName, LocalDateTime value) {
+    return new Key(Collections.singletonList(TimestampColumn.of(columnName, value)));
+  }
+
+  /**
+   * Creates a {@code Key} object with a single column with a TIMESTAMPTZ type
+   *
+   * @param columnName a column name
+   * @param value a TIMESTAMPTZ value of the column as Instant type
+   * @return a {@code Key} object
+   */
+  public static Key ofTimestampTZ(String columnName, Instant value) {
+    return new Key(Collections.singletonList(TimestampTZColumn.of(columnName, value)));
+  }
+
   /**
    * Create an empty {@code Key} object
    *
@@ -783,6 +880,53 @@ public Builder addBlob(String columnName, ByteBuffer value) {
       return this;
     }
 
+    /**
+     * Adds DATE value as an element of Key.
+     *
+     * @param columnName a column name to add
+     * @param value a DATE value to add as LocalDate type
+     * @return a builder object
+     */
+    public Builder addDate(String columnName, LocalDate value) {
+      columns.add(DateColumn.of(columnName, value));
+      return this;
+    }
+
+    /**
+     * Adds TIME value as an element of Key.
+     *
+     * @param columnName a column name to add
+     * @param value a TIME value to add as LocalTime type
+     * @return a builder object
+     */
+    public Builder addTime(String columnName, LocalTime value) {
+      columns.add(TimeColumn.of(columnName, value));
+      return this;
+    }
+
+    /**
+     * Adds TIMESTAMP value as an element of Key.
+     *
+     * @param columnName a column name to add
+     * @param value a TIMESTAMP value to add as LocalDateTime type
+     * @return a builder object
+     */
+    public Builder addTimestamp(String columnName, LocalDateTime value) {
+      columns.add(TimestampColumn.of(columnName, value));
+      return this;
+    }
+
+    /**
+     * Adds TIMESTAMPTZ value as an element of Key. 
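// Editor's sketch (not part of this patch): composing keys from the temporal
// factories and Builder adders in this file. Column names are illustrative.
import com.scalar.db.io.Key;
import java.time.Instant;
import java.time.LocalDate;

class TemporalKeySketch {
  static Key singleColumn() {
    return Key.ofDate("event_date", LocalDate.of(2024, 3, 15));
  }

  static Key composite() {
    return Key.newBuilder()
        .addDate("event_date", LocalDate.of(2024, 3, 15))
        .addTimestampTZ("created_at", Instant.parse("2024-03-15T09:00:00Z"))
        .build();
  }
}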
+ * + * @param columnName a column name to add + * @param value a TIMESTAMPTZ value to add as Instant type + * @return a builder object + */ + public Builder addTimestampTZ(String columnName, Instant value) { + columns.add(TimestampTZColumn.of(columnName, value)); + return this; + } + /** * @param value a value to add * @return a builder object diff --git a/core/src/main/java/com/scalar/db/io/TimeColumn.java b/core/src/main/java/com/scalar/db/io/TimeColumn.java new file mode 100644 index 0000000000..18e2cb19e3 --- /dev/null +++ b/core/src/main/java/com/scalar/db/io/TimeColumn.java @@ -0,0 +1,133 @@ +package com.scalar.db.io; + +import com.google.common.base.MoreObjects; +import com.google.common.collect.ComparisonChain; +import com.scalar.db.common.error.CoreError; +import java.time.LocalTime; +import java.util.Comparator; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +/** + * A {@code Column} for a TIME type. It represents a time without a time-zone in the ISO-8601 + * calendar system, such as 16:15:30, and can be expressed with microsecond precision. + */ +@Immutable +public class TimeColumn implements Column { + + /** The minimum TIME value is 00:00:00.000000 */ + public static final LocalTime MIN_VALUE = LocalTime.of(0, 0, 0, 0); + /** The maximum TIME value is 23:59:59.999999 */ + public static final LocalTime MAX_VALUE = LocalTime.of(23, 59, 59, 999_999_000); + /** The precision of a TIME is up to 1 microsecond. */ + public static final int FRACTIONAL_SECONDS_PRECISION_IN_NANOSECONDS = 1_000; + + private final String name; + @Nullable private final LocalTime value; + + @SuppressWarnings("JavaLocalTimeGetNano") + private TimeColumn(String name, @Nullable LocalTime value) { + if (value != null && value.getNano() % FRACTIONAL_SECONDS_PRECISION_IN_NANOSECONDS != 0) { + throw new IllegalArgumentException( + CoreError.SUBMICROSECOND_PRECISION_NOT_SUPPORTED_FOR_TIME.buildMessage(value)); + } + + this.name = Objects.requireNonNull(name); + this.value = value; + } + + @Override + public String getName() { + return name; + } + + @Override + public Optional getValue() { + return Optional.ofNullable(value); + } + + @Nullable + @Override + public LocalTime getTimeValue() { + return value; + } + + @Override + public TimeColumn copyWith(String name) { + return new TimeColumn(name, value); + } + + @Override + public DataType getDataType() { + return DataType.TIME; + } + + @Override + public boolean hasNullValue() { + return value == null; + } + + @Nullable + @Override + public Object getValueAsObject() { + return value; + } + + @Override + public int compareTo(Column o) { + return ComparisonChain.start() + .compare(getName(), o.getName()) + .compareTrueFirst(hasNullValue(), o.hasNullValue()) + .compare(value, o.getTimeValue(), Comparator.nullsFirst(Comparator.naturalOrder())) + .result(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof TimeColumn)) { + return false; + } + TimeColumn that = (TimeColumn) o; + return Objects.equals(name, that.name) && Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + return Objects.hash(name, value); + } + + @Override + public void accept(ColumnVisitor visitor) { + visitor.visit(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("name", name).add("value", value).toString(); + } + /** + * Returns a Time column instance with the 
specified column name and value. + * + * @param columnName a column name + * @param value a column value + * @return a Time column instance with the specified column name and value + */ + public static TimeColumn of(String columnName, LocalTime value) { + return new TimeColumn(columnName, value); + } + + /** + * Returns a Time column instance with the specified column name and a null value. + * + * @param columnName a column name + * @return a Time column instance with the specified column name and a null value + */ + public static TimeColumn ofNull(String columnName) { + return new TimeColumn(columnName, null); + } +} diff --git a/core/src/main/java/com/scalar/db/io/TimestampColumn.java b/core/src/main/java/com/scalar/db/io/TimestampColumn.java new file mode 100644 index 0000000000..4e96ff3132 --- /dev/null +++ b/core/src/main/java/com/scalar/db/io/TimestampColumn.java @@ -0,0 +1,138 @@ +package com.scalar.db.io; + +import com.google.common.base.MoreObjects; +import com.google.common.collect.ComparisonChain; +import com.scalar.db.common.error.CoreError; +import java.time.LocalDateTime; +import java.util.Comparator; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +/** + * A {@code Column} for a TIMESTAMP type. It represents a date-time without a time-zone in the + * ISO-8601 calendar system, such as 2017-06-19T16:15:30, and can be expressed with millisecond + * precision. + */ +@Immutable +public class TimestampColumn implements Column { + /** The minimum TIMESTAMP value is 1000-01-01T00:00:00.000 */ + public static final LocalDateTime MIN_VALUE = LocalDateTime.of(1000, 1, 1, 0, 0); + /** The maximum TIMESTAMP value is 9999-12-31T23:59:59.999 */ + public static final LocalDateTime MAX_VALUE = + LocalDateTime.of(9999, 12, 31, 23, 59, 59, 999_000_000); + /** The precision of a TIMESTAMP is up to 1 millisecond. 
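// Editor's sketch (not part of this patch): per the precision rule stated
// above, TIMESTAMP accepts at most millisecond precision, so values from
// high-resolution clocks should be truncated first; otherwise construction
// throws IllegalArgumentException.
import com.scalar.db.io.TimestampColumn;
import java.time.LocalDateTime;
import java.time.temporal.ChronoUnit;

class TimestampPrecisionSketch {
  static TimestampColumn nowAtMillis() {
    LocalDateTime now = LocalDateTime.now().truncatedTo(ChronoUnit.MILLIS);
    return TimestampColumn.of("updated_at", now);
  }
}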
*/ + public static final int FRACTIONAL_SECONDS_PRECISION_IN_NANOSECONDS = 1_000_000; + + private final String name; + @Nullable private final LocalDateTime value; + + @SuppressWarnings("JavaLocalDateTimeGetNano") + private TimestampColumn(String name, @Nullable LocalDateTime value) { + if (value != null && (value.isBefore(MIN_VALUE) || value.isAfter(MAX_VALUE))) { + throw new IllegalArgumentException( + CoreError.OUT_OF_RANGE_COLUMN_VALUE_FOR_TIMESTAMP.buildMessage(value)); + } + if (value != null && value.getNano() % FRACTIONAL_SECONDS_PRECISION_IN_NANOSECONDS != 0) { + throw new IllegalArgumentException( + CoreError.SUBMILLISECOND_PRECISION_NOT_SUPPORTED_FOR_TIMESTAMP.buildMessage(value)); + } + + this.name = Objects.requireNonNull(name); + this.value = value; + } + + @Override + public String getName() { + return name; + } + + @Override + public Optional getValue() { + return Optional.ofNullable(value); + } + + @Nullable + @Override + public LocalDateTime getTimestampValue() { + return value; + } + + @Override + public TimestampColumn copyWith(String name) { + return new TimestampColumn(name, value); + } + + @Override + public DataType getDataType() { + return DataType.TIMESTAMP; + } + + @Override + public boolean hasNullValue() { + return value == null; + } + + @Nullable + @Override + public Object getValueAsObject() { + return value; + } + + @Override + public int compareTo(Column o) { + return ComparisonChain.start() + .compare(getName(), o.getName()) + .compareTrueFirst(hasNullValue(), o.hasNullValue()) + .compare(value, o.getTimestampValue(), Comparator.nullsFirst(Comparator.naturalOrder())) + .result(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof TimestampColumn)) { + return false; + } + TimestampColumn that = (TimestampColumn) o; + return Objects.equals(name, that.name) && Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + return Objects.hash(name, value); + } + + @Override + public void accept(ColumnVisitor visitor) { + visitor.visit(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("name", name).add("value", value).toString(); + } + /** + * Returns a Timestamp column instance with the specified column name and value. + * + * @param columnName a column name + * @param value a column value + * @return a Timestamp column instance with the specified column name and value + */ + public static TimestampColumn of(String columnName, LocalDateTime value) { + return new TimestampColumn(columnName, value); + } + + /** + * Returns a Timestamp column instance with the specified column name and a null value. 
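// Editor's sketch (not part of this patch): ofNull (declared just below)
// represents an explicit NULL for a TIMESTAMP column, for example when a value
// is absent or being cleared.
import com.scalar.db.io.TimestampColumn;

class TimestampNullSketch {
  static TimestampColumn absentValue() {
    TimestampColumn column = TimestampColumn.ofNull("updated_at");
    assert column.hasNullValue() && column.getTimestampValue() == null;
    return column;
  }
}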
+ * + * @param columnName a column name + * @return a Timestamp column instance with the specified column name and a null value + */ + public static TimestampColumn ofNull(String columnName) { + return new TimestampColumn(columnName, null); + } +} diff --git a/core/src/main/java/com/scalar/db/io/TimestampTZColumn.java b/core/src/main/java/com/scalar/db/io/TimestampTZColumn.java new file mode 100644 index 0000000000..23f18b3857 --- /dev/null +++ b/core/src/main/java/com/scalar/db/io/TimestampTZColumn.java @@ -0,0 +1,141 @@ +package com.scalar.db.io; + +import com.google.common.base.MoreObjects; +import com.google.common.collect.ComparisonChain; +import com.scalar.db.common.error.CoreError; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneOffset; +import java.util.Comparator; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +/** + * A {@code Column} for a TIMESTAMPTZ type. It represents a date-time in the UTC time zone in the + * ISO-8601 calendar system, such as 2017-06-19T16:15:30Z, and can be expressed with millisecond + * precision. + */ +@Immutable +public class TimestampTZColumn implements Column { + /** The minimum TIMESTAMPTZ value is 1000-01-01T00:00:00.000Z */ + public static final Instant MIN_VALUE = + LocalDateTime.of(1000, 1, 1, 0, 0).toInstant(ZoneOffset.UTC); + /** The maximum TIMESTAMPTZ value is 9999-12-31T23:59:59.999Z */ + public static final Instant MAX_VALUE = + LocalDateTime.of(9999, 12, 31, 23, 59, 59, 999_000_000).toInstant(ZoneOffset.UTC); + /** The precision of a TIMESTAMPTZ is up to 1 millisecond. */ + public static final int FRACTIONAL_SECONDS_PRECISION_IN_NANOSECONDS = 1_000_000; + + private final String name; + @Nullable private final Instant value; + + @SuppressWarnings("JavaInstantGetSecondsGetNano") + private TimestampTZColumn(String name, @Nullable Instant value) { + if (value != null && (value.isBefore(MIN_VALUE) || value.isAfter(MAX_VALUE))) { + throw new IllegalArgumentException( + CoreError.OUT_OF_RANGE_COLUMN_VALUE_FOR_TIMESTAMPTZ.buildMessage(value)); + } + if (value != null && value.getNano() % FRACTIONAL_SECONDS_PRECISION_IN_NANOSECONDS != 0) { + throw new IllegalArgumentException( + CoreError.SUBMILLISECOND_PRECISION_NOT_SUPPORTED_FOR_TIMESTAMPTZ.buildMessage(value)); + } + + this.name = Objects.requireNonNull(name); + this.value = value; + } + + @Override + public String getName() { + return name; + } + + @Override + public Optional getValue() { + return Optional.ofNullable(value); + } + + @Nullable + @Override + public Instant getTimestampTZValue() { + return value; + } + + @Override + public TimestampTZColumn copyWith(String name) { + return new TimestampTZColumn(name, value); + } + + @Override + public DataType getDataType() { + return DataType.TIMESTAMPTZ; + } + + @Override + public boolean hasNullValue() { + return value == null; + } + + @Nullable + @Override + public Object getValueAsObject() { + return value; + } + + @Override + public int compareTo(Column o) { + return ComparisonChain.start() + .compare(getName(), o.getName()) + .compareTrueFirst(hasNullValue(), o.hasNullValue()) + .compare(value, o.getTimestampTZValue(), Comparator.nullsFirst(Comparator.naturalOrder())) + .result(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof TimestampTZColumn)) { + return false; + } + TimestampTZColumn that = (TimestampTZColumn) o; + return Objects.equals(name, 
that.name) && Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + return Objects.hash(name, value); + } + + @Override + public void accept(ColumnVisitor visitor) { + visitor.visit(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("name", name).add("value", value).toString(); + } + /** + * Returns a TimestampTZ column instance with the specified column name and value. + * + * @param columnName a column name + * @param value a column value + * @return a TimestampTZ column instance with the specified column name and value + */ + public static TimestampTZColumn of(String columnName, Instant value) { + return new TimestampTZColumn(columnName, value); + } + + /** + * Returns a TimestampTZ column instance with the specified column name and a null value. + * + * @param columnName a column name + * @return a TimestampTZ column instance with the specified column name and a null value + */ + public static TimestampTZColumn ofNull(String columnName) { + return new TimestampTZColumn(columnName, null); + } +} diff --git a/core/src/main/java/com/scalar/db/service/AdminService.java b/core/src/main/java/com/scalar/db/service/AdminService.java index 46bf250049..a40181e67d 100644 --- a/core/src/main/java/com/scalar/db/service/AdminService.java +++ b/core/src/main/java/com/scalar/db/service/AdminService.java @@ -94,9 +94,10 @@ public void addNewColumnToTable( } @Override - public TableMetadata getImportTableMetadata(String namespace, String table) + public TableMetadata getImportTableMetadata( + String namespace, String table, Map overrideColumnsType) throws ExecutionException { - return admin.getImportTableMetadata(namespace, table); + return admin.getImportTableMetadata(namespace, table, overrideColumnsType); } @Override @@ -107,9 +108,13 @@ public void addRawColumnToTable( } @Override - public void importTable(String namespace, String table, Map options) + public void importTable( + String namespace, + String table, + Map options, + Map overrideColumnsType) throws ExecutionException { - admin.importTable(namespace, table, options); + admin.importTable(namespace, table, options, overrideColumnsType); } @Override diff --git a/core/src/main/java/com/scalar/db/storage/cassandra/CassandraAdmin.java b/core/src/main/java/com/scalar/db/storage/cassandra/CassandraAdmin.java index 94272f18f2..4dcb46c2e5 100644 --- a/core/src/main/java/com/scalar/db/storage/cassandra/CassandraAdmin.java +++ b/core/src/main/java/com/scalar/db/storage/cassandra/CassandraAdmin.java @@ -65,6 +65,12 @@ public CassandraAdmin(DatabaseConfig config) { public void createTable( String namespace, String table, TableMetadata metadata, Map options) throws ExecutionException { + for (String column : metadata.getColumnNames()) { + if (metadata.getColumnDataTypes().get(column).equals(DataType.TIMESTAMP)) { + throw new UnsupportedOperationException( + "The TIMESTAMP data type is not supported in Cassandra. 
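A usage sketch of the guard added to CassandraAdmin.createTable above (namespace, table, and column names are hypothetical): Cassandra has no zoneless timestamp type, so TIMESTAMP columns are rejected up front, while TIMESTAMPTZ maps to the native timestamp type.

    TableMetadata metadata =
        TableMetadata.newBuilder()
            .addColumn("id", DataType.INT)
            .addColumn("updated_at", DataType.TIMESTAMP) // rejected on Cassandra
            .addPartitionKey("id")
            .build();
    // admin.createTable("ns", "tbl", metadata, options) throws UnsupportedOperationException;
    // declaring updated_at as DataType.TIMESTAMPTZ would succeed instead.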
column: " + column); + } + } try { createNamespacesTableIfNotExists(); createTableInternal(namespace, table, metadata, false, options); @@ -269,7 +275,8 @@ private TableMetadata createTableMetadata(com.datastax.driver.core.TableMetadata } @Override - public TableMetadata getImportTableMetadata(String namespace, String table) { + public TableMetadata getImportTableMetadata( + String namespace, String table, Map overrideColumnsType) { throw new UnsupportedOperationException( CoreError.CASSANDRA_IMPORT_NOT_SUPPORTED.buildMessage()); } @@ -282,7 +289,11 @@ public void addRawColumnToTable( } @Override - public void importTable(String namespace, String table, Map options) { + public void importTable( + String namespace, + String table, + Map options, + Map overrideColumnsType) { throw new UnsupportedOperationException( CoreError.CASSANDRA_IMPORT_NOT_SUPPORTED.buildMessage()); } @@ -370,6 +381,10 @@ public void repairTable( public void addNewColumnToTable( String namespace, String table, String columnName, DataType columnType) throws ExecutionException { + if (columnType == DataType.TIMESTAMP) { + throw new UnsupportedOperationException( + "The TIMESTAMP data type is not supported in Cassandra. column: " + columnName); + } try { String alterTableQuery = SchemaBuilder.alterTable(namespace, table) @@ -583,6 +598,12 @@ private DataType fromCassandraDataType( return DataType.BOOLEAN; case BLOB: return DataType.BLOB; + case DATE: + return DataType.DATE; + case TIME: + return DataType.TIME; + case TIMESTAMP: + return DataType.TIMESTAMPTZ; default: throw new ExecutionException( String.format("%s is not yet supported", cassandraDataTypeName)); @@ -610,6 +631,12 @@ private com.datastax.driver.core.DataType toCassandraDataType(DataType dataType) return com.datastax.driver.core.DataType.text(); case BLOB: return com.datastax.driver.core.DataType.blob(); + case DATE: + return com.datastax.driver.core.DataType.date(); + case TIME: + return com.datastax.driver.core.DataType.time(); + case TIMESTAMPTZ: + return com.datastax.driver.core.DataType.timestamp(); default: throw new AssertionError(); } diff --git a/core/src/main/java/com/scalar/db/storage/cassandra/DeleteStatementHandler.java b/core/src/main/java/com/scalar/db/storage/cassandra/DeleteStatementHandler.java index cc2e3c78bf..365b7a2082 100644 --- a/core/src/main/java/com/scalar/db/storage/cassandra/DeleteStatementHandler.java +++ b/core/src/main/java/com/scalar/db/storage/cassandra/DeleteStatementHandler.java @@ -63,12 +63,16 @@ private com.datastax.driver.core.querybuilder.Delete prepare(Delete del) { com.datastax.driver.core.querybuilder.Delete.Where where = delete.where(); del.getPartitionKey() + .getColumns() .forEach(v -> where.and(QueryBuilder.eq(quoteIfNecessary(v.getName()), bindMarker()))); del.getClusteringKey() .ifPresent( k -> - k.forEach( - v -> where.and(QueryBuilder.eq(quoteIfNecessary(v.getName()), bindMarker())))); + k.getColumns() + .forEach( + v -> + where.and( + QueryBuilder.eq(quoteIfNecessary(v.getName()), bindMarker())))); setCondition(where, del); diff --git a/core/src/main/java/com/scalar/db/storage/cassandra/InsertStatementHandler.java b/core/src/main/java/com/scalar/db/storage/cassandra/InsertStatementHandler.java index 8aa029c8c7..f59c0fb27e 100644 --- a/core/src/main/java/com/scalar/db/storage/cassandra/InsertStatementHandler.java +++ b/core/src/main/java/com/scalar/db/storage/cassandra/InsertStatementHandler.java @@ -63,9 +63,14 @@ private Insert prepare(Put put) { insertInto( quoteIfNecessary(put.forNamespace().get()), 
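The statement-handler changes in this file and below replace direct iteration over Key with Key#getColumns(); an illustrative sketch (hypothetical key):

    Key partitionKey = Key.ofInt("id", 1);
    for (Column<?> column : partitionKey.getColumns()) {
      System.out.println(column.getName()); // e.g., bind one marker per key column here
    }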
quoteIfNecessary(put.forTable().get())); - put.getPartitionKey().forEach(v -> insert.value(quoteIfNecessary(v.getName()), bindMarker())); + put.getPartitionKey() + .getColumns() + .forEach(v -> insert.value(quoteIfNecessary(v.getName()), bindMarker())); put.getClusteringKey() - .ifPresent(k -> k.forEach(v -> insert.value(quoteIfNecessary(v.getName()), bindMarker()))); + .ifPresent( + k -> + k.getColumns() + .forEach(v -> insert.value(quoteIfNecessary(v.getName()), bindMarker()))); put.getColumns().keySet().forEach(n -> insert.value(quoteIfNecessary(n), bindMarker())); setCondition(insert, put); diff --git a/core/src/main/java/com/scalar/db/storage/cassandra/ResultInterpreter.java b/core/src/main/java/com/scalar/db/storage/cassandra/ResultInterpreter.java index 58db55bf5a..9ad7948ecb 100644 --- a/core/src/main/java/com/scalar/db/storage/cassandra/ResultInterpreter.java +++ b/core/src/main/java/com/scalar/db/storage/cassandra/ResultInterpreter.java @@ -9,11 +9,16 @@ import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.Column; import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampTZColumn; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.time.LocalDate; +import java.time.LocalTime; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -71,6 +76,21 @@ private Column convert(Row row, String name, DataType type) { : TextColumn.of(name, row.getString(name)); case BLOB: return row.isNull(name) ? BlobColumn.ofNull(name) : BlobColumn.of(name, row.getBytes(name)); + case DATE: + return row.isNull(name) + ? DateColumn.ofNull(name) + : DateColumn.of(name, LocalDate.ofEpochDay(row.getDate(name).getDaysSinceEpoch())); + case TIME: + return row.isNull(name) + ? TimeColumn.ofNull(name) + : TimeColumn.of(name, LocalTime.ofNanoOfDay(row.getTime(name))); + case TIMESTAMP: + throw new UnsupportedOperationException( + "The TIMESTAMP type is not supported with Cassandra."); + case TIMESTAMPTZ: + return row.isNull(name) + ? 
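For reference (illustrative values): the Cassandra driver surfaces DATE as days since the epoch and TIME as nanoseconds of day, which the conversions above translate into java.time values.

    LocalDate date = LocalDate.ofEpochDay(17_000);                      // 2016-07-18
    LocalTime time = LocalTime.ofNanoOfDay(16L * 3600 * 1_000_000_000); // 16:00
    // TIMESTAMPTZ comes back as java.util.Date and is converted with Date#toInstant().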
TimestampTZColumn.ofNull(name) + : TimestampTZColumn.of(name, row.getTimestamp(name).toInstant()); default: throw new AssertionError(); } diff --git a/core/src/main/java/com/scalar/db/storage/cassandra/SelectStatementHandler.java b/core/src/main/java/com/scalar/db/storage/cassandra/SelectStatementHandler.java index 4d6b335b6d..1ca9ddb851 100644 --- a/core/src/main/java/com/scalar/db/storage/cassandra/SelectStatementHandler.java +++ b/core/src/main/java/com/scalar/db/storage/cassandra/SelectStatementHandler.java @@ -171,7 +171,9 @@ private void createStatement(Select.Where statement, Scan scan) { private void setKey(Select.Where statement, Optional key) { key.ifPresent( - k -> k.forEach(v -> statement.and(eq(quoteIfNecessary(v.getName()), bindMarker())))); + k -> + k.getColumns() + .forEach(v -> statement.and(eq(quoteIfNecessary(v.getName()), bindMarker())))); } private void setStart(Select.Where statement, Scan scan, Set traveledEqualKeySet) { diff --git a/core/src/main/java/com/scalar/db/storage/cassandra/UpdateStatementHandler.java b/core/src/main/java/com/scalar/db/storage/cassandra/UpdateStatementHandler.java index f2341acd0a..f4746dfa2f 100644 --- a/core/src/main/java/com/scalar/db/storage/cassandra/UpdateStatementHandler.java +++ b/core/src/main/java/com/scalar/db/storage/cassandra/UpdateStatementHandler.java @@ -68,12 +68,16 @@ private Update prepare(Put put) { put.getColumns().keySet().forEach(n -> assignments.and(set(quoteIfNecessary(n), bindMarker()))); Update.Where where = update.where(); put.getPartitionKey() + .getColumns() .forEach(v -> where.and(QueryBuilder.eq(quoteIfNecessary(v.getName()), bindMarker()))); put.getClusteringKey() .ifPresent( k -> - k.forEach( - v -> where.and(QueryBuilder.eq(quoteIfNecessary(v.getName()), bindMarker())))); + k.getColumns() + .forEach( + v -> + where.and( + QueryBuilder.eq(quoteIfNecessary(v.getName()), bindMarker())))); setCondition(where, put); diff --git a/core/src/main/java/com/scalar/db/storage/cassandra/ValueBinder.java b/core/src/main/java/com/scalar/db/storage/cassandra/ValueBinder.java index 41a4d0d635..04a39763cc 100644 --- a/core/src/main/java/com/scalar/db/storage/cassandra/ValueBinder.java +++ b/core/src/main/java/com/scalar/db/storage/cassandra/ValueBinder.java @@ -7,12 +7,18 @@ import com.scalar.db.io.BlobColumn; import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.ColumnVisitor; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.nio.ByteBuffer; +import java.time.LocalDate; +import java.util.Date; import javax.annotation.concurrent.NotThreadSafe; /** @@ -101,4 +107,43 @@ public void visit(BlobColumn column) { bound.setBytes(i++, (ByteBuffer) ByteBuffer.allocate(b.length).put(b).flip()); } } + + @Override + public void visit(DateColumn column) { + if (column.hasNullValue()) { + bound.setToNull(i++); + } else { + assert column.getDateValue() != null; + LocalDate date = column.getDateValue(); + bound.setDate( + i++, + com.datastax.driver.core.LocalDate.fromYearMonthDay( + date.getYear(), date.getMonthValue(), date.getDayOfMonth())); + } + } + + @Override + public void visit(TimeColumn column) { + if (column.hasNullValue()) { + bound.setToNull(i++); + } else { + assert column.getTimeValue() != null; + 
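On the write side, the binder above performs the reverse conversions; a minimal sketch assuming the Cassandra 3.x driver types used in this patch:

    LocalDate date = LocalDate.of(2024, 12, 24);
    com.datastax.driver.core.LocalDate driverDate =
        com.datastax.driver.core.LocalDate.fromYearMonthDay(
            date.getYear(), date.getMonthValue(), date.getDayOfMonth());
    java.util.Date driverTimestamp = Date.from(Instant.parse("2024-12-24T09:51:34Z"));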
bound.setTime(i++, column.getTimeValue().toNanoOfDay()); + } + } + + @Override + public void visit(TimestampColumn column) { + throw new UnsupportedOperationException("The TIMESTAMP type is not supported with Cassandra"); + } + + @Override + public void visit(TimestampTZColumn column) { + if (column.hasNullValue()) { + bound.setToNull(i++); + } else { + assert column.getTimestampTZValue() != null; + bound.setTimestamp(i++, Date.from(column.getTimestampTZValue())); + } + } } diff --git a/core/src/main/java/com/scalar/db/storage/cosmos/ConcatenationVisitor.java b/core/src/main/java/com/scalar/db/storage/cosmos/ConcatenationVisitor.java index a96429945a..7709a94c0d 100644 --- a/core/src/main/java/com/scalar/db/storage/cosmos/ConcatenationVisitor.java +++ b/core/src/main/java/com/scalar/db/storage/cosmos/ConcatenationVisitor.java @@ -4,10 +4,15 @@ import com.scalar.db.io.BlobColumn; import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.ColumnVisitor; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; import java.util.ArrayList; import java.util.Base64; import java.util.List; @@ -109,4 +114,28 @@ public void visit(BlobColumn column) { columns.add( Base64.getUrlEncoder().withoutPadding().encodeToString(column.getBlobValueAsBytes())); } + + @Override + public void visit(DateColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))); + } + + @Override + public void visit(TimeColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))); + } + + @Override + public void visit(TimestampColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))); + } + + @Override + public void visit(TimestampTZColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))); + } } diff --git a/core/src/main/java/com/scalar/db/storage/cosmos/CosmosAdmin.java b/core/src/main/java/com/scalar/db/storage/cosmos/CosmosAdmin.java index 8382abe010..8de1848bf5 100644 --- a/core/src/main/java/com/scalar/db/storage/cosmos/CosmosAdmin.java +++ b/core/src/main/java/com/scalar/db/storage/cosmos/CosmosAdmin.java @@ -518,6 +518,14 @@ private DataType convertDataType(String columnType) throws ExecutionException { return DataType.BOOLEAN; case "blob": return DataType.BLOB; + case "date": + return DataType.DATE; + case "time": + return DataType.TIME; + case "timestamp": + return DataType.TIMESTAMP; + case "timestamptz": + return DataType.TIMESTAMPTZ; default: throw new ExecutionException("Unknown column type: " + columnType); } @@ -628,7 +636,8 @@ columnName, getFullTableName(namespace, table)), } @Override - public TableMetadata getImportTableMetadata(String namespace, String table) { + public TableMetadata getImportTableMetadata( + String namespace, String table, Map overrideColumnsType) { throw new UnsupportedOperationException(CoreError.COSMOS_IMPORT_NOT_SUPPORTED.buildMessage()); } @@ -639,7 +648,11 @@ public void addRawColumnToTable( } @Override - public void importTable(String namespace, String table, Map options) { + public void importTable( + String 
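The Cosmos visitors above and below delegate to TimeRelatedColumnEncodingUtils, which (elsewhere in this patch) maps each time-related value to a long; the sketch below assumes that encoding is order-preserving, which is what makes the concatenated partition keys sort correctly.

    DateColumn d1 = DateColumn.of("d", LocalDate.of(2024, 1, 1));
    DateColumn d2 = DateColumn.of("d", LocalDate.of(2024, 6, 1));
    long e1 = TimeRelatedColumnEncodingUtils.encode(d1);
    long e2 = TimeRelatedColumnEncodingUtils.encode(d2);
    // Assumed invariant: e1 < e2 whenever d1's value precedes d2's value.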
namespace, + String table, + Map options, + Map overrideColumnsType) { throw new UnsupportedOperationException(CoreError.COSMOS_IMPORT_NOT_SUPPORTED.buildMessage()); } diff --git a/core/src/main/java/com/scalar/db/storage/cosmos/CosmosOperationChecker.java b/core/src/main/java/com/scalar/db/storage/cosmos/CosmosOperationChecker.java index 9c81442d4b..77cbe7e364 100644 --- a/core/src/main/java/com/scalar/db/storage/cosmos/CosmosOperationChecker.java +++ b/core/src/main/java/com/scalar/db/storage/cosmos/CosmosOperationChecker.java @@ -18,10 +18,14 @@ import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.ColumnVisitor; import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; public class CosmosOperationChecker extends OperationChecker { @@ -71,6 +75,18 @@ public void visit(TextColumn column) { @Override public void visit(BlobColumn column) {} + + @Override + public void visit(DateColumn column) {} + + @Override + public void visit(TimeColumn column) {} + + @Override + public void visit(TimestampColumn column) {} + + @Override + public void visit(TimestampTZColumn column) {} }; public CosmosOperationChecker( diff --git a/core/src/main/java/com/scalar/db/storage/cosmos/MapVisitor.java b/core/src/main/java/com/scalar/db/storage/cosmos/MapVisitor.java index 5f2c0f2f43..b9bd73835b 100644 --- a/core/src/main/java/com/scalar/db/storage/cosmos/MapVisitor.java +++ b/core/src/main/java/com/scalar/db/storage/cosmos/MapVisitor.java @@ -4,10 +4,15 @@ import com.scalar.db.io.BlobColumn; import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.ColumnVisitor; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.HashMap; import java.util.Map; @@ -61,4 +66,32 @@ public void visit(TextColumn column) { public void visit(BlobColumn column) { values.put(column.getName(), column.hasNullValue() ? null : column.getBlobValue()); } + + @Override + public void visit(DateColumn column) { + values.put( + column.getName(), + column.hasNullValue() ? null : TimeRelatedColumnEncodingUtils.encode(column)); + } + + @Override + public void visit(TimeColumn column) { + values.put( + column.getName(), + column.hasNullValue() ? null : TimeRelatedColumnEncodingUtils.encode(column)); + } + + @Override + public void visit(TimestampColumn column) { + values.put( + column.getName(), + column.hasNullValue() ? null : TimeRelatedColumnEncodingUtils.encode(column)); + } + + @Override + public void visit(TimestampTZColumn column) { + values.put( + column.getName(), + column.hasNullValue() ? 
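Taken together with the ResultInterpreter changes below, the expected invariant (assumed here, implemented elsewhere in this patch) is a lossless round trip through the long encoding:

    TimestampColumn original =
        TimestampColumn.of("t", LocalDateTime.of(2024, 12, 24, 18, 51, 34));
    long encoded = TimeRelatedColumnEncodingUtils.encode(original);
    LocalDateTime decoded = TimeRelatedColumnEncodingUtils.decodeTimestamp(encoded);
    // Assumed: decoded.equals(original.getTimestampValue())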
null : TimeRelatedColumnEncodingUtils.encode(column)); + } } diff --git a/core/src/main/java/com/scalar/db/storage/cosmos/ResultInterpreter.java b/core/src/main/java/com/scalar/db/storage/cosmos/ResultInterpreter.java index d5861d5fcf..95c546ffa5 100644 --- a/core/src/main/java/com/scalar/db/storage/cosmos/ResultInterpreter.java +++ b/core/src/main/java/com/scalar/db/storage/cosmos/ResultInterpreter.java @@ -8,10 +8,15 @@ import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.Column; import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.Base64; import java.util.HashMap; @@ -89,6 +94,31 @@ private Column convert(@Nullable Object recordValue, String name, DataType da return recordValue == null ? BlobColumn.ofNull(name) : BlobColumn.of(name, Base64.getDecoder().decode((String) recordValue)); + case DATE: + return recordValue == null + ? DateColumn.ofNull(name) + : DateColumn.of( + name, + TimeRelatedColumnEncodingUtils.decodeDate(((Number) recordValue).longValue())); + case TIME: + return recordValue == null + ? TimeColumn.ofNull(name) + : TimeColumn.of( + name, + TimeRelatedColumnEncodingUtils.decodeTime(((Number) recordValue).longValue())); + case TIMESTAMP: + return recordValue == null + ? TimestampColumn.ofNull(name) + : TimestampColumn.of( + name, + TimeRelatedColumnEncodingUtils.decodeTimestamp(((Number) recordValue).longValue())); + case TIMESTAMPTZ: + return recordValue == null + ? 
TimestampTZColumn.ofNull(name) + : TimestampTZColumn.of( + name, + TimeRelatedColumnEncodingUtils.decodeTimestampTZ( + ((Number) recordValue).longValue())); default: throw new AssertionError(); } diff --git a/core/src/main/java/com/scalar/db/storage/cosmos/ValueBinder.java b/core/src/main/java/com/scalar/db/storage/cosmos/ValueBinder.java index d54dec418d..ab6dec2423 100644 --- a/core/src/main/java/com/scalar/db/storage/cosmos/ValueBinder.java +++ b/core/src/main/java/com/scalar/db/storage/cosmos/ValueBinder.java @@ -4,10 +4,15 @@ import com.scalar.db.io.BlobColumn; import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.ColumnVisitor; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; import java.util.Base64; import java.util.function.Consumer; import javax.annotation.concurrent.NotThreadSafe; @@ -65,4 +70,40 @@ public void visit(BlobColumn column) { consumer.accept(Base64.getEncoder().encodeToString(column.getBlobValueAsBytes())); } } + + @Override + public void visit(DateColumn column) { + if (column.hasNullValue()) { + consumer.accept(null); + } else { + consumer.accept(TimeRelatedColumnEncodingUtils.encode(column)); + } + } + + @Override + public void visit(TimeColumn column) { + if (column.hasNullValue()) { + consumer.accept(null); + } else { + consumer.accept(TimeRelatedColumnEncodingUtils.encode(column)); + } + } + + @Override + public void visit(TimestampColumn column) { + if (column.hasNullValue()) { + consumer.accept(null); + } else { + consumer.accept(TimeRelatedColumnEncodingUtils.encode(column)); + } + } + + @Override + public void visit(TimestampTZColumn column) { + if (column.hasNullValue()) { + consumer.accept(null); + } else { + consumer.accept(TimeRelatedColumnEncodingUtils.encode(column)); + } + } } diff --git a/core/src/main/java/com/scalar/db/storage/dynamo/DynamoAdmin.java b/core/src/main/java/com/scalar/db/storage/dynamo/DynamoAdmin.java index b7d9b3d733..9c9d8f39c5 100644 --- a/core/src/main/java/com/scalar/db/storage/dynamo/DynamoAdmin.java +++ b/core/src/main/java/com/scalar/db/storage/dynamo/DynamoAdmin.java @@ -127,6 +127,10 @@ public class DynamoAdmin implements DistributedStorageAdmin { .put(DataType.DOUBLE, ScalarAttributeType.N) .put(DataType.TEXT, ScalarAttributeType.S) .put(DataType.BLOB, ScalarAttributeType.B) + .put(DataType.DATE, ScalarAttributeType.N) + .put(DataType.TIME, ScalarAttributeType.N) + .put(DataType.TIMESTAMP, ScalarAttributeType.N) + .put(DataType.TIMESTAMPTZ, ScalarAttributeType.N) .build(); private static final ImmutableSet TABLE_SCALING_TYPE_SET = ImmutableSet.builder().add(SCALING_TYPE_READ).add(SCALING_TYPE_WRITE).build(); @@ -1245,6 +1249,14 @@ private DataType convertDataType(String columnType) throws ExecutionException { return DataType.BOOLEAN; case "blob": return DataType.BLOB; + case "date": + return DataType.DATE; + case "time": + return DataType.TIME; + case "timestamp": + return DataType.TIMESTAMP; + case "timestamptz": + return DataType.TIMESTAMPTZ; default: throw new ExecutionException("Unknown column type: " + columnType); } @@ -1351,7 +1363,8 @@ columnName, getFullTableName(namespace, table)), } @Override - public TableMetadata getImportTableMetadata(String namespace, String table) { + public TableMetadata 
getImportTableMetadata( + String namespace, String table, Map overrideColumnsType) { throw new UnsupportedOperationException( "Import-related functionality is not supported in DynamoDB"); } @@ -1364,7 +1377,11 @@ public void addRawColumnToTable( } @Override - public void importTable(String namespace, String table, Map options) { + public void importTable( + String namespace, + String table, + Map options, + Map overrideColumnsType) { throw new UnsupportedOperationException( "Import-related functionality is not supported in DynamoDB"); } diff --git a/core/src/main/java/com/scalar/db/storage/dynamo/ResultInterpreter.java b/core/src/main/java/com/scalar/db/storage/dynamo/ResultInterpreter.java index 00b04dab24..d5b724d208 100644 --- a/core/src/main/java/com/scalar/db/storage/dynamo/ResultInterpreter.java +++ b/core/src/main/java/com/scalar/db/storage/dynamo/ResultInterpreter.java @@ -8,10 +8,15 @@ import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.Column; import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.HashMap; import java.util.List; @@ -72,6 +77,28 @@ private Column convert(@Nullable AttributeValue itemValue, String name, DataT return isNull ? TextColumn.ofNull(name) : TextColumn.of(name, itemValue.s()); case BLOB: return isNull ? BlobColumn.ofNull(name) : BlobColumn.of(name, itemValue.b().asByteArray()); + case DATE: + return isNull + ? DateColumn.ofNull(name) + : DateColumn.of( + name, TimeRelatedColumnEncodingUtils.decodeDate(Long.parseLong(itemValue.n()))); + case TIME: + return isNull + ? TimeColumn.ofNull(name) + : TimeColumn.of( + name, TimeRelatedColumnEncodingUtils.decodeTime(Long.parseLong(itemValue.n()))); + case TIMESTAMP: + return isNull + ? TimestampColumn.ofNull(name) + : TimestampColumn.of( + name, + TimeRelatedColumnEncodingUtils.decodeTimestamp(Long.parseLong(itemValue.n()))); + case TIMESTAMPTZ: + return isNull + ? 
TimestampTZColumn.ofNull(name) + : TimestampTZColumn.of( + name, + TimeRelatedColumnEncodingUtils.decodeTimestampTZ(Long.parseLong(itemValue.n()))); default: throw new AssertionError(); } diff --git a/core/src/main/java/com/scalar/db/storage/dynamo/ValueBinder.java b/core/src/main/java/com/scalar/db/storage/dynamo/ValueBinder.java index 6b6fe350e8..af7e15b9ed 100644 --- a/core/src/main/java/com/scalar/db/storage/dynamo/ValueBinder.java +++ b/core/src/main/java/com/scalar/db/storage/dynamo/ValueBinder.java @@ -4,10 +4,15 @@ import com.scalar.db.io.BlobColumn; import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.ColumnVisitor; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.HashMap; import java.util.Map; @@ -112,4 +117,60 @@ public void visit(BlobColumn column) { } i++; } + + @Override + public void visit(DateColumn column) { + if (column.hasNullValue()) { + values.put(alias + i, AttributeValue.builder().nul(true).build()); + } else { + values.put( + alias + i, + AttributeValue.builder() + .n(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))) + .build()); + } + i++; + } + + @Override + public void visit(TimeColumn column) { + if (column.hasNullValue()) { + values.put(alias + i, AttributeValue.builder().nul(true).build()); + } else { + values.put( + alias + i, + AttributeValue.builder() + .n(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))) + .build()); + } + i++; + } + + @Override + public void visit(TimestampColumn column) { + if (column.hasNullValue()) { + values.put(alias + i, AttributeValue.builder().nul(true).build()); + } else { + values.put( + alias + i, + AttributeValue.builder() + .n(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))) + .build()); + } + i++; + } + + @Override + public void visit(TimestampTZColumn column) { + if (column.hasNullValue()) { + values.put(alias + i, AttributeValue.builder().nul(true).build()); + } else { + values.put( + alias + i, + AttributeValue.builder() + .n(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))) + .build()); + } + i++; + } } diff --git a/core/src/main/java/com/scalar/db/storage/dynamo/bytes/BigIntBytesEncoder.java b/core/src/main/java/com/scalar/db/storage/dynamo/bytes/BigIntBytesEncoder.java index 85bd126ea5..91270e06ec 100644 --- a/core/src/main/java/com/scalar/db/storage/dynamo/bytes/BigIntBytesEncoder.java +++ b/core/src/main/java/com/scalar/db/storage/dynamo/bytes/BigIntBytesEncoder.java @@ -1,6 +1,6 @@ package com.scalar.db.storage.dynamo.bytes; -import static com.scalar.db.storage.dynamo.bytes.BytesUtils.mask; +import static com.scalar.db.storage.dynamo.bytes.BytesUtils.encodeLong; import com.scalar.db.api.Scan.Ordering.Order; import com.scalar.db.io.BigIntColumn; @@ -21,14 +21,6 @@ public int encodedLength(BigIntColumn column, Order order) { public void encode(BigIntColumn column, Order order, ByteBuffer dst) { assert !column.hasNullValue(); - long v = column.getBigIntValue(); - dst.put(mask((byte) ((v >> 56) ^ 0x80), order)); // Flip a sign bit to make it binary comparable - dst.put(mask((byte) (v >> 48), order)); - dst.put(mask((byte) (v >> 40), order)); - dst.put(mask((byte) (v >> 32), 
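As the mapping table at the top of DynamoAdmin declares, every time-related column is persisted as a DynamoDB number attribute; the binder above therefore follows the same encode-to-long path (illustrative value, AWS SDK v2 AttributeValue):

    DateColumn column = DateColumn.of("d", LocalDate.of(2024, 12, 24));
    AttributeValue attr =
        AttributeValue.builder()
            .n(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column)))
            .build();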
order)); - dst.put(mask((byte) (v >> 24), order)); - dst.put(mask((byte) (v >> 16), order)); - dst.put(mask((byte) (v >> 8), order)); - dst.put(mask((byte) v, order)); + encodeLong(column.getBigIntValue(), order, dst); } } diff --git a/core/src/main/java/com/scalar/db/storage/dynamo/bytes/BytesEncoders.java b/core/src/main/java/com/scalar/db/storage/dynamo/bytes/BytesEncoders.java index c92e181a0d..12f94692b1 100644 --- a/core/src/main/java/com/scalar/db/storage/dynamo/bytes/BytesEncoders.java +++ b/core/src/main/java/com/scalar/db/storage/dynamo/bytes/BytesEncoders.java @@ -8,4 +8,8 @@ public final class BytesEncoders { public static final DoubleBytesEncoder DOUBLE = new DoubleBytesEncoder(); public static final TextBytesEncoder TEXT = new TextBytesEncoder(); public static final BlobBytesEncoder BLOB = new BlobBytesEncoder(); + public static final DateBytesEncoder DATE = new DateBytesEncoder(); + public static final TimeBytesEncoder TIME = new TimeBytesEncoder(); + public static final TimestampBytesEncoder TIMESTAMP = new TimestampBytesEncoder(); + public static final TimestampTZBytesEncoder TIMESTAMPTZ = new TimestampTZBytesEncoder(); } diff --git a/core/src/main/java/com/scalar/db/storage/dynamo/bytes/BytesUtils.java b/core/src/main/java/com/scalar/db/storage/dynamo/bytes/BytesUtils.java index 2f57d69be1..4d32f75154 100644 --- a/core/src/main/java/com/scalar/db/storage/dynamo/bytes/BytesUtils.java +++ b/core/src/main/java/com/scalar/db/storage/dynamo/bytes/BytesUtils.java @@ -110,4 +110,16 @@ public static byte[] toBytes(ByteBuffer src) { src.get(bytes); return bytes; } + + static void encodeLong(long value, Order order, ByteBuffer dst) { + dst.put( + mask((byte) ((value >> 56) ^ 0x80), order)); // Flip a sign bit to make it binary comparable + dst.put(mask((byte) (value >> 48), order)); + dst.put(mask((byte) (value >> 40), order)); + dst.put(mask((byte) (value >> 32), order)); + dst.put(mask((byte) (value >> 24), order)); + dst.put(mask((byte) (value >> 16), order)); + dst.put(mask((byte) (value >> 8), order)); + dst.put(mask((byte) value, order)); + } } diff --git a/core/src/main/java/com/scalar/db/storage/dynamo/bytes/DateBytesEncoder.java b/core/src/main/java/com/scalar/db/storage/dynamo/bytes/DateBytesEncoder.java new file mode 100644 index 0000000000..7dd8c93f3e --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/dynamo/bytes/DateBytesEncoder.java @@ -0,0 +1,30 @@ +package com.scalar.db.storage.dynamo.bytes; + +import static com.scalar.db.storage.dynamo.bytes.BytesUtils.encodeLong; + +import com.scalar.db.api.Scan.Ordering.Order; +import com.scalar.db.io.DateColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import java.nio.ByteBuffer; +import javax.annotation.concurrent.ThreadSafe; + +@ThreadSafe +public class DateBytesEncoder implements BytesEncoder { + + DateBytesEncoder() {} + + @Override + public int encodedLength(DateColumn column, Order order) { + assert column.getValue().isPresent(); + + return 8; + } + + @Override + public void encode(DateColumn column, Order order, ByteBuffer dst) { + assert column.getValue().isPresent(); + + long value = TimeRelatedColumnEncodingUtils.encode(column); + encodeLong(value, order, dst); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/dynamo/bytes/KeyBytesEncodedLengthCalculator.java b/core/src/main/java/com/scalar/db/storage/dynamo/bytes/KeyBytesEncodedLengthCalculator.java index 3fb4d75211..3e86ed839d 100644 --- 
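The shared encodeLong above, hoisted out of BigIntBytesEncoder, keeps the sign-bit flip that makes two's-complement longs compare correctly as unsigned byte strings. A worked illustration of the first encoded byte:

    byte topNeg = (byte) ((-1L >> 56) ^ 0x80); // 0xFF ^ 0x80 = 0x7F
    byte topPos = (byte) ((1L >> 56) ^ 0x80);  // 0x00 ^ 0x80 = 0x80
    // Unsigned byte order: 0x7F < 0x80, matching -1 < 1. Without the flip,
    // -1 would start with 0xFF and incorrectly sort after 1.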
a/core/src/main/java/com/scalar/db/storage/dynamo/bytes/KeyBytesEncodedLengthCalculator.java +++ b/core/src/main/java/com/scalar/db/storage/dynamo/bytes/KeyBytesEncodedLengthCalculator.java @@ -6,11 +6,15 @@ import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.Column; import com.scalar.db.io.ColumnVisitor; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.Key; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.Map; import javax.annotation.concurrent.NotThreadSafe; @@ -75,4 +79,32 @@ public void visit(BlobColumn value) { length += BytesEncoders.BLOB.encodedLength(value, keyOrders.getOrDefault(value.getName(), Order.ASC)); } + + @Override + public void visit(DateColumn column) { + length += + BytesEncoders.DATE.encodedLength( + column, keyOrders.getOrDefault(column.getName(), Order.ASC)); + } + + @Override + public void visit(TimeColumn column) { + length += + BytesEncoders.TIME.encodedLength( + column, keyOrders.getOrDefault(column.getName(), Order.ASC)); + } + + @Override + public void visit(TimestampColumn column) { + length += + BytesEncoders.TIMESTAMP.encodedLength( + column, keyOrders.getOrDefault(column.getName(), Order.ASC)); + } + + @Override + public void visit(TimestampTZColumn column) { + length += + BytesEncoders.TIMESTAMPTZ.encodedLength( + column, keyOrders.getOrDefault(column.getName(), Order.ASC)); + } } diff --git a/core/src/main/java/com/scalar/db/storage/dynamo/bytes/KeyBytesEncoder.java b/core/src/main/java/com/scalar/db/storage/dynamo/bytes/KeyBytesEncoder.java index 3471aa59ba..5e4ce378e9 100644 --- a/core/src/main/java/com/scalar/db/storage/dynamo/bytes/KeyBytesEncoder.java +++ b/core/src/main/java/com/scalar/db/storage/dynamo/bytes/KeyBytesEncoder.java @@ -6,11 +6,15 @@ import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.Column; import com.scalar.db.io.ColumnVisitor; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.Key; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.nio.ByteBuffer; import java.util.Collections; @@ -74,4 +78,26 @@ public void visit(TextColumn value) { public void visit(BlobColumn value) { BytesEncoders.BLOB.encode(value, keyOrders.getOrDefault(value.getName(), Order.ASC), dst); } + + @Override + public void visit(DateColumn column) { + BytesEncoders.DATE.encode(column, keyOrders.getOrDefault(column.getName(), Order.ASC), dst); + } + + @Override + public void visit(TimeColumn column) { + BytesEncoders.TIME.encode(column, keyOrders.getOrDefault(column.getName(), Order.ASC), dst); + } + + @Override + public void visit(TimestampColumn column) { + BytesEncoders.TIMESTAMP.encode( + column, keyOrders.getOrDefault(column.getName(), Order.ASC), dst); + } + + @Override + public void visit(TimestampTZColumn column) { + BytesEncoders.TIMESTAMPTZ.encode( + column, keyOrders.getOrDefault(column.getName(), Order.ASC), dst); + } } diff --git a/core/src/main/java/com/scalar/db/storage/dynamo/bytes/TimeBytesEncoder.java 
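Each of the four new encoders reports a fixed 8-byte length, since every time-related value is packed into a single encoded long; for example (illustrative column):

    DateColumn c = DateColumn.of("d", LocalDate.of(2024, 1, 1));
    int length = BytesEncoders.DATE.encodedLength(c, Order.ASC); // 8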
b/core/src/main/java/com/scalar/db/storage/dynamo/bytes/TimeBytesEncoder.java new file mode 100644 index 0000000000..6614c699a5 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/dynamo/bytes/TimeBytesEncoder.java @@ -0,0 +1,30 @@ +package com.scalar.db.storage.dynamo.bytes; + +import static com.scalar.db.storage.dynamo.bytes.BytesUtils.encodeLong; + +import com.scalar.db.api.Scan.Ordering.Order; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import java.nio.ByteBuffer; +import javax.annotation.concurrent.ThreadSafe; + +@ThreadSafe +public class TimeBytesEncoder implements BytesEncoder { + + TimeBytesEncoder() {} + + @Override + public int encodedLength(TimeColumn column, Order order) { + assert column.getValue().isPresent(); + + return 8; + } + + @Override + public void encode(TimeColumn column, Order order, ByteBuffer dst) { + assert !column.hasNullValue(); + + long value = TimeRelatedColumnEncodingUtils.encode(column); + encodeLong(value, order, dst); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/dynamo/bytes/TimestampBytesEncoder.java b/core/src/main/java/com/scalar/db/storage/dynamo/bytes/TimestampBytesEncoder.java new file mode 100644 index 0000000000..2416669d06 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/dynamo/bytes/TimestampBytesEncoder.java @@ -0,0 +1,27 @@ +package com.scalar.db.storage.dynamo.bytes; + +import static com.scalar.db.storage.dynamo.bytes.BytesUtils.encodeLong; + +import com.scalar.db.api.Scan.Ordering.Order; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import java.nio.ByteBuffer; +import javax.annotation.concurrent.ThreadSafe; + +@ThreadSafe +public class TimestampBytesEncoder implements BytesEncoder { + TimestampBytesEncoder() {} + + @Override + public int encodedLength(TimestampColumn column, Order order) { + return 8; + } + + @Override + public void encode(TimestampColumn column, Order order, ByteBuffer dst) { + assert !column.hasNullValue(); + + long value = TimeRelatedColumnEncodingUtils.encode(column); + encodeLong(value, order, dst); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/dynamo/bytes/TimestampTZBytesEncoder.java b/core/src/main/java/com/scalar/db/storage/dynamo/bytes/TimestampTZBytesEncoder.java new file mode 100644 index 0000000000..fd4713397f --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/dynamo/bytes/TimestampTZBytesEncoder.java @@ -0,0 +1,27 @@ +package com.scalar.db.storage.dynamo.bytes; + +import static com.scalar.db.storage.dynamo.bytes.BytesUtils.encodeLong; + +import com.scalar.db.api.Scan.Ordering.Order; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import java.nio.ByteBuffer; +import javax.annotation.concurrent.ThreadSafe; + +@ThreadSafe +public class TimestampTZBytesEncoder implements BytesEncoder { + TimestampTZBytesEncoder() {} + + @Override + public int encodedLength(TimestampTZColumn column, Order order) { + return 8; + } + + @Override + public void encode(TimestampTZColumn column, Order order, ByteBuffer dst) { + assert !column.hasNullValue(); + + long value = TimeRelatedColumnEncodingUtils.encode(column); + encodeLong(value, order, dst); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/AbstractRdbEngine.java b/core/src/main/java/com/scalar/db/storage/jdbc/AbstractRdbEngine.java new file mode 100644 index 0000000000..d52bbd00de --- /dev/null +++ 
b/core/src/main/java/com/scalar/db/storage/jdbc/AbstractRdbEngine.java @@ -0,0 +1,38 @@ +package com.scalar.db.storage.jdbc; + +import com.scalar.db.common.error.CoreError; +import com.scalar.db.io.DataType; +import java.sql.JDBCType; +import javax.annotation.Nullable; + +public abstract class AbstractRdbEngine implements RdbEngineStrategy { + + @Override + public final DataType getDataTypeForScalarDb( + JDBCType type, + String typeName, + int columnSize, + int digits, + String columnDescription, + @Nullable DataType overrideDataType) { + DataType dataType = + getDataTypeForScalarDbInternal( + type, typeName, columnSize, digits, columnDescription, overrideDataType); + + if (overrideDataType != null && overrideDataType != dataType) { + throw new IllegalArgumentException( + CoreError.JDBC_IMPORT_DATA_TYPE_OVERRIDE_NOT_SUPPORTED.buildMessage( + typeName, overrideDataType, columnDescription)); + } + + return dataType; + } + + abstract DataType getDataTypeForScalarDbInternal( + JDBCType type, + String typeName, + int columnSize, + int digits, + String columnDescription, + @Nullable DataType overrideDataType); +} diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/JdbcAdmin.java b/core/src/main/java/com/scalar/db/storage/jdbc/JdbcAdmin.java index 903ec00e31..4e4512c216 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/JdbcAdmin.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/JdbcAdmin.java @@ -494,7 +494,8 @@ public TableMetadata getTableMetadata(String namespace, String table) throws Exe } @Override - public TableMetadata getImportTableMetadata(String namespace, String table) + public TableMetadata getImportTableMetadata( + String namespace, String table, Map overrideColumnsType) throws ExecutionException { TableMetadata.Builder builder = TableMetadata.newBuilder(); boolean primaryKeyExists = false; @@ -530,14 +531,16 @@ public TableMetadata getImportTableMetadata(String namespace, String table) resultSet = metadata.getColumns(catalogName, schemaName, table, "%"); while (resultSet.next()) { String columnName = resultSet.getString(JDBC_COL_COLUMN_NAME); - builder.addColumn( - columnName, + DataType overrideDataType = overrideColumnsType.get(columnName); + DataType dataType = rdbEngine.getDataTypeForScalarDb( getJdbcType(resultSet.getInt(JDBC_COL_DATA_TYPE)), resultSet.getString(JDBC_COL_TYPE_NAME), resultSet.getInt(JDBC_COL_COLUMN_SIZE), resultSet.getInt(JDBC_COL_DECIMAL_DIGITS), - getFullTableName(namespace, table) + " " + columnName)); + getFullTableName(namespace, table) + " " + columnName, + overrideDataType); + builder.addColumn(columnName, dataType); } } catch (SQLException e) { throw new ExecutionException( @@ -551,10 +554,14 @@ public TableMetadata getImportTableMetadata(String namespace, String table) } @Override - public void importTable(String namespace, String table, Map options) + public void importTable( + String namespace, + String table, + Map options, + Map overrideColumnsType) throws ExecutionException { try (Connection connection = dataSource.getConnection()) { - TableMetadata tableMetadata = getImportTableMetadata(namespace, table); + TableMetadata tableMetadata = getImportTableMetadata(namespace, table, overrideColumnsType); createNamespacesTableIfNotExists(connection); upsertIntoNamespacesTable(connection, namespace); addTableMetadata(connection, namespace, table, tableMetadata, true, false); diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/JdbcConfig.java b/core/src/main/java/com/scalar/db/storage/jdbc/JdbcConfig.java index 
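The new overrideColumnsType plumbing lets a caller pin an imported column to a specific ScalarDB type where the JDBC metadata is ambiguous, and AbstractRdbEngine rejects overrides the engine cannot honor. An illustrative call (hypothetical namespace, table, and column names):

    Map<String, DataType> overrides = ImmutableMap.of("created_at", DataType.TIMESTAMPTZ);
    admin.importTable("ns", "orders", Collections.emptyMap(), overrides);
    // An override the engine cannot map is expected to surface as an IllegalArgumentException
    // built from JDBC_IMPORT_DATA_TYPE_OVERRIDE_NOT_SUPPORTED.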
58c59d79f7..37f8692372 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/JdbcConfig.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/JdbcConfig.java @@ -6,6 +6,8 @@ import com.scalar.db.common.error.CoreError; import com.scalar.db.config.DatabaseConfig; +import java.time.LocalDate; +import java.time.format.DateTimeFormatter; import java.util.Locale; import java.util.Optional; import javax.annotation.Nullable; @@ -51,7 +53,8 @@ public class JdbcConfig { PREFIX + "mysql.variable_key_column_size"; public static final String ORACLE_VARIABLE_KEY_COLUMN_SIZE = PREFIX + "oracle.variable_key_column_size"; - + public static final String ORACLE_TIME_COLUMN_DEFAULT_DATE_COMPONENT = + PREFIX + "oracle.time_column.default_date_component"; public static final int DEFAULT_CONNECTION_POOL_MIN_IDLE = 20; public static final int DEFAULT_CONNECTION_POOL_MAX_IDLE = 50; public static final int DEFAULT_CONNECTION_POOL_MAX_TOTAL = 200; @@ -85,6 +88,11 @@ public class JdbcConfig { // 25-byte prefix for the group commit feature; thus, we set 64 bytes as the minimum. public static final int MINIMUM_VARIABLE_KEY_COLUMN_SIZE = 64; + // In Oracle, there is no data type to only store a time component without a date. So ScalarDB + // stores every value of the TIME type with the same date component columns for ease of + // comparison and sorting. The default date component is 1970-01-01. + public static final String DEFAULT_ORACLE_TIME_COLUMN_DEFAULT_DATE_COMPONENT = "1970-01-01"; + private final String jdbcUrl; @Nullable private final String username; @Nullable private final String password; @@ -109,6 +117,8 @@ public class JdbcConfig { private final int mysqlVariableKeyColumnSize; private final int oracleVariableKeyColumnSize; + private final LocalDate oracleTimeColumnDefaultDateComponent; + public JdbcConfig(DatabaseConfig databaseConfig) { String storage = databaseConfig.getStorage(); String transactionManager = databaseConfig.getTransactionManager(); @@ -211,6 +221,16 @@ public JdbcConfig(DatabaseConfig databaseConfig) { throw new IllegalArgumentException(CoreError.INVALID_VARIABLE_KEY_COLUMN_SIZE.buildMessage()); } + String oracleTimeColumnDefaultDateComponentString = + getString( + databaseConfig.getProperties(), + ORACLE_TIME_COLUMN_DEFAULT_DATE_COMPONENT, + DEFAULT_ORACLE_TIME_COLUMN_DEFAULT_DATE_COMPONENT); + assert oracleTimeColumnDefaultDateComponentString != null; + oracleTimeColumnDefaultDateComponent = + LocalDate.parse( + oracleTimeColumnDefaultDateComponentString, DateTimeFormatter.ISO_LOCAL_DATE); + if (databaseConfig.getProperties().containsKey(TABLE_METADATA_SCHEMA)) { logger.warn( "The configuration property \"" @@ -298,4 +318,8 @@ public int getMysqlVariableKeyColumnSize() { public int getOracleVariableKeyColumnSize() { return oracleVariableKeyColumnSize; } + + public LocalDate getOracleTimeColumnDefaultDateComponent() { + return oracleTimeColumnDefaultDateComponent; + } } diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/JdbcService.java b/core/src/main/java/com/scalar/db/storage/jdbc/JdbcService.java index 2972d1e3e1..82698ba5c9 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/JdbcService.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/JdbcService.java @@ -83,7 +83,8 @@ public Optional get(Get get, Connection connection) if (resultSet.next()) { Optional ret = Optional.of( - new ResultInterpreter(get.getProjections(), tableMetadata).interpret(resultSet)); + new ResultInterpreter(get.getProjections(), tableMetadata, rdbEngine) + .interpret(resultSet)); if 
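Assuming JdbcConfig.PREFIX resolves to "scalar.db.jdbc." (the prefix is not shown in this hunk), the new Oracle property would be set like this to anchor every TIME value to one fixed date component:

    Properties props = new Properties();
    props.setProperty("scalar.db.storage", "jdbc");
    props.setProperty("scalar.db.jdbc.oracle.time_column.default_date_component", "1970-01-01");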
(resultSet.next()) { throw new IllegalArgumentException( CoreError.GET_OPERATION_USED_FOR_NON_EXACT_MATCH_SELECTION.buildMessage(get)); @@ -107,7 +108,7 @@ public Scanner getScanner(Scan scan, Connection connection) selectQuery.bind(preparedStatement); ResultSet resultSet = preparedStatement.executeQuery(); return new ScannerImpl( - new ResultInterpreter(scan.getProjections(), tableMetadata), + new ResultInterpreter(scan.getProjections(), tableMetadata, rdbEngine), connection, preparedStatement, resultSet); @@ -125,7 +126,7 @@ public List scan(Scan scan, Connection connection) try (ResultSet resultSet = preparedStatement.executeQuery()) { List ret = new ArrayList<>(); ResultInterpreter resultInterpreter = - new ResultInterpreter(scan.getProjections(), tableMetadata); + new ResultInterpreter(scan.getProjections(), tableMetadata, rdbEngine); while (resultSet.next()) { ret.add(resultInterpreter.interpret(resultSet)); } diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/JdbcUtils.java b/core/src/main/java/com/scalar/db/storage/jdbc/JdbcUtils.java index e2b4bb5a31..376fc811f3 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/JdbcUtils.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/JdbcUtils.java @@ -1,7 +1,9 @@ package com.scalar.db.storage.jdbc; +import com.google.common.annotations.VisibleForTesting; import java.sql.Connection; import java.sql.JDBCType; +import java.util.Map.Entry; import org.apache.commons.dbcp2.BasicDataSource; public final class JdbcUtils { @@ -11,9 +13,14 @@ public static BasicDataSource initDataSource(JdbcConfig config, RdbEngineStrateg return initDataSource(config, rdbEngine, false); } + @VisibleForTesting + static BasicDataSource createDataSource() { + return new BasicDataSource(); + } + public static BasicDataSource initDataSource( JdbcConfig config, RdbEngineStrategy rdbEngine, boolean transactional) { - BasicDataSource dataSource = new BasicDataSource(); + BasicDataSource dataSource = createDataSource(); /* * We need to set the driver class of an underlying database to the dataSource in order @@ -61,12 +68,16 @@ public static BasicDataSource initDataSource( dataSource.setMaxTotal(config.getConnectionPoolMaxTotal()); dataSource.setPoolPreparedStatements(config.isPreparedStatementsPoolEnabled()); dataSource.setMaxOpenPreparedStatements(config.getPreparedStatementsPoolMaxOpen()); + for (Entry entry : rdbEngine.getConnectionProperties().entrySet()) { + dataSource.addConnectionProperty(entry.getKey(), entry.getValue()); + } + return dataSource; } public static BasicDataSource initDataSourceForTableMetadata( JdbcConfig config, RdbEngineStrategy rdbEngine) { - BasicDataSource dataSource = new BasicDataSource(); + BasicDataSource dataSource = createDataSource(); /* * We need to set the driver class of an underlying database to the dataSource in order @@ -86,7 +97,7 @@ public static BasicDataSource initDataSourceForTableMetadata( public static BasicDataSource initDataSourceForAdmin( JdbcConfig config, RdbEngineStrategy rdbEngine) { - BasicDataSource dataSource = new BasicDataSource(); + BasicDataSource dataSource = createDataSource(); /* * We need to set the driver class of an underlying database to the dataSource in order diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineMariaDB.java b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineMariaDB.java index 9c3664feba..02913ed0b6 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineMariaDB.java +++ 
b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineMariaDB.java @@ -3,6 +3,7 @@ import com.scalar.db.io.DataType; import java.sql.Driver; import java.sql.JDBCType; +import javax.annotation.Nullable; class RdbEngineMariaDB extends RdbEngineMysql { @Override @@ -11,14 +12,20 @@ public Driver getDriver() { } @Override - public DataType getDataTypeForScalarDb( - JDBCType type, String typeName, int columnSize, int digits, String columnDescription) { + DataType getDataTypeForScalarDbInternal( + JDBCType type, + String typeName, + int columnSize, + int digits, + String columnDescription, + @Nullable DataType overrideDataType) { if (type == JDBCType.BOOLEAN) { // MariaDB JDBC driver maps TINYINT(1) type as a BOOLEAN JDBC type which differs from the // MySQL driver which maps it to a BIT type. return DataType.BOOLEAN; } else { - return super.getDataTypeForScalarDb(type, typeName, columnSize, digits, columnDescription); + return super.getDataTypeForScalarDbInternal( + type, typeName, columnSize, digits, columnDescription, overrideDataType); } } } diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineMysql.java b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineMysql.java index 643baebcf0..b9aed27bb5 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineMysql.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineMysql.java @@ -6,31 +6,40 @@ import com.scalar.db.common.error.CoreError; import com.scalar.db.exception.storage.ExecutionException; import com.scalar.db.io.DataType; +import com.scalar.db.io.TimestampTZColumn; import com.scalar.db.storage.jdbc.query.InsertOnDuplicateKeyUpdateQuery; import com.scalar.db.storage.jdbc.query.SelectQuery; import com.scalar.db.storage.jdbc.query.SelectWithLimitQuery; import com.scalar.db.storage.jdbc.query.UpsertQuery; import java.sql.Driver; import java.sql.JDBCType; +import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Types; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.ZoneOffset; import java.util.stream.Collectors; import java.util.stream.Stream; import javax.annotation.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -class RdbEngineMysql implements RdbEngineStrategy { +class RdbEngineMysql extends AbstractRdbEngine { private static final Logger logger = LoggerFactory.getLogger(RdbEngineMysql.class); private final String keyColumnSize; + private final RdbEngineTimeTypeMysql timeTypeEngine; RdbEngineMysql(JdbcConfig config) { keyColumnSize = String.valueOf(config.getMysqlVariableKeyColumnSize()); + timeTypeEngine = new RdbEngineTimeTypeMysql(); } @VisibleForTesting RdbEngineMysql() { keyColumnSize = String.valueOf(JdbcConfig.DEFAULT_VARIABLE_KEY_COLUMN_SIZE); + timeTypeEngine = new RdbEngineTimeTypeMysql(); } @Override @@ -203,6 +212,13 @@ public String getDataTypeForEngine(DataType scalarDbDataType) { return "INT"; case TEXT: return "LONGTEXT"; + case DATE: + return "DATE"; + case TIME: + return "TIME(6)"; + case TIMESTAMP: + case TIMESTAMPTZ: + return "DATETIME(3)"; default: throw new AssertionError(); } @@ -221,8 +237,13 @@ public String getDataTypeForKey(DataType dataType) { } @Override - public DataType getDataTypeForScalarDb( - JDBCType type, String typeName, int columnSize, int digits, String columnDescription) { + DataType getDataTypeForScalarDbInternal( + JDBCType type, + String typeName, + int columnSize, + int digits, + String columnDescription, + @Nullable DataType overrideDataType) { switch 
(type) { case BIT: if (columnSize != 1) { @@ -294,6 +315,21 @@ public DataType getDataTypeForScalarDb( typeName); } return DataType.BLOB; + case DATE: + if (typeName.equalsIgnoreCase("YEAR")) { + throw new IllegalArgumentException( + CoreError.JDBC_IMPORT_DATA_TYPE_NOT_SUPPORTED.buildMessage( + typeName, columnDescription)); + } + return DataType.DATE; + case TIME: + return DataType.TIME; + // Both MySQL TIMESTAMP and DATETIME data types are mapped to the TIMESTAMP JDBC type + case TIMESTAMP: + if (overrideDataType == DataType.TIMESTAMPTZ || typeName.equalsIgnoreCase("TIMESTAMP")) { + return DataType.TIMESTAMPTZ; + } + return DataType.TIMESTAMP; default: throw new IllegalArgumentException( CoreError.JDBC_IMPORT_DATA_TYPE_NOT_SUPPORTED.buildMessage( @@ -318,6 +354,14 @@ public int getSqlTypes(DataType dataType) { return Types.VARCHAR; case BLOB: return Types.BLOB; + case DATE: + return Types.DATE; + case TIME: + return Types.TIME; + case TIMESTAMP: + case TIMESTAMPTZ: + return Types.TIMESTAMP; + default: throw new AssertionError(); } @@ -382,4 +426,21 @@ public String getSchemaName(String namespace) { // method is used for filtering. return namespace; } + + @Override + public TimestampTZColumn parseTimestampTZColumn(ResultSet resultSet, String columnName) + throws SQLException { + LocalDateTime localDateTime = resultSet.getObject(columnName, LocalDateTime.class); + if (localDateTime == null) { + return TimestampTZColumn.ofNull(columnName); + } else { + return TimestampTZColumn.of(columnName, localDateTime.toInstant(ZoneOffset.UTC)); + } + } + + @Override + public RdbEngineTimeTypeStrategy + getTimeTypeStrategy() { + return timeTypeEngine; + } } diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineOracle.java b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineOracle.java index 4cba5d5b3b..7efef668ca 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineOracle.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineOracle.java @@ -16,23 +16,30 @@ import java.sql.JDBCType; import java.sql.SQLException; import java.sql.Types; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.OffsetDateTime; import java.util.ArrayList; import java.util.stream.Collectors; import java.util.stream.Stream; +import javax.annotation.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -class RdbEngineOracle implements RdbEngineStrategy { +class RdbEngineOracle extends AbstractRdbEngine { private static final Logger logger = LoggerFactory.getLogger(RdbEngineOracle.class); private final String keyColumnSize; + private final RdbEngineTimeTypeOracle timeTypeEngine; RdbEngineOracle(JdbcConfig config) { keyColumnSize = String.valueOf(config.getOracleVariableKeyColumnSize()); + this.timeTypeEngine = new RdbEngineTimeTypeOracle(config); } @VisibleForTesting RdbEngineOracle() { keyColumnSize = String.valueOf(JdbcConfig.DEFAULT_VARIABLE_KEY_COLUMN_SIZE); + timeTypeEngine = null; } @Override @@ -210,6 +217,14 @@ public String getDataTypeForEngine(DataType scalarDbDataType) { return "NUMBER(10)"; case TEXT: return "VARCHAR2(4000)"; + case DATE: + return "DATE"; + case TIME: + return "TIMESTAMP(6)"; + case TIMESTAMP: + return "TIMESTAMP(3)"; + case TIMESTAMPTZ: + return "TIMESTAMP(3) WITH TIME ZONE"; default: throw new AssertionError(); } @@ -228,8 +243,13 @@ public String getDataTypeForKey(DataType dataType) { } @Override - public DataType getDataTypeForScalarDb( - JDBCType type, String typeName, int columnSize, int digits, String 
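Because MySQL reports both its TIMESTAMP and DATETIME types as the TIMESTAMP JDBC type, the import mapping above keys off the type name and the optional override; the resulting behavior, as read from the switch, is:

    type name TIMESTAMP                      -> DataType.TIMESTAMPTZ (MySQL normalizes TIMESTAMP to UTC)
    type name DATETIME                       -> DataType.TIMESTAMP
    type name DATETIME, override TIMESTAMPTZ -> DataType.TIMESTAMPTZ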
columnDescription) { + DataType getDataTypeForScalarDbInternal( + JDBCType type, + String typeName, + int columnSize, + int digits, + String columnDescription, + @Nullable DataType overrideDataType) { String numericTypeDescription = String.format("%s(%d, %d)", typeName, columnSize, digits); switch (type) { case NUMERIC: @@ -293,6 +313,29 @@ public DataType getDataTypeForScalarDb( columnDescription, typeName); return DataType.BLOB; + case TIMESTAMP: + // handles "date" type + if (typeName.equalsIgnoreCase("date")) { + if (overrideDataType == DataType.TIME) { + return DataType.TIME; + } + if (overrideDataType == DataType.TIMESTAMP) { + return DataType.TIMESTAMP; + } + return DataType.DATE; + } + // handles "timestamp" type + if (overrideDataType == DataType.TIME) { + return DataType.TIME; + } + return DataType.TIMESTAMP; + case OTHER: + if (typeName.toLowerCase().endsWith("time zone")) { + return DataType.TIMESTAMPTZ; + } + throw new IllegalArgumentException( + CoreError.JDBC_IMPORT_DATA_TYPE_NOT_SUPPORTED.buildMessage( + typeName, columnDescription)); default: throw new IllegalArgumentException( CoreError.JDBC_IMPORT_DATA_TYPE_NOT_SUPPORTED.buildMessage( @@ -317,6 +360,14 @@ public int getSqlTypes(DataType dataType) { return Types.VARCHAR; case BLOB: return Types.BLOB; + case DATE: + return Types.DATE; + case TIME: + return Types.TIME; + case TIMESTAMP: + return Types.TIMESTAMP; + case TIMESTAMPTZ: + return Types.TIMESTAMP_WITH_TIMEZONE; default: throw new AssertionError(); } @@ -347,4 +398,10 @@ public String getEscape(LikeExpression likeExpression) { public String tryAddIfNotExistsToCreateIndexSql(String createIndexSql) { return createIndexSql; } + + @Override + public RdbEngineTimeTypeStrategy + getTimeTypeStrategy() { + return timeTypeEngine; + } } diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEnginePostgresql.java b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEnginePostgresql.java index c6020713d3..8ab6c02958 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEnginePostgresql.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEnginePostgresql.java @@ -14,14 +14,24 @@ import java.sql.JDBCType; import java.sql.SQLException; import java.sql.Types; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; import java.util.ArrayList; import java.util.stream.Collectors; import java.util.stream.Stream; +import javax.annotation.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -class RdbEnginePostgresql implements RdbEngineStrategy { +class RdbEnginePostgresql extends AbstractRdbEngine { private static final Logger logger = LoggerFactory.getLogger(RdbEnginePostgresql.class); + private final RdbEngineTimeTypePostgresql timeTypeEngine; + + public RdbEnginePostgresql() { + timeTypeEngine = new RdbEngineTimeTypePostgresql(); + } @Override public String[] createSchemaSqls(String fullSchema) { @@ -197,6 +207,14 @@ public String getDataTypeForEngine(DataType scalarDbDataType) { return "INT"; case TEXT: return "TEXT"; + case DATE: + return "DATE"; + case TIME: + return "TIME"; + case TIMESTAMP: + return "TIMESTAMP"; + case TIMESTAMPTZ: + return "TIMESTAMP WITH TIME ZONE"; default: throw new AssertionError(); } @@ -213,8 +231,13 @@ public String getDataTypeForKey(DataType dataType) { } @Override - public DataType getDataTypeForScalarDb( - JDBCType type, String typeName, int columnSize, int digits, String columnDescription) { + DataType getDataTypeForScalarDbInternal( + JDBCType 
type, + String typeName, + int columnSize, + int digits, + String columnDescription, + @Nullable DataType overrideDataType) { switch (type) { case BIT: if (columnSize != 1) { @@ -272,6 +295,20 @@ public DataType getDataTypeForScalarDb( return DataType.TEXT; case BINARY: return DataType.BLOB; + case DATE: + return DataType.DATE; + case TIME: + if (typeName.equalsIgnoreCase("timetz")) { + throw new IllegalArgumentException( + CoreError.JDBC_IMPORT_DATA_TYPE_NOT_SUPPORTED.buildMessage( + typeName, columnDescription)); + } + return DataType.TIME; + case TIMESTAMP: + if (typeName.equalsIgnoreCase("timestamptz")) { + return DataType.TIMESTAMPTZ; + } + return DataType.TIMESTAMP; default: throw new IllegalArgumentException( CoreError.JDBC_IMPORT_DATA_TYPE_NOT_SUPPORTED.buildMessage( @@ -296,6 +333,14 @@ public int getSqlTypes(DataType dataType) { return Types.VARCHAR; case BLOB: return Types.VARBINARY; + case DATE: + return Types.DATE; + case TIME: + return Types.TIME; + case TIMESTAMP: + return Types.TIMESTAMP; + case TIMESTAMPTZ: + return Types.TIMESTAMP_WITH_TIMEZONE; default: throw new AssertionError(); } @@ -320,4 +365,10 @@ public Driver getDriver() { public String tryAddIfNotExistsToCreateIndexSql(String createIndexSql) { return createIndexSql.replace("CREATE INDEX", "CREATE INDEX IF NOT EXISTS"); } + + @Override + public RdbEngineTimeTypeStrategy + getTimeTypeStrategy() { + return timeTypeEngine; + } } diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineSqlServer.java b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineSqlServer.java index b6d92a1055..4714d6d2f6 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineSqlServer.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineSqlServer.java @@ -1,5 +1,6 @@ package com.scalar.db.storage.jdbc; +import com.google.common.collect.ImmutableMap; import com.scalar.db.api.LikeExpression; import com.scalar.db.api.TableMetadata; import com.scalar.db.common.error.CoreError; @@ -13,13 +14,21 @@ import java.sql.JDBCType; import java.sql.SQLException; import java.sql.Types; +import java.time.LocalTime; +import java.util.Map; import java.util.stream.Collectors; import java.util.stream.Stream; +import microsoft.sql.DateTimeOffset; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -class RdbEngineSqlServer implements RdbEngineStrategy { +class RdbEngineSqlServer extends AbstractRdbEngine { private static final Logger logger = LoggerFactory.getLogger(RdbEngineSqlServer.class); + private final RdbEngineTimeTypeSqlServer timeTypeEngine; + + RdbEngineSqlServer() { + timeTypeEngine = new RdbEngineTimeTypeSqlServer(); + } @Override public String[] createSchemaSqls(String fullSchema) { @@ -177,6 +186,14 @@ public String getDataTypeForEngine(DataType scalarDbDataType) { return "INT"; case TEXT: return "VARCHAR(8000)"; + case DATE: + return "DATE"; + case TIME: + return "TIME(6)"; + case TIMESTAMP: + return "DATETIME2(3)"; + case TIMESTAMPTZ: + return "DATETIMEOFFSET(3)"; default: throw new AssertionError(); } @@ -189,8 +206,13 @@ public String getDataTypeForKey(DataType dataType) { } @Override - public DataType getDataTypeForScalarDb( - JDBCType type, String typeName, int columnSize, int digits, String columnDescription) { + DataType getDataTypeForScalarDbInternal( + JDBCType type, + String typeName, + int columnSize, + int digits, + String columnDescription, + DataType overrideDataType) { switch (type) { case BIT: if (columnSize != 1) { @@ -256,6 +278,19 @@ public DataType getDataTypeForScalarDb( return 
DataType.BLOB; case LONGVARBINARY: return DataType.BLOB; + case DATE: + return DataType.DATE; + case TIME: + return DataType.TIME; + case TIMESTAMP: + return DataType.TIMESTAMP; + case OTHER: + if (!typeName.equalsIgnoreCase("datetimeoffset")) { + throw new IllegalArgumentException( + CoreError.JDBC_IMPORT_DATA_TYPE_NOT_SUPPORTED.buildMessage( + typeName, columnDescription)); + } + return DataType.TIMESTAMPTZ; default: throw new IllegalArgumentException( CoreError.JDBC_IMPORT_DATA_TYPE_NOT_SUPPORTED.buildMessage( @@ -280,6 +315,14 @@ public int getSqlTypes(DataType dataType) { return Types.VARCHAR; case BLOB: return Types.BLOB; + case DATE: + return Types.DATE; + case TIME: + return Types.TIME; + case TIMESTAMP: + return Types.TIMESTAMP; + case TIMESTAMPTZ: + return microsoft.sql.Types.DATETIMEOFFSET; default: throw new AssertionError(); } @@ -328,4 +371,17 @@ public String getEscape(LikeExpression likeExpression) { public String tryAddIfNotExistsToCreateIndexSql(String createIndexSql) { return createIndexSql; } + + @Override + public Map<String, String> getConnectionProperties() { + // Needed to keep the microsecond precision when sending the value of ScalarDB TIME type. + // Setting it to false by default is being considered for a future driver release. + return ImmutableMap.of("sendTimeAsDatetime", "false"); + } + + @Override + public RdbEngineTimeTypeStrategy<String, LocalTime, String, DateTimeOffset> + getTimeTypeStrategy() { + return timeTypeEngine; + } } diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineSqlite.java b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineSqlite.java index dce69fd267..fb436751c4 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineSqlite.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineSqlite.java @@ -3,13 +3,19 @@ import com.scalar.db.api.LikeExpression; import com.scalar.db.api.TableMetadata; import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import com.scalar.db.storage.jdbc.query.InsertOnConflictDoUpdateQuery; import com.scalar.db.storage.jdbc.query.SelectQuery; import com.scalar.db.storage.jdbc.query.SelectWithLimitQuery; import com.scalar.db.storage.jdbc.query.UpsertQuery; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.sql.Driver; import java.sql.JDBCType; +import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Types; import java.util.stream.Collectors; @@ -29,8 +35,13 @@ * native SQLite library in JAR, we should assure the real error messages in * RdbEngineStrategyTest.
*/ -class RdbEngineSqlite implements RdbEngineStrategy { +class RdbEngineSqlite extends AbstractRdbEngine { private static final String NAMESPACE_SEPARATOR = "$"; + private final RdbEngineTimeTypeSqlite timeTypeEngine; + + public RdbEngineSqlite() { + timeTypeEngine = new RdbEngineTimeTypeSqlite(); + } @Override public boolean isDuplicateTableError(SQLException e) { @@ -86,6 +97,10 @@ public String getDataTypeForEngine(DataType scalarDbDataType) { case INT: return "INT"; case BIGINT: + case DATE: + case TIME: + case TIMESTAMP: + case TIMESTAMPTZ: return "BIGINT"; case FLOAT: return "FLOAT"; @@ -112,6 +127,10 @@ public int getSqlTypes(DataType dataType) { return Types.BOOLEAN; case INT: return Types.INTEGER; + case DATE: + case TIME: + case TIMESTAMP: + case TIMESTAMPTZ: case BIGINT: return Types.BIGINT; case FLOAT: @@ -133,8 +152,13 @@ public String getTextType(int charLength) { } @Override - public DataType getDataTypeForScalarDb( - JDBCType type, String typeName, int columnSize, int digits, String columnDescription) { + public DataType getDataTypeForScalarDbInternal( + JDBCType type, + String typeName, + int columnSize, + int digits, + String columnDescription, + DataType overrideDataType) { throw new AssertionError("SQLite is not supported"); } @@ -279,4 +303,36 @@ public String getEscape(LikeExpression likeExpression) { public String tryAddIfNotExistsToCreateIndexSql(String createIndexSql) { return createIndexSql.replace("CREATE INDEX", "CREATE INDEX IF NOT EXISTS"); } + + @Override + public DateColumn parseDateColumn(ResultSet resultSet, String columnName) throws SQLException { + return DateColumn.of( + columnName, TimeRelatedColumnEncodingUtils.decodeDate(resultSet.getLong(columnName))); + } + + @Override + public TimeColumn parseTimeColumn(ResultSet resultSet, String columnName) throws SQLException { + return TimeColumn.of( + columnName, TimeRelatedColumnEncodingUtils.decodeTime(resultSet.getLong(columnName))); + } + + @Override + public TimestampColumn parseTimestampColumn(ResultSet resultSet, String columnName) + throws SQLException { + return TimestampColumn.of( + columnName, TimeRelatedColumnEncodingUtils.decodeTimestamp(resultSet.getLong(columnName))); + } + + @Override + public TimestampTZColumn parseTimestampTZColumn(ResultSet resultSet, String columnName) + throws SQLException { + return TimestampTZColumn.of( + columnName, + TimeRelatedColumnEncodingUtils.decodeTimestampTZ(resultSet.getLong(columnName))); + } + + @Override + public RdbEngineTimeTypeStrategy getTimeTypeStrategy() { + return timeTypeEngine; + } } diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineStrategy.java b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineStrategy.java index 70f6bee6e9..6dc20be379 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineStrategy.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineStrategy.java @@ -4,11 +4,23 @@ import com.scalar.db.api.TableMetadata; import com.scalar.db.exception.storage.ExecutionException; import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import com.scalar.db.storage.jdbc.query.SelectQuery; import com.scalar.db.storage.jdbc.query.UpsertQuery; import java.sql.Driver; import java.sql.JDBCType; +import java.sql.ResultSet; import java.sql.SQLException; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import 
java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.util.Collections; +import java.util.Map; import javax.annotation.Nullable; /** @@ -30,7 +42,12 @@ public interface RdbEngineStrategy { String getDataTypeForKey(DataType dataType); DataType getDataTypeForScalarDb( - JDBCType type, String typeName, int columnSize, int digits, String columnDescription); + JDBCType type, + String typeName, + int columnSize, + int digits, + String columnDescription, + @Nullable DataType overrideDataType); int getSqlTypes(DataType dataType); @@ -131,4 +148,58 @@ default String getPattern(LikeExpression likeExpression) { default @Nullable String getSchemaName(String namespace) { return namespace; } + + default LocalDate encode(DateColumn column) { + assert column.getDateValue() != null; + return column.getDateValue(); + } + + default LocalTime encode(TimeColumn column) { + assert column.getTimeValue() != null; + return column.getTimeValue(); + } + + default LocalDateTime encode(TimestampColumn column) { + assert column.getTimestampValue() != null; + return column.getTimestampValue(); + } + + default OffsetDateTime encode(TimestampTZColumn column) { + assert column.getTimestampTZValue() != null; + return column.getTimestampTZValue().atOffset(ZoneOffset.UTC); + } + + default DateColumn parseDateColumn(ResultSet resultSet, String columnName) throws SQLException { + return DateColumn.of(columnName, resultSet.getObject(columnName, LocalDate.class)); + } + + default TimeColumn parseTimeColumn(ResultSet resultSet, String columnName) throws SQLException { + return TimeColumn.of(columnName, resultSet.getObject(columnName, LocalTime.class)); + } + + default TimestampColumn parseTimestampColumn(ResultSet resultSet, String columnName) + throws SQLException { + return TimestampColumn.of(columnName, resultSet.getObject(columnName, LocalDateTime.class)); + } + + default TimestampTZColumn parseTimestampTZColumn(ResultSet resultSet, String columnName) + throws SQLException { + OffsetDateTime offsetDateTime = resultSet.getObject(columnName, OffsetDateTime.class); + if (offsetDateTime == null) { + return TimestampTZColumn.ofNull(columnName); + } else { + return TimestampTZColumn.of(columnName, offsetDateTime.toInstant()); + } + } + + /** + * Return the connection properties for the underlying database. 
+ * + * @return a map where key=property_name and value=property_value + */ + default Map<String, String> getConnectionProperties() { + return Collections.emptyMap(); + } + + RdbEngineTimeTypeStrategy<?, ?, ?, ?> getTimeTypeStrategy(); } diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeMysql.java b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeMysql.java new file mode 100644 index 0000000000..e7d7f59ebc --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeMysql.java @@ -0,0 +1,31 @@ +package com.scalar.db.storage.jdbc; + +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; + +public class RdbEngineTimeTypeMysql + implements RdbEngineTimeTypeStrategy<LocalDate, LocalTime, LocalDateTime, LocalDateTime> { + @Override + public LocalDate convert(LocalDate date) { + return date; + } + + @Override + public LocalTime convert(LocalTime time) { + return time; + } + + @Override + public LocalDateTime convert(LocalDateTime timestamp) { + return timestamp; + } + + @Override + public LocalDateTime convert(OffsetDateTime timestampTZ) { + // Encoding as an OffsetDateTime results in the time being offset arbitrarily depending on the + // client, session or server time zone. So we encode it as a LocalDateTime instead. + return timestampTZ.toLocalDateTime(); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeOracle.java b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeOracle.java new file mode 100644 index 0000000000..b5e0d5ff70 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeOracle.java @@ -0,0 +1,35 @@ +package com.scalar.db.storage.jdbc; + +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; + +public class RdbEngineTimeTypeOracle + implements RdbEngineTimeTypeStrategy<LocalDate, LocalDateTime, LocalDateTime, OffsetDateTime> { + private final JdbcConfig config; + + public RdbEngineTimeTypeOracle(JdbcConfig config) { + this.config = config; + } + + @Override + public LocalDate convert(LocalDate date) { + return date; + } + + @Override + public LocalDateTime convert(LocalTime time) { + return LocalDateTime.of(config.getOracleTimeColumnDefaultDateComponent(), time); + } + + @Override + public LocalDateTime convert(LocalDateTime timestamp) { + return timestamp; + } + + @Override + public OffsetDateTime convert(OffsetDateTime timestampTZ) { + return timestampTZ; + } +} diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypePostgresql.java b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypePostgresql.java new file mode 100644 index 0000000000..56ef703d06 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypePostgresql.java @@ -0,0 +1,29 @@ +package com.scalar.db.storage.jdbc; + +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; + +public class RdbEngineTimeTypePostgresql + implements RdbEngineTimeTypeStrategy<LocalDate, LocalTime, LocalDateTime, OffsetDateTime> { + @Override + public LocalDate convert(LocalDate date) { + return date; + } + + @Override + public LocalTime convert(LocalTime time) { + return time; + } + + @Override + public LocalDateTime convert(LocalDateTime timestamp) { + return timestamp; + } + + @Override + public OffsetDateTime convert(OffsetDateTime timestampTZ) { + return timestampTZ; + } +}
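To make the conversions above concrete, here is a small usage sketch (illustrative only: the demo class is hypothetical, and it assumes, as the constructor usage above suggests, that JdbcConfig#getOracleTimeColumnDefaultDateComponent returns the LocalDate used as the date part of a TIME value):

  package com.scalar.db.storage.jdbc;

  import java.time.LocalDateTime;
  import java.time.LocalTime;
  import java.time.OffsetDateTime;
  import java.time.ZoneOffset;

  // Hypothetical demo of the engine-specific time-type conversions added above.
  class TimeTypeStrategyDemo {
    static void demo(JdbcConfig config) {
      // PostgreSQL binds java.time values as-is.
      LocalTime pgTime = new RdbEngineTimeTypePostgresql().convert(LocalTime.of(12, 34, 56));

      // Oracle widens TIME to a TIMESTAMP whose date part is the configured default.
      LocalDateTime oracleTime =
          new RdbEngineTimeTypeOracle(config).convert(LocalTime.of(12, 34, 56));
      // e.g. 1970-01-01T12:34:56 if the default date component is 1970-01-01 (assumption)

      // MySQL receives a TIMESTAMPTZ already normalized to UTC by RdbEngineStrategy#encode
      // and drops the offset so the client, session, or server time zone cannot shift it.
      LocalDateTime mysqlTz =
          new RdbEngineTimeTypeMysql()
              .convert(OffsetDateTime.of(2024, 1, 1, 9, 0, 0, 0, ZoneOffset.UTC));
      // -> 2024-01-01T09:00
    }
  }

diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeSqlServer.java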
b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeSqlServer.java new file mode 100644 index 0000000000..df6a05da11 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeSqlServer.java @@ -0,0 +1,38 @@ +package com.scalar.db.storage.jdbc; + +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.format.DateTimeFormatter; +import microsoft.sql.DateTimeOffset; + +public class RdbEngineTimeTypeSqlServer + implements RdbEngineTimeTypeStrategy<String, LocalTime, String, DateTimeOffset> { + + @Override + public String convert(LocalDate date) { + // Pass the date value as text, otherwise the dates before the Julian to Gregorian Calendar + // transition (October 15, 1582) will be offset by 10 days. + return date.format(DateTimeFormatter.BASIC_ISO_DATE); + } + + @Override + public LocalTime convert(LocalTime time) { + return time; + } + + @Override + public String convert(LocalDateTime timestamp) { + // Pass the timestamp value as text, otherwise the dates before the Julian to Gregorian Calendar + // transition (October 15, 1582) will be offset by 10 days. + return timestamp.format(DateTimeFormatter.ISO_DATE_TIME); + } + + @Override + public DateTimeOffset convert(OffsetDateTime timestampTZ) { + // When using SQLServer DATETIMEOFFSET data type, we should use the SQLServer JDBC driver's + // microsoft.sql.DateTimeOffset class for encoding the value. + return DateTimeOffset.valueOf(timestampTZ); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeSqlite.java b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeSqlite.java new file mode 100644 index 0000000000..c8f3d26e39 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeSqlite.java @@ -0,0 +1,30 @@ +package com.scalar.db.storage.jdbc; + +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; + +public class RdbEngineTimeTypeSqlite implements RdbEngineTimeTypeStrategy<Long, Long, Long, Long> { + + @Override + public Long convert(LocalDate date) { + return TimeRelatedColumnEncodingUtils.encode(date); + } + + @Override + public Long convert(LocalTime time) { + return TimeRelatedColumnEncodingUtils.encode(time); + } + + @Override + public Long convert(LocalDateTime timestamp) { + return TimeRelatedColumnEncodingUtils.encode(timestamp); + } + + @Override + public Long convert(OffsetDateTime timestampTZ) { + return TimeRelatedColumnEncodingUtils.encode(timestampTZ.toInstant()); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeStrategy.java b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeStrategy.java new file mode 100644 index 0000000000..36c001bb2b --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/jdbc/RdbEngineTimeTypeStrategy.java @@ -0,0 +1,25 @@ +package com.scalar.db.storage.jdbc; + +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; + +/** + * An interface to convert time-related column values, backed by Java 8 time types, to the + * corresponding types that can be bound to a prepared statement by the JDBC driver. + * + * @param <T_DATE> the converted type for LocalDate + * @param <T_TIME> the converted type for LocalTime + * @param <T_TIMESTAMP> the converted type for LocalDateTime + * @param <T_TIMESTAMPTZ> the converted type for OffsetDateTime + */ +public interface RdbEngineTimeTypeStrategy<T_DATE, T_TIME, T_TIMESTAMP, T_TIMESTAMPTZ> { + T_DATE
convert(LocalDate date); + + T_TIME convert(LocalTime time); + + T_TIMESTAMP convert(LocalDateTime timestamp); + + T_TIMESTAMPTZ convert(OffsetDateTime timestampTZ); +} diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/ResultInterpreter.java b/core/src/main/java/com/scalar/db/storage/jdbc/ResultInterpreter.java index 7384e07760..0eecfe8183 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/ResultInterpreter.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/ResultInterpreter.java @@ -8,10 +8,14 @@ import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.Column; import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.sql.ResultSet; import java.sql.SQLException; @@ -26,11 +30,14 @@ public class ResultInterpreter { private final List<String> projections; private final TableMetadata metadata; + private final RdbEngineStrategy rdbEngine; @SuppressFBWarnings("EI_EXPOSE_REP2") - public ResultInterpreter(List<String> projections, TableMetadata metadata) { + public ResultInterpreter( + List<String> projections, TableMetadata metadata, RdbEngineStrategy rdbEngine) { this.projections = Objects.requireNonNull(projections); this.metadata = Objects.requireNonNull(metadata); + this.rdbEngine = rdbEngine; } public Result interpret(ResultSet resultSet) throws SQLException { @@ -96,6 +103,30 @@ private Column<?> convert(String name, ResultSet resultSet) throws SQLException ret = BlobColumn.ofNull(name); } break; + case DATE: + ret = rdbEngine.parseDateColumn(resultSet, name); + if (resultSet.wasNull()) { + ret = DateColumn.ofNull(name); + } + break; + case TIME: + ret = rdbEngine.parseTimeColumn(resultSet, name); + if (resultSet.wasNull()) { + ret = TimeColumn.ofNull(name); + } + break; + case TIMESTAMP: + ret = rdbEngine.parseTimestampColumn(resultSet, name); + if (resultSet.wasNull()) { + ret = TimestampColumn.ofNull(name); + } + break; + case TIMESTAMPTZ: + ret = rdbEngine.parseTimestampTZColumn(resultSet, name); + if (resultSet.wasNull()) { + ret = TimestampTZColumn.ofNull(name); + } + break; default: throw new AssertionError(); } diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/query/DeleteQuery.java b/core/src/main/java/com/scalar/db/storage/jdbc/query/DeleteQuery.java index afc51f9387..4a5a599814 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/query/DeleteQuery.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/query/DeleteQuery.java @@ -48,9 +48,9 @@ public String sql() { private String conditionSqlString() { List<String> conditions = new ArrayList<>(); - partitionKey.forEach(v -> conditions.add(rdbEngine.enclose(v.getName()) + "=?")); + partitionKey.getColumns().forEach(v -> conditions.add(rdbEngine.enclose(v.getName()) + "=?")); clusteringKey.ifPresent( - k -> k.forEach(v -> conditions.add(rdbEngine.enclose(v.getName()) + "=?"))); + k -> k.getColumns().forEach(v -> conditions.add(rdbEngine.enclose(v.getName()) + "=?"))); otherConditions.forEach( c -> conditions.add(
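Before the remaining query classes, it is worth seeing how these pieces compose. Below is a minimal round-trip sketch for a TIMESTAMPTZ column, matching how PreparedStatementBinder and ResultInterpreter use the new methods (a sketch only; the helper class and the "created_at" column name are hypothetical):

  package com.scalar.db.storage.jdbc;

  import com.scalar.db.io.TimestampTZColumn;
  import java.sql.PreparedStatement;
  import java.sql.ResultSet;
  import java.sql.SQLException;

  class TimeTypeRoundTripSketch {
    // Write path: encode() normalizes the Instant to an OffsetDateTime at UTC, and the
    // engine's time-type strategy converts it to what the driver binds best (for example,
    // a LocalDateTime for MySQL or a microsoft.sql.DateTimeOffset for SQL Server).
    static void bind(RdbEngineStrategy rdbEngine, PreparedStatement ps, TimestampTZColumn column)
        throws SQLException {
      ps.setObject(1, rdbEngine.getTimeTypeStrategy().convert(rdbEngine.encode(column)));
    }

    // Read path: ResultInterpreter delegates to the engine, so MySQL, for instance, can
    // reattach the UTC offset it dropped on the write path.
    static TimestampTZColumn read(RdbEngineStrategy rdbEngine, ResultSet resultSet)
        throws SQLException {
      return rdbEngine.parseTimestampTZColumn(resultSet, "created_at");
    }
  }

diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/query/InsertOnConflictDoUpdateQuery.java b/core/src/main/java/com/scalar/db/storage/jdbc/query/InsertOnConflictDoUpdateQuery.java index 4816e84eca..8e38a91bc0 100644 ---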
a/core/src/main/java/com/scalar/db/storage/jdbc/query/InsertOnConflictDoUpdateQuery.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/query/InsertOnConflictDoUpdateQuery.java @@ -48,8 +48,8 @@ public String sql() { private String makeValuesSqlString() { List names = new ArrayList<>(); - partitionKey.forEach(v -> names.add(v.getName())); - clusteringKey.ifPresent(k -> k.forEach(v -> names.add(v.getName()))); + partitionKey.getColumns().forEach(v -> names.add(v.getName())); + clusteringKey.ifPresent(k -> k.getColumns().forEach(v -> names.add(v.getName()))); names.addAll(columns.keySet()); return "(" @@ -61,8 +61,8 @@ private String makeValuesSqlString() { private String makeOnConflictDoUpdateSqlString() { List primaryKeys = new ArrayList<>(); - partitionKey.forEach(v -> primaryKeys.add(v.getName())); - clusteringKey.ifPresent(k -> k.forEach(v -> primaryKeys.add(v.getName()))); + partitionKey.getColumns().forEach(v -> primaryKeys.add(v.getName())); + clusteringKey.ifPresent(k -> k.getColumns().forEach(v -> primaryKeys.add(v.getName()))); StringBuilder sql = new StringBuilder(); sql.append("ON CONFLICT (") diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/query/InsertOnDuplicateKeyUpdateQuery.java b/core/src/main/java/com/scalar/db/storage/jdbc/query/InsertOnDuplicateKeyUpdateQuery.java index d5da6cdfdd..821ecef6eb 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/query/InsertOnDuplicateKeyUpdateQuery.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/query/InsertOnDuplicateKeyUpdateQuery.java @@ -55,8 +55,8 @@ public String sql() { private String makeValuesSqlString() { List names = new ArrayList<>(); - partitionKey.forEach(v -> names.add(v.getName())); - clusteringKey.ifPresent(k -> k.forEach(v -> names.add(v.getName()))); + partitionKey.getColumns().forEach(v -> names.add(v.getName())); + clusteringKey.ifPresent(k -> k.getColumns().forEach(v -> names.add(v.getName()))); names.addAll(columns.keySet()); return "(" + names.stream().map(rdbEngine::enclose).collect(Collectors.joining(",")) diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/query/InsertQuery.java b/core/src/main/java/com/scalar/db/storage/jdbc/query/InsertQuery.java index 8c91180051..10a9e66946 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/query/InsertQuery.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/query/InsertQuery.java @@ -45,8 +45,8 @@ public String sql() { private String makeValuesSqlString() { List names = new ArrayList<>(); - partitionKey.forEach(v -> names.add(v.getName())); - clusteringKey.ifPresent(k -> k.forEach(v -> names.add(v.getName()))); + partitionKey.getColumns().forEach(v -> names.add(v.getName())); + clusteringKey.ifPresent(k -> k.getColumns().forEach(v -> names.add(v.getName()))); names.addAll(columns.keySet()); return "(" + names.stream().map(rdbEngine::enclose).collect(Collectors.joining(",")) diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/query/MergeIntoQuery.java b/core/src/main/java/com/scalar/db/storage/jdbc/query/MergeIntoQuery.java index 260714f641..5fc8b438de 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/query/MergeIntoQuery.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/query/MergeIntoQuery.java @@ -39,9 +39,9 @@ public MergeIntoQuery(Builder builder) { @Override public String sql() { List enclosedKeyNames = new ArrayList<>(); - partitionKey.forEach(v -> enclosedKeyNames.add(rdbEngine.enclose(v.getName()))); + partitionKey.getColumns().forEach(v -> 
enclosedKeyNames.add(rdbEngine.enclose(v.getName()))); clusteringKey.ifPresent( - k -> k.forEach(v -> enclosedKeyNames.add(rdbEngine.enclose(v.getName())))); + k -> k.getColumns().forEach(v -> enclosedKeyNames.add(rdbEngine.enclose(v.getName())))); List enclosedValueNames = columns.keySet().stream().map(rdbEngine::enclose).collect(Collectors.toList()); diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/query/MergeQuery.java b/core/src/main/java/com/scalar/db/storage/jdbc/query/MergeQuery.java index 742dda5234..24eefe38bc 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/query/MergeQuery.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/query/MergeQuery.java @@ -39,9 +39,9 @@ public MergeQuery(Builder builder) { @Override public String sql() { List enclosedKeyNames = new ArrayList<>(); - partitionKey.forEach(v -> enclosedKeyNames.add(rdbEngine.enclose(v.getName()))); + partitionKey.getColumns().forEach(v -> enclosedKeyNames.add(rdbEngine.enclose(v.getName()))); clusteringKey.ifPresent( - k -> k.forEach(v -> enclosedKeyNames.add(rdbEngine.enclose(v.getName())))); + k -> k.getColumns().forEach(v -> enclosedKeyNames.add(rdbEngine.enclose(v.getName())))); List enclosedValueNames = columns.keySet().stream().map(rdbEngine::enclose).collect(Collectors.toList()); diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/query/PreparedStatementBinder.java b/core/src/main/java/com/scalar/db/storage/jdbc/query/PreparedStatementBinder.java index dd213df480..906446f512 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/query/PreparedStatementBinder.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/query/PreparedStatementBinder.java @@ -7,10 +7,14 @@ import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.ColumnVisitor; import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import com.scalar.db.storage.jdbc.RdbEngineStrategy; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.sql.PreparedStatement; @@ -23,7 +27,6 @@ public class PreparedStatementBinder implements ColumnVisitor { private final PreparedStatement preparedStatement; private final TableMetadata tableMetadata; private final RdbEngineStrategy rdbEngine; - private int index = 1; private SQLException sqlException; @@ -134,6 +137,62 @@ public void visit(BlobColumn column) { } } + @Override + public void visit(DateColumn column) { + try { + if (column.hasNullValue()) { + preparedStatement.setNull(index++, getSqlType(column.getName())); + } else { + preparedStatement.setObject( + index++, rdbEngine.getTimeTypeStrategy().convert(rdbEngine.encode(column))); + } + } catch (SQLException e) { + sqlException = e; + } + } + + @Override + public void visit(TimeColumn column) { + try { + if (column.hasNullValue()) { + preparedStatement.setNull(index++, getSqlType(column.getName())); + } else { + preparedStatement.setObject( + index++, rdbEngine.getTimeTypeStrategy().convert(rdbEngine.encode(column))); + } + } catch (SQLException e) { + sqlException = e; + } + } + + @Override + public void visit(TimestampColumn column) { + try { + if (column.hasNullValue()) { + preparedStatement.setNull(index++, getSqlType(column.getName())); + } else { + preparedStatement.setObject( + index++, 
rdbEngine.getTimeTypeStrategy().convert(rdbEngine.encode(column))); + } + } catch (SQLException e) { + sqlException = e; + } + } + + @Override + public void visit(TimestampTZColumn column) { + try { + if (column.hasNullValue()) { + preparedStatement.setNull(index++, getSqlType(column.getName())); + } else { + preparedStatement.setObject( + index++, rdbEngine.getTimeTypeStrategy().convert(rdbEngine.encode(column))); + } + } catch (SQLException e) { + sqlException = e; + } + } + public void bindLikeClause(LikeExpression likeExpression) { try { String pattern = rdbEngine.getPattern(likeExpression); diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/query/SimpleSelectQuery.java b/core/src/main/java/com/scalar/db/storage/jdbc/query/SimpleSelectQuery.java index 44cbb9ddd6..2df3fe6caf 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/query/SimpleSelectQuery.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/query/SimpleSelectQuery.java @@ -94,11 +94,11 @@ private String projectionSqlString() { private String conditionSqlString() { List conditions = new ArrayList<>(); partitionKey.ifPresent( - k -> k.forEach(v -> conditions.add(rdbEngine.enclose(v.getName()) + "=?"))); + k -> k.getColumns().forEach(v -> conditions.add(rdbEngine.enclose(v.getName()) + "=?"))); clusteringKey.ifPresent( - k -> k.forEach(v -> conditions.add(rdbEngine.enclose(v.getName()) + "=?"))); + k -> k.getColumns().forEach(v -> conditions.add(rdbEngine.enclose(v.getName()) + "=?"))); commonClusteringKey.ifPresent( - k -> k.forEach(v -> conditions.add(rdbEngine.enclose(v.getName()) + "=?"))); + k -> k.getColumns().forEach(v -> conditions.add(rdbEngine.enclose(v.getName()) + "=?"))); startColumn.ifPresent( c -> conditions.add(rdbEngine.enclose(c.getName()) + (startInclusive ? ">=?" 
: ">?"))); endColumn.ifPresent( diff --git a/core/src/main/java/com/scalar/db/storage/jdbc/query/UpdateQuery.java b/core/src/main/java/com/scalar/db/storage/jdbc/query/UpdateQuery.java index 0ba6930016..6c91579f18 100644 --- a/core/src/main/java/com/scalar/db/storage/jdbc/query/UpdateQuery.java +++ b/core/src/main/java/com/scalar/db/storage/jdbc/query/UpdateQuery.java @@ -60,9 +60,9 @@ private String makeSetSqlString() { private String makeConditionSqlString() { List conditions = new ArrayList<>(); - partitionKey.forEach(v -> conditions.add(rdbEngine.enclose(v.getName()) + "=?")); + partitionKey.getColumns().forEach(v -> conditions.add(rdbEngine.enclose(v.getName()) + "=?")); clusteringKey.ifPresent( - k -> k.forEach(v -> conditions.add(rdbEngine.enclose(v.getName()) + "=?"))); + k -> k.getColumns().forEach(v -> conditions.add(rdbEngine.enclose(v.getName()) + "=?"))); otherConditions.forEach( c -> conditions.add( diff --git a/core/src/main/java/com/scalar/db/storage/multistorage/MultiStorageAdmin.java b/core/src/main/java/com/scalar/db/storage/multistorage/MultiStorageAdmin.java index 3196bce219..a1443e1999 100644 --- a/core/src/main/java/com/scalar/db/storage/multistorage/MultiStorageAdmin.java +++ b/core/src/main/java/com/scalar/db/storage/multistorage/MultiStorageAdmin.java @@ -196,9 +196,10 @@ public void addNewColumnToTable( } @Override - public TableMetadata getImportTableMetadata(String namespace, String table) + public TableMetadata getImportTableMetadata( + String namespace, String table, Map overrideColumnsType) throws ExecutionException { - return getAdmin(namespace, table).getImportTableMetadata(namespace, table); + return getAdmin(namespace, table).getImportTableMetadata(namespace, table, overrideColumnsType); } @Override @@ -209,9 +210,13 @@ public void addRawColumnToTable( } @Override - public void importTable(String namespace, String table, Map options) + public void importTable( + String namespace, + String table, + Map options, + Map overrideColumnsType) throws ExecutionException { - getAdmin(namespace, table).importTable(namespace, table, options); + getAdmin(namespace, table).importTable(namespace, table, options, overrideColumnsType); } @Override diff --git a/core/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitAdmin.java b/core/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitAdmin.java index 2661d36773..8d86be812e 100644 --- a/core/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitAdmin.java +++ b/core/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitAdmin.java @@ -246,7 +246,11 @@ public Set getNamespaceNames() throws ExecutionException { } @Override - public void importTable(String namespace, String table, Map options) + public void importTable( + String namespace, + String table, + Map options, + Map overrideColumnsType) throws ExecutionException { checkNamespace(namespace); @@ -256,7 +260,7 @@ public void importTable(String namespace, String table, Map opti CoreError.TABLE_ALREADY_EXISTS.buildMessage( ScalarDbUtils.getFullTableName(namespace, table))); } - tableMetadata = admin.getImportTableMetadata(namespace, table); + tableMetadata = admin.getImportTableMetadata(namespace, table, overrideColumnsType); // add transaction metadata columns for (Map.Entry entry : diff --git a/core/src/main/java/com/scalar/db/transaction/consensuscommit/FilteredResult.java b/core/src/main/java/com/scalar/db/transaction/consensuscommit/FilteredResult.java index 96ff5253e5..7cd1adc2a0 100644 --- 
a/core/src/main/java/com/scalar/db/transaction/consensuscommit/FilteredResult.java +++ b/core/src/main/java/com/scalar/db/transaction/consensuscommit/FilteredResult.java @@ -9,6 +9,10 @@ import com.scalar.db.io.Column; import com.scalar.db.io.Key; import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.List; import java.util.Map; import java.util.Map.Entry; @@ -132,6 +136,34 @@ public byte[] getBlobAsBytes(String columnName) { return original.getBlobAsBytes(columnName); } + @Nullable + @Override + public LocalDate getDate(String columnName) { + checkIfExists(columnName); + return original.getDate(columnName); + } + + @Nullable + @Override + public LocalTime getTime(String columnName) { + checkIfExists(columnName); + return original.getTime(columnName); + } + + @Nullable + @Override + public LocalDateTime getTimestamp(String columnName) { + checkIfExists(columnName); + return original.getTimestamp(columnName); + } + + @Nullable + @Override + public Instant getTimestampTZ(String columnName) { + checkIfExists(columnName); + return original.getTimestampTZ(columnName); + } + @Nullable @Override public Object getAsObject(String columnName) { diff --git a/core/src/main/java/com/scalar/db/transaction/consensuscommit/MergedResult.java b/core/src/main/java/com/scalar/db/transaction/consensuscommit/MergedResult.java index 0c2f1e7676..75b27190d2 100644 --- a/core/src/main/java/com/scalar/db/transaction/consensuscommit/MergedResult.java +++ b/core/src/main/java/com/scalar/db/transaction/consensuscommit/MergedResult.java @@ -8,11 +8,19 @@ import com.scalar.db.io.BlobColumn; import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.Column; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.HashMap; import java.util.Map; import java.util.Optional; @@ -139,6 +147,46 @@ public byte[] getBlobAsBytes(String columnName) { return result.map(r -> r.getBlobAsBytes(columnName)).orElse(null); } + @Nullable + @Override + public LocalDate getDate(String columnName) { + checkIfExists(columnName); + if (putColumns.containsKey(columnName)) { + return putColumns.get(columnName).getDateValue(); + } + return result.map(r -> r.getDate(columnName)).orElse(null); + } + + @Nullable + @Override + public LocalTime getTime(String columnName) { + checkIfExists(columnName); + if (putColumns.containsKey(columnName)) { + return putColumns.get(columnName).getTimeValue(); + } + return result.map(r -> r.getTime(columnName)).orElse(null); + } + + @Nullable + @Override + public LocalDateTime getTimestamp(String columnName) { + checkIfExists(columnName); + if (putColumns.containsKey(columnName)) { + return putColumns.get(columnName).getTimestampValue(); + } + return result.map(r -> r.getTimestamp(columnName)).orElse(null); + } + + @Nullable + @Override + public Instant getTimestampTZ(String columnName) { + checkIfExists(columnName); + if (putColumns.containsKey(columnName)) { + return putColumns.get(columnName).getTimestampTZValue(); + } + return result.map(r -> r.getTimestampTZ(columnName)).orElse(null); + } + 
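+ // Note: as with the typed getters above, a column written by the ongoing transaction + // (putColumns) takes precedence over the value read from the underlying storage.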
@Nullable @Override public Object getAsObject(String columnName) { @@ -162,6 +210,14 @@ public Object getAsObject(String columnName) { return getText(columnName); case BLOB: return getBlob(columnName); + case DATE: + return getDate(columnName); + case TIME: + return getTime(columnName); + case TIMESTAMP: + return getTimestamp(columnName); + case TIMESTAMPTZ: + return getTimestampTZ(columnName); default: throw new AssertionError(); } @@ -212,6 +268,14 @@ private Column getNullColumn(String columnName) { return TextColumn.ofNull(columnName); case BLOB: return BlobColumn.ofNull(columnName); + case DATE: + return DateColumn.ofNull(columnName); + case TIME: + return TimeColumn.ofNull(columnName); + case TIMESTAMP: + return TimestampColumn.ofNull(columnName); + case TIMESTAMPTZ: + return TimestampTZColumn.ofNull(columnName); default: throw new AssertionError(); } diff --git a/core/src/main/java/com/scalar/db/transaction/consensuscommit/TransactionResult.java b/core/src/main/java/com/scalar/db/transaction/consensuscommit/TransactionResult.java index b8007a64b7..84abc67105 100644 --- a/core/src/main/java/com/scalar/db/transaction/consensuscommit/TransactionResult.java +++ b/core/src/main/java/com/scalar/db/transaction/consensuscommit/TransactionResult.java @@ -10,6 +10,10 @@ import com.scalar.db.io.Key; import com.scalar.db.io.TextColumn; import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.Map; import java.util.Optional; import java.util.Set; @@ -87,6 +91,30 @@ public byte[] getBlobAsBytes(String columnName) { return result.getBlobAsBytes(columnName); } + @Nullable + @Override + public LocalDate getDate(String columnName) { + return result.getDate(columnName); + } + + @Nullable + @Override + public LocalTime getTime(String columnName) { + return result.getTime(columnName); + } + + @Nullable + @Override + public LocalDateTime getTimestamp(String columnName) { + return result.getTimestamp(columnName); + } + + @Nullable + @Override + public Instant getTimestampTZ(String columnName) { + return result.getTimestampTZ(columnName); + } + @Nullable @Override public Object getAsObject(String columnName) { diff --git a/core/src/main/java/com/scalar/db/transaction/jdbc/JdbcTransactionAdmin.java b/core/src/main/java/com/scalar/db/transaction/jdbc/JdbcTransactionAdmin.java index 5bf7f0a6c7..76f060903e 100644 --- a/core/src/main/java/com/scalar/db/transaction/jdbc/JdbcTransactionAdmin.java +++ b/core/src/main/java/com/scalar/db/transaction/jdbc/JdbcTransactionAdmin.java @@ -86,9 +86,13 @@ public boolean namespaceExists(String namespace) throws ExecutionException { } @Override - public void importTable(String namespace, String table, Map options) + public void importTable( + String namespace, + String table, + Map options, + Map overrideColumnsType) throws ExecutionException { - jdbcAdmin.importTable(namespace, table, options); + jdbcAdmin.importTable(namespace, table, options, overrideColumnsType); } /** diff --git a/core/src/main/java/com/scalar/db/transaction/singlecrudoperation/SingleCrudOperationTransactionAdmin.java b/core/src/main/java/com/scalar/db/transaction/singlecrudoperation/SingleCrudOperationTransactionAdmin.java index 36907e8669..a941639f1f 100644 --- a/core/src/main/java/com/scalar/db/transaction/singlecrudoperation/SingleCrudOperationTransactionAdmin.java +++ b/core/src/main/java/com/scalar/db/transaction/singlecrudoperation/SingleCrudOperationTransactionAdmin.java @@ -86,9 +86,13 @@ 
public boolean namespaceExists(String namespace) throws ExecutionException { } @Override - public void importTable(String namespace, String table, Map options) + public void importTable( + String namespace, + String table, + Map options, + Map overrideColumnsType) throws ExecutionException { - distributedStorageAdmin.importTable(namespace, table, options); + distributedStorageAdmin.importTable(namespace, table, options, overrideColumnsType); } @Override diff --git a/core/src/main/java/com/scalar/db/util/ScalarDbUtils.java b/core/src/main/java/com/scalar/db/util/ScalarDbUtils.java index 635452c253..a4261b7517 100644 --- a/core/src/main/java/com/scalar/db/util/ScalarDbUtils.java +++ b/core/src/main/java/com/scalar/db/util/ScalarDbUtils.java @@ -221,6 +221,12 @@ public static Value toValue(Column column) { return new TextValue(column.getName(), column.getTextValue()); case BLOB: return new BlobValue(column.getName(), column.getBlobValue()); + case DATE: + case TIME: + case TIMESTAMP: + case TIMESTAMPTZ: + throw new UnsupportedOperationException( + "The type " + column.getDataType() + " is not supported"); default: throw new AssertionError(); } diff --git a/core/src/main/java/com/scalar/db/util/TimeRelatedColumnEncodingUtils.java b/core/src/main/java/com/scalar/db/util/TimeRelatedColumnEncodingUtils.java new file mode 100644 index 0000000000..cbdbd090b5 --- /dev/null +++ b/core/src/main/java/com/scalar/db/util/TimeRelatedColumnEncodingUtils.java @@ -0,0 +1,112 @@ +package com.scalar.db.util; + +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.ZoneOffset; + +/** + * This class provides utility methods for encoding and decoding time related column value for + * DynamoDB, CosmosDB and SQLite + */ +public final class TimeRelatedColumnEncodingUtils { + private TimeRelatedColumnEncodingUtils() {} + + public static long encode(DateColumn column) { + assert column.getDateValue() != null; + return encode(column.getDateValue()); + } + + public static long encode(LocalDate date) { + return date.toEpochDay(); + } + + public static LocalDate decodeDate(long epochDay) { + return LocalDate.ofEpochDay(epochDay); + } + + public static long encode(TimeColumn column) { + assert column.getTimeValue() != null; + return encode(column.getTimeValue()); + } + + public static long encode(LocalTime time) { + return time.toNanoOfDay(); + } + + public static LocalTime decodeTime(long nanoOfDay) { + return LocalTime.ofNanoOfDay(nanoOfDay); + } + + public static long encode(TimestampColumn column) { + assert column.getTimestampValue() != null; + return encode(column.getTimestampValue()); + } + + public static long encode(LocalDateTime timestamp) { + return encode(timestamp.toInstant(ZoneOffset.UTC)); + } + + public static LocalDateTime decodeTimestamp(long longTimestamp) { + long milliOfSecond = longTimestamp % 1000; + if (longTimestamp < 0) { + // Convert the complement of the millisecondOfSecond to the actual millisecondOfSecond + milliOfSecond += 1000 - 1; + } + return LocalDateTime.ofEpochSecond( + longTimestamp / 1000, Math.toIntExact(milliOfSecond * 1_000_000), ZoneOffset.UTC); + } + + public static long encode(TimestampTZColumn column) { + assert column.getTimestampTZValue() != null; + return encode(column.getTimestampTZValue()); + } + + public static Instant 
decodeTimestampTZ(long longTimestampTZ) { + long milliOfSecond = longTimestampTZ % 1000; + + if (longTimestampTZ < 0) { + // Convert the complement of the millisecondOfSecond to the actual millisecondOfSecond + milliOfSecond += 1000 - 1; + } + return Instant.ofEpochSecond(longTimestampTZ / 1000, milliOfSecond * 1_000_000); + } + + @SuppressWarnings("JavaInstantGetSecondsGetNano") + public static long encode(Instant instant) { + // A Java Instant object is internally represented with two components: + // - the epoch second: the number of seconds since the epoch date 1970-01-01 + // - the nano of second: the number of nanoseconds since the start of the second. + // + // The range of a ScalarDB TIMESTAMP and TIMESTAMPTZ is from January 1st of 1000 + // to December 31st of 9999, and its precision is up to 1 millisecond. Since this range is + // smaller and this precision coarser, both components can be encoded into a single long value. + // The long value format is "<epochSecond><millisecondOfSecond>", where the rightmost 3 digits + // are the millisecondOfSecond with a value from 000 to 999, the other digits on the left are + // the epochSecond. + // To preserve the timestamp ordering if the epochSecond is negative (in case an instant is + // before 1970), the millisecondOfSecond is converted to its complement with the + // formula "complementOfN = 1000 - 1 - N", where N is the millisecondOfSecond. + // + // For example: + // - if epochSecond=12345 and millisecondOfSecond=789, then the encoded value will be 12345789 + // - if epochSecond=-12345 and millisecondOfSecond=789, then + // millisecondOfSecondComplement = 1000 - 1 - 789 = 210. So the encoded value will be + // -12345210. + + long encoded = instant.getEpochSecond() * 1000; + if (encoded < 0) { + // Convert the nanosecondOfSecond to millisecondOfSecond, compute its complement and subtract + // it + encoded -= 1000 - 1 - instant.getNano() / 1_000_000; + } else { + encoded += instant.getNano() / 1_000_000; + } + return encoded; + } +}
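As a worked example of the encoding described above (using only the utility introduced in this patch; the instants are arbitrary):

  import com.scalar.db.util.TimeRelatedColumnEncodingUtils;
  import java.time.Instant;

  class TimestampTZEncodingExample {
    public static void main(String[] args) {
      // 2024-01-01T00:00:00.789Z: epochSecond=1704067200, millisecondOfSecond=789,
      // so the encoded long is 1704067200789.
      long after1970 =
          TimeRelatedColumnEncodingUtils.encode(Instant.parse("2024-01-01T00:00:00.789Z"));

      // 1969-12-31T23:59:59.789Z: epochSecond=-1, millisecondOfSecond=789, whose
      // complement is 1000 - 1 - 789 = 210, so the encoded long is -1210.
      long before1970 =
          TimeRelatedColumnEncodingUtils.encode(Instant.parse("1969-12-31T23:59:59.789Z"));

      // The encoding preserves ordering and round-trips at millisecond precision.
      assert before1970 < after1970;
      assert TimeRelatedColumnEncodingUtils.decodeTimestampTZ(before1970)
          .equals(Instant.parse("1969-12-31T23:59:59.789Z"));
    }
  }

diff --git a/core/src/test/java/com/scalar/db/api/ConditionBuilderTest.java b/core/src/test/java/com/scalar/db/api/ConditionBuilderTest.java index 37883a5144..4667d2391b 100644 --- a/core/src/test/java/com/scalar/db/api/ConditionBuilderTest.java +++ b/core/src/test/java/com/scalar/db/api/ConditionBuilderTest.java @@ -7,15 +7,27 @@ import com.scalar.db.io.BigIntColumn; import com.scalar.db.io.BlobColumn; import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import org.junit.jupiter.api.Test; public class ConditionBuilderTest { + private static final LocalDate ANY_DATE = DateColumn.MAX_VALUE; + private static final LocalTime ANY_TIME = TimeColumn.MAX_VALUE; + private static final LocalDateTime ANY_TIMESTAMP = TimestampColumn.MAX_VALUE; + private static final Instant ANY_TIMESTAMPTZ = TimestampTZColumn.MAX_VALUE; @Test public void putIf_WithIsEqualToConditions_ShouldBuildProperly() { @@ -35,10 +47,14 @@ public void putIf_WithIsEqualToConditions_ShouldBuildProperly() { .and( ConditionBuilder.column("col8")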
.isEqualToBlob(ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)))) + .and(ConditionBuilder.column("col9").isEqualToDate(ANY_DATE)) + .and(ConditionBuilder.column("col10").isEqualToTime(ANY_TIME)) + .and(ConditionBuilder.column("col11").isEqualToTimestamp(ANY_TIMESTAMP)) + .and(ConditionBuilder.column("col12").isEqualToTimestampTZ(ANY_TIMESTAMPTZ)) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(8); + assertThat(actual.getExpressions().size()).isEqualTo(12); assertThat(actual.getExpressions().get(0)) .isEqualTo(new ConditionalExpression("col1", true, Operator.EQ)); assertThat(actual.getExpressions().get(1)) @@ -59,6 +75,16 @@ public void putIf_WithIsEqualToConditions_ShouldBuildProperly() { .isEqualTo( new ConditionalExpression( "col8", ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)), Operator.EQ)); + assertThat(actual.getExpressions().get(8)) + .isEqualTo(new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.EQ)); + assertThat(actual.getExpressions().get(9)) + .isEqualTo(new ConditionalExpression(TimeColumn.of("col10", ANY_TIME), Operator.EQ)); + assertThat(actual.getExpressions().get(10)) + .isEqualTo( + new ConditionalExpression(TimestampColumn.of("col11", ANY_TIMESTAMP), Operator.EQ)); + assertThat(actual.getExpressions().get(11)) + .isEqualTo( + new ConditionalExpression(TimestampTZColumn.of("col12", ANY_TIMESTAMPTZ), Operator.EQ)); } @Test @@ -79,10 +105,14 @@ public void putIf_WithIsNotEqualToConditions_ShouldBuildProperly() { .and( ConditionBuilder.column("col8") .isNotEqualToBlob(ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)))) + .and(ConditionBuilder.column("col9").isNotEqualToDate(ANY_DATE)) + .and(ConditionBuilder.column("col10").isNotEqualToTime(ANY_TIME)) + .and(ConditionBuilder.column("col11").isNotEqualToTimestamp(ANY_TIMESTAMP)) + .and(ConditionBuilder.column("col12").isNotEqualToTimestampTZ(ANY_TIMESTAMPTZ)) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(8); + assertThat(actual.getExpressions().size()).isEqualTo(12); assertThat(actual.getExpressions().get(0)) .isEqualTo(new ConditionalExpression("col1", true, Operator.NE)); assertThat(actual.getExpressions().get(1)) @@ -103,6 +133,16 @@ public void putIf_WithIsNotEqualToConditions_ShouldBuildProperly() { .isEqualTo( new ConditionalExpression( "col8", ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)), Operator.NE)); + assertThat(actual.getExpressions().get(8)) + .isEqualTo(new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.NE)); + assertThat(actual.getExpressions().get(9)) + .isEqualTo(new ConditionalExpression(TimeColumn.of("col10", ANY_TIME), Operator.NE)); + assertThat(actual.getExpressions().get(10)) + .isEqualTo( + new ConditionalExpression(TimestampColumn.of("col11", ANY_TIMESTAMP), Operator.NE)); + assertThat(actual.getExpressions().get(11)) + .isEqualTo( + new ConditionalExpression(TimestampTZColumn.of("col12", ANY_TIMESTAMPTZ), Operator.NE)); } @Test @@ -123,10 +163,14 @@ public void putIf_WithIsGreaterThanConditions_ShouldBuildProperly() { .and( ConditionBuilder.column("col8") .isGreaterThanBlob(ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)))) + .and(ConditionBuilder.column("col9").isGreaterThanDate(ANY_DATE)) + .and(ConditionBuilder.column("col10").isGreaterThanTime(ANY_TIME)) + .and(ConditionBuilder.column("col11").isGreaterThanTimestamp(ANY_TIMESTAMP)) + .and(ConditionBuilder.column("col12").isGreaterThanTimestampTZ(ANY_TIMESTAMPTZ)) .build(); // Assert - 
assertThat(actual.getExpressions().size()).isEqualTo(8); + assertThat(actual.getExpressions().size()).isEqualTo(12); assertThat(actual.getExpressions().get(0)) .isEqualTo(new ConditionalExpression("col1", true, Operator.GT)); assertThat(actual.getExpressions().get(1)) @@ -147,6 +191,16 @@ public void putIf_WithIsGreaterThanConditions_ShouldBuildProperly() { .isEqualTo( new ConditionalExpression( "col8", ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)), Operator.GT)); + assertThat(actual.getExpressions().get(8)) + .isEqualTo(new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.GT)); + assertThat(actual.getExpressions().get(9)) + .isEqualTo(new ConditionalExpression(TimeColumn.of("col10", ANY_TIME), Operator.GT)); + assertThat(actual.getExpressions().get(10)) + .isEqualTo( + new ConditionalExpression(TimestampColumn.of("col11", ANY_TIMESTAMP), Operator.GT)); + assertThat(actual.getExpressions().get(11)) + .isEqualTo( + new ConditionalExpression(TimestampTZColumn.of("col12", ANY_TIMESTAMPTZ), Operator.GT)); } @Test @@ -168,10 +222,15 @@ public void putIf_WithIsGreaterThanOrEqualToConditions_ShouldBuildProperly() { ConditionBuilder.column("col8") .isGreaterThanOrEqualToBlob( ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)))) + .and(ConditionBuilder.column("col9").isGreaterThanOrEqualToDate(ANY_DATE)) + .and(ConditionBuilder.column("col10").isGreaterThanOrEqualToTime(ANY_TIME)) + .and(ConditionBuilder.column("col11").isGreaterThanOrEqualToTimestamp(ANY_TIMESTAMP)) + .and( + ConditionBuilder.column("col12").isGreaterThanOrEqualToTimestampTZ(ANY_TIMESTAMPTZ)) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(8); + assertThat(actual.getExpressions().size()).isEqualTo(12); assertThat(actual.getExpressions().get(0)) .isEqualTo(new ConditionalExpression("col1", true, Operator.GTE)); assertThat(actual.getExpressions().get(1)) @@ -192,6 +251,17 @@ public void putIf_WithIsGreaterThanOrEqualToConditions_ShouldBuildProperly() { .isEqualTo( new ConditionalExpression( "col8", ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)), Operator.GTE)); + assertThat(actual.getExpressions().get(8)) + .isEqualTo(new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.GTE)); + assertThat(actual.getExpressions().get(9)) + .isEqualTo(new ConditionalExpression(TimeColumn.of("col10", ANY_TIME), Operator.GTE)); + assertThat(actual.getExpressions().get(10)) + .isEqualTo( + new ConditionalExpression(TimestampColumn.of("col11", ANY_TIMESTAMP), Operator.GTE)); + assertThat(actual.getExpressions().get(11)) + .isEqualTo( + new ConditionalExpression( + TimestampTZColumn.of("col12", ANY_TIMESTAMPTZ), Operator.GTE)); } @Test @@ -212,10 +282,14 @@ public void putIf_WithIsLessThanConditions_ShouldBuildProperly() { .and( ConditionBuilder.column("col8") .isLessThanBlob(ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)))) + .and(ConditionBuilder.column("col9").isLessThanDate(ANY_DATE)) + .and(ConditionBuilder.column("col10").isLessThanTime(ANY_TIME)) + .and(ConditionBuilder.column("col11").isLessThanTimestamp(ANY_TIMESTAMP)) + .and(ConditionBuilder.column("col12").isLessThanTimestampTZ(ANY_TIMESTAMPTZ)) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(8); + assertThat(actual.getExpressions().size()).isEqualTo(12); assertThat(actual.getExpressions().get(0)) .isEqualTo(new ConditionalExpression("col1", true, Operator.LT)); assertThat(actual.getExpressions().get(1)) @@ -236,6 +310,16 @@ public void 
putIf_WithIsLessThanConditions_ShouldBuildProperly() { .isEqualTo( new ConditionalExpression( "col8", ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)), Operator.LT)); + assertThat(actual.getExpressions().get(8)) + .isEqualTo(new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.LT)); + assertThat(actual.getExpressions().get(9)) + .isEqualTo(new ConditionalExpression(TimeColumn.of("col10", ANY_TIME), Operator.LT)); + assertThat(actual.getExpressions().get(10)) + .isEqualTo( + new ConditionalExpression(TimestampColumn.of("col11", ANY_TIMESTAMP), Operator.LT)); + assertThat(actual.getExpressions().get(11)) + .isEqualTo( + new ConditionalExpression(TimestampTZColumn.of("col12", ANY_TIMESTAMPTZ), Operator.LT)); } @Test @@ -257,10 +341,14 @@ public void putIf_WithIsLessThanOrEqualToConditions_ShouldBuildProperly() { ConditionBuilder.column("col8") .isLessThanOrEqualToBlob( ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)))) + .and(ConditionBuilder.column("col9").isLessThanOrEqualToDate(ANY_DATE)) + .and(ConditionBuilder.column("col10").isLessThanOrEqualToTime(ANY_TIME)) + .and(ConditionBuilder.column("col11").isLessThanOrEqualToTimestamp(ANY_TIMESTAMP)) + .and(ConditionBuilder.column("col12").isLessThanOrEqualToTimestampTZ(ANY_TIMESTAMPTZ)) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(8); + assertThat(actual.getExpressions().size()).isEqualTo(12); assertThat(actual.getExpressions().get(0)) .isEqualTo(new ConditionalExpression("col1", true, Operator.LTE)); assertThat(actual.getExpressions().get(1)) @@ -281,6 +369,17 @@ public void putIf_WithIsLessThanOrEqualToConditions_ShouldBuildProperly() { .isEqualTo( new ConditionalExpression( "col8", ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)), Operator.LTE)); + assertThat(actual.getExpressions().get(8)) + .isEqualTo(new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.LTE)); + assertThat(actual.getExpressions().get(9)) + .isEqualTo(new ConditionalExpression(TimeColumn.of("col10", ANY_TIME), Operator.LTE)); + assertThat(actual.getExpressions().get(10)) + .isEqualTo( + new ConditionalExpression(TimestampColumn.of("col11", ANY_TIMESTAMP), Operator.LTE)); + assertThat(actual.getExpressions().get(11)) + .isEqualTo( + new ConditionalExpression( + TimestampTZColumn.of("col12", ANY_TIMESTAMPTZ), Operator.LTE)); } @Test @@ -296,10 +395,14 @@ public void putIf_WithIsNullConditions_ShouldBuildProperly() { .and(ConditionBuilder.column("col5").isNullDouble()) .and(ConditionBuilder.column("col6").isNullText()) .and(ConditionBuilder.column("col7").isNullBlob()) + .and(ConditionBuilder.column("col8").isNullDate()) + .and(ConditionBuilder.column("col9").isNullTime()) + .and(ConditionBuilder.column("col10").isNullTimestamp()) + .and(ConditionBuilder.column("col11").isNullTimestampTZ()) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(7); + assertThat(actual.getExpressions().size()).isEqualTo(11); assertThat(actual.getExpressions().get(0).getColumn()).isEqualTo(BooleanColumn.ofNull("col1")); assertThat(actual.getExpressions().get(0).getOperator()).isEqualTo(Operator.IS_NULL); assertThat(actual.getExpressions().get(1).getColumn()).isEqualTo(IntColumn.ofNull("col2")); @@ -314,6 +417,16 @@ public void putIf_WithIsNullConditions_ShouldBuildProperly() { assertThat(actual.getExpressions().get(5).getOperator()).isEqualTo(Operator.IS_NULL); assertThat(actual.getExpressions().get(6).getColumn()).isEqualTo(BlobColumn.ofNull("col7")); 
assertThat(actual.getExpressions().get(6).getOperator()).isEqualTo(Operator.IS_NULL); + assertThat(actual.getExpressions().get(7).getColumn()).isEqualTo(DateColumn.ofNull("col8")); + assertThat(actual.getExpressions().get(7).getOperator()).isEqualTo(Operator.IS_NULL); + assertThat(actual.getExpressions().get(8).getColumn()).isEqualTo(TimeColumn.ofNull("col9")); + assertThat(actual.getExpressions().get(8).getOperator()).isEqualTo(Operator.IS_NULL); + assertThat(actual.getExpressions().get(9).getColumn()) + .isEqualTo(TimestampColumn.ofNull("col10")); + assertThat(actual.getExpressions().get(9).getOperator()).isEqualTo(Operator.IS_NULL); + assertThat(actual.getExpressions().get(10).getColumn()) + .isEqualTo(TimestampTZColumn.ofNull("col11")); + assertThat(actual.getExpressions().get(10).getOperator()).isEqualTo(Operator.IS_NULL); } @Test @@ -329,10 +442,14 @@ public void putIf_WithIsNotNullConditions_ShouldBuildProperly() { .and(ConditionBuilder.column("col5").isNotNullDouble()) .and(ConditionBuilder.column("col6").isNotNullText()) .and(ConditionBuilder.column("col7").isNotNullBlob()) + .and(ConditionBuilder.column("col8").isNotNullDate()) + .and(ConditionBuilder.column("col9").isNotNullTime()) + .and(ConditionBuilder.column("col10").isNotNullTimestamp()) + .and(ConditionBuilder.column("col11").isNotNullTimestampTZ()) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(7); + assertThat(actual.getExpressions().size()).isEqualTo(11); assertThat(actual.getExpressions().get(0).getColumn()).isEqualTo(BooleanColumn.ofNull("col1")); assertThat(actual.getExpressions().get(0).getOperator()).isEqualTo(Operator.IS_NOT_NULL); assertThat(actual.getExpressions().get(1).getColumn()).isEqualTo(IntColumn.ofNull("col2")); @@ -347,6 +464,16 @@ public void putIf_WithIsNotNullConditions_ShouldBuildProperly() { assertThat(actual.getExpressions().get(5).getOperator()).isEqualTo(Operator.IS_NOT_NULL); assertThat(actual.getExpressions().get(6).getColumn()).isEqualTo(BlobColumn.ofNull("col7")); assertThat(actual.getExpressions().get(6).getOperator()).isEqualTo(Operator.IS_NOT_NULL); + assertThat(actual.getExpressions().get(7).getColumn()).isEqualTo(DateColumn.ofNull("col8")); + assertThat(actual.getExpressions().get(7).getOperator()).isEqualTo(Operator.IS_NOT_NULL); + assertThat(actual.getExpressions().get(8).getColumn()).isEqualTo(TimeColumn.ofNull("col9")); + assertThat(actual.getExpressions().get(8).getOperator()).isEqualTo(Operator.IS_NOT_NULL); + assertThat(actual.getExpressions().get(9).getColumn()) + .isEqualTo(TimestampColumn.ofNull("col10")); + assertThat(actual.getExpressions().get(9).getOperator()).isEqualTo(Operator.IS_NOT_NULL); + assertThat(actual.getExpressions().get(10).getColumn()) + .isEqualTo(TimestampTZColumn.ofNull("col11")); + assertThat(actual.getExpressions().get(10).getOperator()).isEqualTo(Operator.IS_NOT_NULL); } @Test @@ -389,10 +516,14 @@ public void deleteIf_WithIsEqualToConditions_ShouldBuildProperly() { .and( ConditionBuilder.column("col8") .isEqualToBlob(ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)))) + .and(ConditionBuilder.column("col9").isEqualToDate(ANY_DATE)) + .and(ConditionBuilder.column("col10").isEqualToTime(ANY_TIME)) + .and(ConditionBuilder.column("col11").isEqualToTimestamp(ANY_TIMESTAMP)) + .and(ConditionBuilder.column("col12").isEqualToTimestampTZ(ANY_TIMESTAMPTZ)) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(8); + assertThat(actual.getExpressions().size()).isEqualTo(12); 
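// [Editor's sketch, not part of this diff] A minimal usage example of what the
// surrounding putIf/deleteIf tests exercise: the new temporal variants of
// ConditionBuilder added by this patch. The namespace, table, key, and column
// names below ("ns", "tbl", "id", "due_date", "opened_at", "updated_at") are
// hypothetical, chosen only for illustration.
//
// DeleteIf condition =
//     ConditionBuilder.deleteIf(
//             ConditionBuilder.column("due_date").isEqualToDate(LocalDate.of(2024, 12, 24)))
//         .and(ConditionBuilder.column("opened_at").isEqualToTime(LocalTime.NOON))
//         .and(ConditionBuilder.column("updated_at").isEqualToTimestampTZ(Instant.EPOCH))
//         .build();
// Delete delete =
//     Delete.newBuilder()
//         .namespace("ns")
//         .table("tbl")
//         .partitionKey(Key.ofInt("id", 1))
//         .condition(condition)
//         .build();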
assertThat(actual.getExpressions().get(0)) .isEqualTo(new ConditionalExpression("col1", true, Operator.EQ)); assertThat(actual.getExpressions().get(1)) @@ -413,6 +544,16 @@ public void deleteIf_WithIsEqualToConditions_ShouldBuildProperly() { .isEqualTo( new ConditionalExpression( "col8", ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)), Operator.EQ)); + assertThat(actual.getExpressions().get(8)) + .isEqualTo(new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.EQ)); + assertThat(actual.getExpressions().get(9)) + .isEqualTo(new ConditionalExpression(TimeColumn.of("col10", ANY_TIME), Operator.EQ)); + assertThat(actual.getExpressions().get(10)) + .isEqualTo( + new ConditionalExpression(TimestampColumn.of("col11", ANY_TIMESTAMP), Operator.EQ)); + assertThat(actual.getExpressions().get(11)) + .isEqualTo( + new ConditionalExpression(TimestampTZColumn.of("col12", ANY_TIMESTAMPTZ), Operator.EQ)); } @Test @@ -433,10 +574,14 @@ public void deleteIf_WithIsNotEqualToConditions_ShouldBuildProperly() { .and( ConditionBuilder.column("col8") .isNotEqualToBlob(ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)))) + .and(ConditionBuilder.column("col9").isNotEqualToDate(ANY_DATE)) + .and(ConditionBuilder.column("col10").isNotEqualToTime(ANY_TIME)) + .and(ConditionBuilder.column("col11").isNotEqualToTimestamp(ANY_TIMESTAMP)) + .and(ConditionBuilder.column("col12").isNotEqualToTimestampTZ(ANY_TIMESTAMPTZ)) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(8); + assertThat(actual.getExpressions().size()).isEqualTo(12); assertThat(actual.getExpressions().get(0)) .isEqualTo(new ConditionalExpression("col1", true, Operator.NE)); assertThat(actual.getExpressions().get(1)) @@ -457,6 +602,16 @@ public void deleteIf_WithIsNotEqualToConditions_ShouldBuildProperly() { .isEqualTo( new ConditionalExpression( "col8", ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)), Operator.NE)); + assertThat(actual.getExpressions().get(8)) + .isEqualTo(new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.NE)); + assertThat(actual.getExpressions().get(9)) + .isEqualTo(new ConditionalExpression(TimeColumn.of("col10", ANY_TIME), Operator.NE)); + assertThat(actual.getExpressions().get(10)) + .isEqualTo( + new ConditionalExpression(TimestampColumn.of("col11", ANY_TIMESTAMP), Operator.NE)); + assertThat(actual.getExpressions().get(11)) + .isEqualTo( + new ConditionalExpression(TimestampTZColumn.of("col12", ANY_TIMESTAMPTZ), Operator.NE)); } @Test @@ -477,10 +632,14 @@ public void deleteIf_WithIsGreaterThanConditions_ShouldBuildProperly() { .and( ConditionBuilder.column("col8") .isGreaterThanBlob(ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)))) + .and(ConditionBuilder.column("col9").isGreaterThanDate(ANY_DATE)) + .and(ConditionBuilder.column("col10").isGreaterThanTime(ANY_TIME)) + .and(ConditionBuilder.column("col11").isGreaterThanTimestamp(ANY_TIMESTAMP)) + .and(ConditionBuilder.column("col12").isGreaterThanTimestampTZ(ANY_TIMESTAMPTZ)) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(8); + assertThat(actual.getExpressions().size()).isEqualTo(12); assertThat(actual.getExpressions().get(0)) .isEqualTo(new ConditionalExpression("col1", true, Operator.GT)); assertThat(actual.getExpressions().get(1)) @@ -501,6 +660,16 @@ public void deleteIf_WithIsGreaterThanConditions_ShouldBuildProperly() { .isEqualTo( new ConditionalExpression( "col8", ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)), Operator.GT)); + 
assertThat(actual.getExpressions().get(8)) + .isEqualTo(new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.GT)); + assertThat(actual.getExpressions().get(9)) + .isEqualTo(new ConditionalExpression(TimeColumn.of("col10", ANY_TIME), Operator.GT)); + assertThat(actual.getExpressions().get(10)) + .isEqualTo( + new ConditionalExpression(TimestampColumn.of("col11", ANY_TIMESTAMP), Operator.GT)); + assertThat(actual.getExpressions().get(11)) + .isEqualTo( + new ConditionalExpression(TimestampTZColumn.of("col12", ANY_TIMESTAMPTZ), Operator.GT)); } @Test @@ -523,10 +692,15 @@ public void deleteIf_WithIsGreaterThanOrEqualToConditions_ShouldBuildProperly() ConditionBuilder.column("col8") .isGreaterThanOrEqualToBlob( ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)))) + .and(ConditionBuilder.column("col9").isGreaterThanOrEqualToDate(ANY_DATE)) + .and(ConditionBuilder.column("col10").isGreaterThanOrEqualToTime(ANY_TIME)) + .and(ConditionBuilder.column("col11").isGreaterThanOrEqualToTimestamp(ANY_TIMESTAMP)) + .and( + ConditionBuilder.column("col12").isGreaterThanOrEqualToTimestampTZ(ANY_TIMESTAMPTZ)) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(8); + assertThat(actual.getExpressions().size()).isEqualTo(12); assertThat(actual.getExpressions().get(0)) .isEqualTo(new ConditionalExpression("col1", true, Operator.GTE)); assertThat(actual.getExpressions().get(1)) @@ -547,6 +721,17 @@ public void deleteIf_WithIsGreaterThanOrEqualToConditions_ShouldBuildProperly() .isEqualTo( new ConditionalExpression( "col8", ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)), Operator.GTE)); + assertThat(actual.getExpressions().get(8)) + .isEqualTo(new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.GTE)); + assertThat(actual.getExpressions().get(9)) + .isEqualTo(new ConditionalExpression(TimeColumn.of("col10", ANY_TIME), Operator.GTE)); + assertThat(actual.getExpressions().get(10)) + .isEqualTo( + new ConditionalExpression(TimestampColumn.of("col11", ANY_TIMESTAMP), Operator.GTE)); + assertThat(actual.getExpressions().get(11)) + .isEqualTo( + new ConditionalExpression( + TimestampTZColumn.of("col12", ANY_TIMESTAMPTZ), Operator.GTE)); } @Test @@ -567,10 +752,14 @@ public void deleteIf_WithIsLessThanConditions_ShouldBuildProperly() { .and( ConditionBuilder.column("col8") .isLessThanBlob(ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)))) + .and(ConditionBuilder.column("col9").isLessThanDate(ANY_DATE)) + .and(ConditionBuilder.column("col10").isLessThanTime(ANY_TIME)) + .and(ConditionBuilder.column("col11").isLessThanTimestamp(ANY_TIMESTAMP)) + .and(ConditionBuilder.column("col12").isLessThanTimestampTZ(ANY_TIMESTAMPTZ)) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(8); + assertThat(actual.getExpressions().size()).isEqualTo(12); assertThat(actual.getExpressions().get(0)) .isEqualTo(new ConditionalExpression("col1", true, Operator.LT)); assertThat(actual.getExpressions().get(1)) @@ -591,6 +780,16 @@ public void deleteIf_WithIsLessThanConditions_ShouldBuildProperly() { .isEqualTo( new ConditionalExpression( "col8", ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)), Operator.LT)); + assertThat(actual.getExpressions().get(8)) + .isEqualTo(new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.LT)); + assertThat(actual.getExpressions().get(9)) + .isEqualTo(new ConditionalExpression(TimeColumn.of("col10", ANY_TIME), Operator.LT)); + assertThat(actual.getExpressions().get(10)) + .isEqualTo( + new 
ConditionalExpression(TimestampColumn.of("col11", ANY_TIMESTAMP), Operator.LT)); + assertThat(actual.getExpressions().get(11)) + .isEqualTo( + new ConditionalExpression(TimestampTZColumn.of("col12", ANY_TIMESTAMPTZ), Operator.LT)); } @Test @@ -612,10 +811,14 @@ public void deleteIf_WithIsLessThanOrEqualToConditions_ShouldBuildProperly() { ConditionBuilder.column("col8") .isLessThanOrEqualToBlob( ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)))) + .and(ConditionBuilder.column("col9").isLessThanOrEqualToDate(ANY_DATE)) + .and(ConditionBuilder.column("col10").isLessThanOrEqualToTime(ANY_TIME)) + .and(ConditionBuilder.column("col11").isLessThanOrEqualToTimestamp(ANY_TIMESTAMP)) + .and(ConditionBuilder.column("col12").isLessThanOrEqualToTimestampTZ(ANY_TIMESTAMPTZ)) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(8); + assertThat(actual.getExpressions().size()).isEqualTo(12); assertThat(actual.getExpressions().get(0)) .isEqualTo(new ConditionalExpression("col1", true, Operator.LTE)); assertThat(actual.getExpressions().get(1)) @@ -636,6 +839,17 @@ public void deleteIf_WithIsLessThanOrEqualToConditions_ShouldBuildProperly() { .isEqualTo( new ConditionalExpression( "col8", ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)), Operator.LTE)); + assertThat(actual.getExpressions().get(8)) + .isEqualTo(new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.LTE)); + assertThat(actual.getExpressions().get(9)) + .isEqualTo(new ConditionalExpression(TimeColumn.of("col10", ANY_TIME), Operator.LTE)); + assertThat(actual.getExpressions().get(10)) + .isEqualTo( + new ConditionalExpression(TimestampColumn.of("col11", ANY_TIMESTAMP), Operator.LTE)); + assertThat(actual.getExpressions().get(11)) + .isEqualTo( + new ConditionalExpression( + TimestampTZColumn.of("col12", ANY_TIMESTAMPTZ), Operator.LTE)); } @Test @@ -651,10 +865,14 @@ public void deleteIf_WithIsNullConditions_ShouldBuildProperly() { .and(ConditionBuilder.column("col5").isNullDouble()) .and(ConditionBuilder.column("col6").isNullText()) .and(ConditionBuilder.column("col7").isNullBlob()) + .and(ConditionBuilder.column("col8").isNullDate()) + .and(ConditionBuilder.column("col9").isNullTime()) + .and(ConditionBuilder.column("col10").isNullTimestamp()) + .and(ConditionBuilder.column("col11").isNullTimestampTZ()) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(7); + assertThat(actual.getExpressions().size()).isEqualTo(11); assertThat(actual.getExpressions().get(0).getColumn()).isEqualTo(BooleanColumn.ofNull("col1")); assertThat(actual.getExpressions().get(0).getOperator()).isEqualTo(Operator.IS_NULL); assertThat(actual.getExpressions().get(1).getColumn()).isEqualTo(IntColumn.ofNull("col2")); @@ -669,6 +887,16 @@ public void deleteIf_WithIsNullConditions_ShouldBuildProperly() { assertThat(actual.getExpressions().get(5).getOperator()).isEqualTo(Operator.IS_NULL); assertThat(actual.getExpressions().get(6).getColumn()).isEqualTo(BlobColumn.ofNull("col7")); assertThat(actual.getExpressions().get(6).getOperator()).isEqualTo(Operator.IS_NULL); + assertThat(actual.getExpressions().get(7).getColumn()).isEqualTo(DateColumn.ofNull("col8")); + assertThat(actual.getExpressions().get(7).getOperator()).isEqualTo(Operator.IS_NULL); + assertThat(actual.getExpressions().get(8).getColumn()).isEqualTo(TimeColumn.ofNull("col9")); + assertThat(actual.getExpressions().get(8).getOperator()).isEqualTo(Operator.IS_NULL); + assertThat(actual.getExpressions().get(9).getColumn()) + 
.isEqualTo(TimestampColumn.ofNull("col10")); + assertThat(actual.getExpressions().get(9).getOperator()).isEqualTo(Operator.IS_NULL); + assertThat(actual.getExpressions().get(10).getColumn()) + .isEqualTo(TimestampTZColumn.ofNull("col11")); + assertThat(actual.getExpressions().get(10).getOperator()).isEqualTo(Operator.IS_NULL); } @Test @@ -684,10 +912,14 @@ public void deleteIf_WithIsNotNullConditions_ShouldBuildProperly() { .and(ConditionBuilder.column("col5").isNotNullDouble()) .and(ConditionBuilder.column("col6").isNotNullText()) .and(ConditionBuilder.column("col7").isNotNullBlob()) + .and(ConditionBuilder.column("col8").isNotNullDate()) + .and(ConditionBuilder.column("col9").isNotNullTime()) + .and(ConditionBuilder.column("col10").isNotNullTimestamp()) + .and(ConditionBuilder.column("col11").isNotNullTimestampTZ()) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(7); + assertThat(actual.getExpressions().size()).isEqualTo(11); assertThat(actual.getExpressions().get(0).getColumn()).isEqualTo(BooleanColumn.ofNull("col1")); assertThat(actual.getExpressions().get(0).getOperator()).isEqualTo(Operator.IS_NOT_NULL); assertThat(actual.getExpressions().get(1).getColumn()).isEqualTo(IntColumn.ofNull("col2")); @@ -702,6 +934,16 @@ public void deleteIf_WithIsNotNullConditions_ShouldBuildProperly() { assertThat(actual.getExpressions().get(5).getOperator()).isEqualTo(Operator.IS_NOT_NULL); assertThat(actual.getExpressions().get(6).getColumn()).isEqualTo(BlobColumn.ofNull("col7")); assertThat(actual.getExpressions().get(6).getOperator()).isEqualTo(Operator.IS_NOT_NULL); + assertThat(actual.getExpressions().get(7).getColumn()).isEqualTo(DateColumn.ofNull("col8")); + assertThat(actual.getExpressions().get(7).getOperator()).isEqualTo(Operator.IS_NOT_NULL); + assertThat(actual.getExpressions().get(8).getColumn()).isEqualTo(TimeColumn.ofNull("col9")); + assertThat(actual.getExpressions().get(8).getOperator()).isEqualTo(Operator.IS_NOT_NULL); + assertThat(actual.getExpressions().get(9).getColumn()) + .isEqualTo(TimestampColumn.ofNull("col10")); + assertThat(actual.getExpressions().get(9).getOperator()).isEqualTo(Operator.IS_NOT_NULL); + assertThat(actual.getExpressions().get(10).getColumn()) + .isEqualTo(TimestampTZColumn.ofNull("col11")); + assertThat(actual.getExpressions().get(10).getOperator()).isEqualTo(Operator.IS_NOT_NULL); } @Test @@ -800,10 +1042,14 @@ public void updateIf_WithIsEqualToConditions_ShouldBuildProperly() { .and( ConditionBuilder.column("col8") .isEqualToBlob(ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)))) + .and(ConditionBuilder.column("col9").isEqualToDate(ANY_DATE)) + .and(ConditionBuilder.column("col10").isEqualToTime(ANY_TIME)) + .and(ConditionBuilder.column("col11").isEqualToTimestamp(ANY_TIMESTAMP)) + .and(ConditionBuilder.column("col12").isEqualToTimestampTZ(ANY_TIMESTAMPTZ)) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(8); + assertThat(actual.getExpressions().size()).isEqualTo(12); assertThat(actual.getExpressions().get(0)) .isEqualTo(new ConditionalExpression("col1", true, Operator.EQ)); assertThat(actual.getExpressions().get(1)) @@ -824,6 +1070,16 @@ public void updateIf_WithIsEqualToConditions_ShouldBuildProperly() { .isEqualTo( new ConditionalExpression( "col8", ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)), Operator.EQ)); + assertThat(actual.getExpressions().get(8)) + .isEqualTo(new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.EQ)); + 
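// [Editor's sketch, not part of this diff] What the equality assertions in these
// tests rely on, as the tests themselves imply: a ConditionalExpression compares by
// its column (name, type, value) and operator, so an expression produced by
// ConditionBuilder equals one constructed directly from a typed temporal column.
//
// ConditionalExpression viaBuilder = ConditionBuilder.column("col9").isEqualToDate(ANY_DATE);
// ConditionalExpression direct =
//     new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.EQ);
// assert viaBuilder.equals(direct); // both represent "col9 = ANY_DATE"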
assertThat(actual.getExpressions().get(9)) + .isEqualTo(new ConditionalExpression(TimeColumn.of("col10", ANY_TIME), Operator.EQ)); + assertThat(actual.getExpressions().get(10)) + .isEqualTo( + new ConditionalExpression(TimestampColumn.of("col11", ANY_TIMESTAMP), Operator.EQ)); + assertThat(actual.getExpressions().get(11)) + .isEqualTo( + new ConditionalExpression(TimestampTZColumn.of("col12", ANY_TIMESTAMPTZ), Operator.EQ)); } @Test @@ -844,10 +1100,14 @@ public void updateIf_WithIsNotEqualToConditions_ShouldBuildProperly() { .and( ConditionBuilder.column("col8") .isNotEqualToBlob(ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)))) + .and(ConditionBuilder.column("col9").isNotEqualToDate(ANY_DATE)) + .and(ConditionBuilder.column("col10").isNotEqualToTime(ANY_TIME)) + .and(ConditionBuilder.column("col11").isNotEqualToTimestamp(ANY_TIMESTAMP)) + .and(ConditionBuilder.column("col12").isNotEqualToTimestampTZ(ANY_TIMESTAMPTZ)) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(8); + assertThat(actual.getExpressions().size()).isEqualTo(12); assertThat(actual.getExpressions().get(0)) .isEqualTo(new ConditionalExpression("col1", true, Operator.NE)); assertThat(actual.getExpressions().get(1)) @@ -868,6 +1128,16 @@ public void updateIf_WithIsNotEqualToConditions_ShouldBuildProperly() { .isEqualTo( new ConditionalExpression( "col8", ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)), Operator.NE)); + assertThat(actual.getExpressions().get(8)) + .isEqualTo(new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.NE)); + assertThat(actual.getExpressions().get(9)) + .isEqualTo(new ConditionalExpression(TimeColumn.of("col10", ANY_TIME), Operator.NE)); + assertThat(actual.getExpressions().get(10)) + .isEqualTo( + new ConditionalExpression(TimestampColumn.of("col11", ANY_TIMESTAMP), Operator.NE)); + assertThat(actual.getExpressions().get(11)) + .isEqualTo( + new ConditionalExpression(TimestampTZColumn.of("col12", ANY_TIMESTAMPTZ), Operator.NE)); } @Test @@ -888,10 +1158,14 @@ public void updateIf_WithIsGreaterThanConditions_ShouldBuildProperly() { .and( ConditionBuilder.column("col8") .isGreaterThanBlob(ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)))) + .and(ConditionBuilder.column("col9").isGreaterThanDate(ANY_DATE)) + .and(ConditionBuilder.column("col10").isGreaterThanTime(ANY_TIME)) + .and(ConditionBuilder.column("col11").isGreaterThanTimestamp(ANY_TIMESTAMP)) + .and(ConditionBuilder.column("col12").isGreaterThanTimestampTZ(ANY_TIMESTAMPTZ)) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(8); + assertThat(actual.getExpressions().size()).isEqualTo(12); assertThat(actual.getExpressions().get(0)) .isEqualTo(new ConditionalExpression("col1", true, Operator.GT)); assertThat(actual.getExpressions().get(1)) @@ -912,6 +1186,16 @@ public void updateIf_WithIsGreaterThanConditions_ShouldBuildProperly() { .isEqualTo( new ConditionalExpression( "col8", ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)), Operator.GT)); + assertThat(actual.getExpressions().get(8)) + .isEqualTo(new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.GT)); + assertThat(actual.getExpressions().get(9)) + .isEqualTo(new ConditionalExpression(TimeColumn.of("col10", ANY_TIME), Operator.GT)); + assertThat(actual.getExpressions().get(10)) + .isEqualTo( + new ConditionalExpression(TimestampColumn.of("col11", ANY_TIMESTAMP), Operator.GT)); + assertThat(actual.getExpressions().get(11)) + .isEqualTo( + new 
ConditionalExpression(TimestampTZColumn.of("col12", ANY_TIMESTAMPTZ), Operator.GT)); } @Test @@ -934,10 +1218,15 @@ public void updateIf_WithIsGreaterThanOrEqualToConditions_ShouldBuildProperly() ConditionBuilder.column("col8") .isGreaterThanOrEqualToBlob( ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)))) + .and(ConditionBuilder.column("col9").isGreaterThanOrEqualToDate(ANY_DATE)) + .and(ConditionBuilder.column("col10").isGreaterThanOrEqualToTime(ANY_TIME)) + .and(ConditionBuilder.column("col11").isGreaterThanOrEqualToTimestamp(ANY_TIMESTAMP)) + .and( + ConditionBuilder.column("col12").isGreaterThanOrEqualToTimestampTZ(ANY_TIMESTAMPTZ)) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(8); + assertThat(actual.getExpressions().size()).isEqualTo(12); assertThat(actual.getExpressions().get(0)) .isEqualTo(new ConditionalExpression("col1", true, Operator.GTE)); assertThat(actual.getExpressions().get(1)) @@ -958,6 +1247,17 @@ public void updateIf_WithIsGreaterThanOrEqualToConditions_ShouldBuildProperly() .isEqualTo( new ConditionalExpression( "col8", ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)), Operator.GTE)); + assertThat(actual.getExpressions().get(8)) + .isEqualTo(new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.GTE)); + assertThat(actual.getExpressions().get(9)) + .isEqualTo(new ConditionalExpression(TimeColumn.of("col10", ANY_TIME), Operator.GTE)); + assertThat(actual.getExpressions().get(10)) + .isEqualTo( + new ConditionalExpression(TimestampColumn.of("col11", ANY_TIMESTAMP), Operator.GTE)); + assertThat(actual.getExpressions().get(11)) + .isEqualTo( + new ConditionalExpression( + TimestampTZColumn.of("col12", ANY_TIMESTAMPTZ), Operator.GTE)); } @Test @@ -978,10 +1278,14 @@ public void updateIf_WithIsLessThanConditions_ShouldBuildProperly() { .and( ConditionBuilder.column("col8") .isLessThanBlob(ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)))) + .and(ConditionBuilder.column("col9").isLessThanDate(ANY_DATE)) + .and(ConditionBuilder.column("col10").isLessThanTime(ANY_TIME)) + .and(ConditionBuilder.column("col11").isLessThanTimestamp(ANY_TIMESTAMP)) + .and(ConditionBuilder.column("col12").isLessThanTimestampTZ(ANY_TIMESTAMPTZ)) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(8); + assertThat(actual.getExpressions().size()).isEqualTo(12); assertThat(actual.getExpressions().get(0)) .isEqualTo(new ConditionalExpression("col1", true, Operator.LT)); assertThat(actual.getExpressions().get(1)) @@ -1002,6 +1306,16 @@ public void updateIf_WithIsLessThanConditions_ShouldBuildProperly() { .isEqualTo( new ConditionalExpression( "col8", ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)), Operator.LT)); + assertThat(actual.getExpressions().get(8)) + .isEqualTo(new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.LT)); + assertThat(actual.getExpressions().get(9)) + .isEqualTo(new ConditionalExpression(TimeColumn.of("col10", ANY_TIME), Operator.LT)); + assertThat(actual.getExpressions().get(10)) + .isEqualTo( + new ConditionalExpression(TimestampColumn.of("col11", ANY_TIMESTAMP), Operator.LT)); + assertThat(actual.getExpressions().get(11)) + .isEqualTo( + new ConditionalExpression(TimestampTZColumn.of("col12", ANY_TIMESTAMPTZ), Operator.LT)); } @Test @@ -1023,10 +1337,14 @@ public void updateIf_WithIsLessThanOrEqualToConditions_ShouldBuildProperly() { ConditionBuilder.column("col8") .isLessThanOrEqualToBlob( ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)))) + 
.and(ConditionBuilder.column("col9").isLessThanOrEqualToDate(ANY_DATE)) + .and(ConditionBuilder.column("col10").isLessThanOrEqualToTime(ANY_TIME)) + .and(ConditionBuilder.column("col11").isLessThanOrEqualToTimestamp(ANY_TIMESTAMP)) + .and(ConditionBuilder.column("col12").isLessThanOrEqualToTimestampTZ(ANY_TIMESTAMPTZ)) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(8); + assertThat(actual.getExpressions().size()).isEqualTo(12); assertThat(actual.getExpressions().get(0)) .isEqualTo(new ConditionalExpression("col1", true, Operator.LTE)); assertThat(actual.getExpressions().get(1)) @@ -1047,6 +1365,17 @@ public void updateIf_WithIsLessThanOrEqualToConditions_ShouldBuildProperly() { .isEqualTo( new ConditionalExpression( "col8", ByteBuffer.wrap("blob2".getBytes(StandardCharsets.UTF_8)), Operator.LTE)); + assertThat(actual.getExpressions().get(8)) + .isEqualTo(new ConditionalExpression(DateColumn.of("col9", ANY_DATE), Operator.LTE)); + assertThat(actual.getExpressions().get(9)) + .isEqualTo(new ConditionalExpression(TimeColumn.of("col10", ANY_TIME), Operator.LTE)); + assertThat(actual.getExpressions().get(10)) + .isEqualTo( + new ConditionalExpression(TimestampColumn.of("col11", ANY_TIMESTAMP), Operator.LTE)); + assertThat(actual.getExpressions().get(11)) + .isEqualTo( + new ConditionalExpression( + TimestampTZColumn.of("col12", ANY_TIMESTAMPTZ), Operator.LTE)); } @Test @@ -1062,10 +1391,14 @@ public void updateIf_WithIsNullConditions_ShouldBuildProperly() { .and(ConditionBuilder.column("col5").isNullDouble()) .and(ConditionBuilder.column("col6").isNullText()) .and(ConditionBuilder.column("col7").isNullBlob()) + .and(ConditionBuilder.column("col8").isNullDate()) + .and(ConditionBuilder.column("col9").isNullTime()) + .and(ConditionBuilder.column("col10").isNullTimestamp()) + .and(ConditionBuilder.column("col11").isNullTimestampTZ()) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(7); + assertThat(actual.getExpressions().size()).isEqualTo(11); assertThat(actual.getExpressions().get(0).getColumn()).isEqualTo(BooleanColumn.ofNull("col1")); assertThat(actual.getExpressions().get(0).getOperator()).isEqualTo(Operator.IS_NULL); assertThat(actual.getExpressions().get(1).getColumn()).isEqualTo(IntColumn.ofNull("col2")); @@ -1080,6 +1413,16 @@ public void updateIf_WithIsNullConditions_ShouldBuildProperly() { assertThat(actual.getExpressions().get(5).getOperator()).isEqualTo(Operator.IS_NULL); assertThat(actual.getExpressions().get(6).getColumn()).isEqualTo(BlobColumn.ofNull("col7")); assertThat(actual.getExpressions().get(6).getOperator()).isEqualTo(Operator.IS_NULL); + assertThat(actual.getExpressions().get(7).getColumn()).isEqualTo(DateColumn.ofNull("col8")); + assertThat(actual.getExpressions().get(7).getOperator()).isEqualTo(Operator.IS_NULL); + assertThat(actual.getExpressions().get(8).getColumn()).isEqualTo(TimeColumn.ofNull("col9")); + assertThat(actual.getExpressions().get(8).getOperator()).isEqualTo(Operator.IS_NULL); + assertThat(actual.getExpressions().get(9).getColumn()) + .isEqualTo(TimestampColumn.ofNull("col10")); + assertThat(actual.getExpressions().get(9).getOperator()).isEqualTo(Operator.IS_NULL); + assertThat(actual.getExpressions().get(10).getColumn()) + .isEqualTo(TimestampTZColumn.ofNull("col11")); + assertThat(actual.getExpressions().get(10).getOperator()).isEqualTo(Operator.IS_NULL); } @Test @@ -1095,10 +1438,14 @@ public void updateIf_WithIsNotNullConditions_ShouldBuildProperly() { 
.and(ConditionBuilder.column("col5").isNotNullDouble()) .and(ConditionBuilder.column("col6").isNotNullText()) .and(ConditionBuilder.column("col7").isNotNullBlob()) + .and(ConditionBuilder.column("col8").isNotNullDate()) + .and(ConditionBuilder.column("col9").isNotNullTime()) + .and(ConditionBuilder.column("col10").isNotNullTimestamp()) + .and(ConditionBuilder.column("col11").isNotNullTimestampTZ()) .build(); // Assert - assertThat(actual.getExpressions().size()).isEqualTo(7); + assertThat(actual.getExpressions().size()).isEqualTo(11); assertThat(actual.getExpressions().get(0).getColumn()).isEqualTo(BooleanColumn.ofNull("col1")); assertThat(actual.getExpressions().get(0).getOperator()).isEqualTo(Operator.IS_NOT_NULL); assertThat(actual.getExpressions().get(1).getColumn()).isEqualTo(IntColumn.ofNull("col2")); @@ -1113,5 +1460,15 @@ public void updateIf_WithIsNotNullConditions_ShouldBuildProperly() { assertThat(actual.getExpressions().get(5).getOperator()).isEqualTo(Operator.IS_NOT_NULL); assertThat(actual.getExpressions().get(6).getColumn()).isEqualTo(BlobColumn.ofNull("col7")); assertThat(actual.getExpressions().get(6).getOperator()).isEqualTo(Operator.IS_NOT_NULL); + assertThat(actual.getExpressions().get(7).getColumn()).isEqualTo(DateColumn.ofNull("col8")); + assertThat(actual.getExpressions().get(7).getOperator()).isEqualTo(Operator.IS_NOT_NULL); + assertThat(actual.getExpressions().get(8).getColumn()).isEqualTo(TimeColumn.ofNull("col9")); + assertThat(actual.getExpressions().get(8).getOperator()).isEqualTo(Operator.IS_NOT_NULL); + assertThat(actual.getExpressions().get(9).getColumn()) + .isEqualTo(TimestampColumn.ofNull("col10")); + assertThat(actual.getExpressions().get(9).getOperator()).isEqualTo(Operator.IS_NOT_NULL); + assertThat(actual.getExpressions().get(10).getColumn()) + .isEqualTo(TimestampTZColumn.ofNull("col11")); + assertThat(actual.getExpressions().get(10).getOperator()).isEqualTo(Operator.IS_NOT_NULL); } } diff --git a/core/src/test/java/com/scalar/db/api/ConditionalExpressionTest.java b/core/src/test/java/com/scalar/db/api/ConditionalExpressionTest.java index a495231298..6037070552 100644 --- a/core/src/test/java/com/scalar/db/api/ConditionalExpressionTest.java +++ b/core/src/test/java/com/scalar/db/api/ConditionalExpressionTest.java @@ -9,6 +9,7 @@ import com.scalar.db.io.BlobValue; import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.BooleanValue; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.DoubleValue; import com.scalar.db.io.FloatColumn; @@ -17,8 +18,15 @@ import com.scalar.db.io.IntValue; import com.scalar.db.io.TextColumn; import com.scalar.db.io.TextValue; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import org.junit.jupiter.api.Test; public class ConditionalExpressionTest { @@ -132,6 +140,18 @@ public void constructor_ColumnAndOperatorGiven_ShouldConstructProperly() { ConditionalExpression expression7 = new ConditionalExpression( BlobColumn.of("col7", "blob".getBytes(StandardCharsets.UTF_8)), Operator.EQ); + ConditionalExpression expression8 = + new ConditionalExpression(DateColumn.of("col8", LocalDate.ofEpochDay(123)), Operator.NE); + ConditionalExpression expression9 = + new ConditionalExpression(TimeColumn.of("col9", 
LocalTime.ofSecondOfDay(456)), Operator.GT);
+    ConditionalExpression expression10 =
+        new ConditionalExpression(
+            TimestampColumn.of(
+                "col10", LocalDateTime.of(LocalDate.ofEpochDay(123), LocalTime.ofSecondOfDay(456))),
+            Operator.LT);
+    ConditionalExpression expression11 =
+        new ConditionalExpression(
+            TimestampTZColumn.of("col11", Instant.ofEpochMilli(12345)), Operator.GTE);
 
     // Assert
     assertThat(expression1.getColumn()).isEqualTo(BooleanColumn.of("col1", true));
@@ -155,5 +175,19 @@ public void constructor_ColumnAndOperatorGiven_ShouldConstructProperly() {
     assertThat(expression7.getColumn())
         .isEqualTo(BlobColumn.of("col7", "blob".getBytes(StandardCharsets.UTF_8)));
     assertThat(expression7.getOperator()).isEqualTo(Operator.EQ);
+    assertThat(expression8.getColumn()).isEqualTo(DateColumn.of("col8", LocalDate.ofEpochDay(123)));
+    assertThat(expression8.getOperator()).isEqualTo(Operator.NE);
+    assertThat(expression9.getColumn())
+        .isEqualTo(TimeColumn.of("col9", LocalTime.ofSecondOfDay(456)));
+    assertThat(expression9.getOperator()).isEqualTo(Operator.GT);
+    assertThat(expression10.getColumn())
+        .isEqualTo(
+            TimestampColumn.of(
+                "col10",
+                LocalDateTime.of(LocalDate.ofEpochDay(123), LocalTime.ofSecondOfDay(456))));
+    assertThat(expression10.getOperator()).isEqualTo(Operator.LT);
+    assertThat(expression11.getColumn())
+        .isEqualTo(TimestampTZColumn.of("col11", Instant.ofEpochMilli(12345)));
+    assertThat(expression11.getOperator()).isEqualTo(Operator.GTE);
   }
 }
diff --git a/core/src/test/java/com/scalar/db/api/InsertBuilderTest.java b/core/src/test/java/com/scalar/db/api/InsertBuilderTest.java
index 895e3eb751..ec94880f8e 100644
--- a/core/src/test/java/com/scalar/db/api/InsertBuilderTest.java
+++ b/core/src/test/java/com/scalar/db/api/InsertBuilderTest.java
@@ -4,10 +4,18 @@
 import com.google.common.collect.ImmutableMap;
 import com.scalar.db.io.BigIntColumn;
+import com.scalar.db.io.DateColumn;
 import com.scalar.db.io.Key;
 import com.scalar.db.io.TextColumn;
+import com.scalar.db.io.TimeColumn;
+import com.scalar.db.io.TimestampColumn;
+import com.scalar.db.io.TimestampTZColumn;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
+import java.time.Instant;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
 import org.assertj.core.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -20,6 +28,12 @@ public class InsertBuilderTest {
   private static final String TABLE_1 = "table1";
   private static final String TABLE_2 = "table2";
+
+  private static final LocalDate ANY_DATE = DateColumn.MAX_VALUE;
+  private static final LocalTime ANY_TIME = TimeColumn.MAX_VALUE;
+  private static final LocalDateTime ANY_TIMESTAMP = TimestampColumn.MAX_VALUE;
+  private static final Instant ANY_TIMESTAMPTZ = TimestampTZColumn.MAX_VALUE;
+
   @Mock private Key partitionKey1;
   @Mock private Key partitionKey2;
   @Mock private Key clusteringKey1;
@@ -84,6 +98,10 @@ public void build_WithAllParameters_ShouldBuildInsertCorrectly() {
             .intValue("int1", Integer.MAX_VALUE)
             .intValue("int2", Integer.valueOf(Integer.MAX_VALUE))
             .textValue("text", "a_value")
+            .dateValue("date", ANY_DATE)
+            .timeValue("time", ANY_TIME)
+            .timestampValue("timestamp", ANY_TIMESTAMP)
+            .timestampTZValue("timestamptz", ANY_TIMESTAMPTZ)
             .value(TextColumn.of("text2", "another_value"))
             .attribute("a1", "v1")
             .attributes(ImmutableMap.of("a2", "v2", "a3", "v3"))
             .build();
@@ -94,7 +112,7 @@
assertThat(actual.forTable()).hasValue(TABLE_1); Assertions.assertThat(actual.getPartitionKey()).isEqualTo(partitionKey1); assertThat(actual.getClusteringKey()).hasValue(clusteringKey1); - assertThat(actual.getColumns().size()).isEqualTo(14); + assertThat(actual.getColumns().size()).isEqualTo(18); assertThat(actual.getColumns().get("bigint1").getBigIntValue()) .isEqualTo(BigIntColumn.MAX_VALUE); assertThat(actual.getColumns().get("bigint2").getBigIntValue()) @@ -116,6 +134,11 @@ public void build_WithAllParameters_ShouldBuildInsertCorrectly() { .isEqualTo(Integer.valueOf(Integer.MAX_VALUE)); assertThat(actual.getColumns().get("text").getTextValue()).isEqualTo("a_value"); assertThat(actual.getColumns().get("text2").getTextValue()).isEqualTo("another_value"); + assertThat(actual.getColumns().get("date").getDateValue()).isEqualTo(ANY_DATE); + assertThat(actual.getColumns().get("time").getTimeValue()).isEqualTo(ANY_TIME); + assertThat(actual.getColumns().get("timestamp").getTimestampValue()).isEqualTo(ANY_TIMESTAMP); + assertThat(actual.getColumns().get("timestamptz").getTimestampTZValue()) + .isEqualTo(ANY_TIMESTAMPTZ); assertThat(actual.getAttributes()) .isEqualTo(ImmutableMap.of("a1", "v1", "a2", "v2", "a3", "v3")); } @@ -136,6 +159,10 @@ public void build_WithAllValuesToNull_ShouldBuildInsertCorrectly() { .floatValue("float", null) .intValue("int", null) .textValue("text", null) + .dateValue("date", null) + .timeValue("time", null) + .timestampValue("timestamp", null) + .timestampTZValue("timestamptz", null) .build(); // Assert @@ -143,7 +170,7 @@ public void build_WithAllValuesToNull_ShouldBuildInsertCorrectly() { assertThat(actual.forTable()).hasValue(TABLE_1); Assertions.assertThat(actual.getPartitionKey()).isEqualTo(partitionKey1); assertThat(actual.getClusteringKey()).isEmpty(); - assertThat(actual.getColumns().size()).isEqualTo(8); + assertThat(actual.getColumns().size()).isEqualTo(12); assertThat(actual.getColumns().get("bigint").hasNullValue()).isTrue(); assertThat(actual.getColumns().get("blob1").hasNullValue()).isTrue(); assertThat(actual.getColumns().get("blob2").hasNullValue()).isTrue(); @@ -152,6 +179,10 @@ public void build_WithAllValuesToNull_ShouldBuildInsertCorrectly() { assertThat(actual.getColumns().get("float").hasNullValue()).isTrue(); assertThat(actual.getColumns().get("int").hasNullValue()).isTrue(); assertThat(actual.getColumns().get("text").hasNullValue()).isTrue(); + assertThat(actual.getColumns().get("date").hasNullValue()).isTrue(); + assertThat(actual.getColumns().get("time").hasNullValue()).isTrue(); + assertThat(actual.getColumns().get("timestamp").hasNullValue()).isTrue(); + assertThat(actual.getColumns().get("timestamptz").hasNullValue()).isTrue(); } @Test @@ -197,6 +228,10 @@ public void build_FromExistingWithoutChange_ShouldCopy() { .intValue("int1", Integer.MAX_VALUE) .intValue("int2", Integer.valueOf(Integer.MAX_VALUE)) .textValue("text", "a_value") + .dateValue("date", ANY_DATE) + .timeValue("time", ANY_TIME) + .timestampValue("timestamp", ANY_TIMESTAMP) + .timestampTZValue("timestamptz", ANY_TIMESTAMPTZ) .value(TextColumn.of("text2", "another_value")) .build(); @@ -229,6 +264,10 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildInsertWithUpdate .intValue("int1", Integer.MAX_VALUE) .intValue("int2", Integer.valueOf(Integer.MAX_VALUE)) .textValue("text", "a_value") + .dateValue("date", DateColumn.MAX_VALUE) + .timeValue("time", TimeColumn.MAX_VALUE) + .timestampValue("timestamp", TimestampColumn.MAX_VALUE) + 
.timestampTZValue("timestamptz", TimestampTZColumn.MAX_VALUE) .value(TextColumn.of("text2", "another_value")) .attribute("a1", "v1") .attributes(ImmutableMap.of("a2", "v2", "a3", "v3")) @@ -255,6 +294,10 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildInsertWithUpdate .intValue("int1", Integer.MIN_VALUE) .intValue("int2", Integer.valueOf(Integer.MIN_VALUE)) .textValue("text", "another_value") + .dateValue("date", LocalDate.ofEpochDay(0)) + .timeValue("time", LocalTime.NOON) + .timestampValue("timestamp", LocalDateTime.of(LocalDate.ofEpochDay(0), LocalTime.NOON)) + .timestampTZValue("timestamptz", Instant.EPOCH) .value(TextColumn.of("text2", "foo")) .clearAttributes() .attribute("a4", "v4") @@ -267,7 +310,7 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildInsertWithUpdate assertThat(newInsert.forTable()).hasValue(TABLE_2); Assertions.assertThat(newInsert.getPartitionKey()).isEqualTo(partitionKey2); assertThat(newInsert.getClusteringKey()).hasValue(clusteringKey2); - assertThat(newInsert.getColumns().size()).isEqualTo(14); + assertThat(newInsert.getColumns().size()).isEqualTo(18); assertThat(newInsert.getColumns().get("bigint1").getBigIntValue()) .isEqualTo(BigIntColumn.MIN_VALUE); assertThat(newInsert.getColumns().get("bigint2").getBigIntValue()) @@ -288,6 +331,13 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildInsertWithUpdate assertThat(newInsert.getColumns().get("int2").getIntValue()) .isEqualTo(Integer.valueOf(Integer.MIN_VALUE)); assertThat(newInsert.getColumns().get("text").getTextValue()).isEqualTo("another_value"); + assertThat(newInsert.getColumns().get("date").getDateValue()) + .isEqualTo(LocalDate.ofEpochDay(0)); + assertThat(newInsert.getColumns().get("time").getTimeValue()).isEqualTo(LocalTime.NOON); + assertThat(newInsert.getColumns().get("timestamp").getTimestampValue()) + .isEqualTo(LocalDateTime.of(LocalDate.ofEpochDay(0), LocalTime.NOON)); + assertThat(newInsert.getColumns().get("timestamptz").getTimestampTZValue()) + .isEqualTo(Instant.EPOCH); assertThat(newInsert.getColumns().get("text2").getTextValue()).isEqualTo("foo"); assertThat(newInsert.getAttributes()) .isEqualTo(ImmutableMap.of("a4", "v4", "a5", "v5", "a6", "v6")); diff --git a/core/src/test/java/com/scalar/db/api/PutBuilderTest.java b/core/src/test/java/com/scalar/db/api/PutBuilderTest.java index 23535d738a..94c74cab62 100644 --- a/core/src/test/java/com/scalar/db/api/PutBuilderTest.java +++ b/core/src/test/java/com/scalar/db/api/PutBuilderTest.java @@ -7,14 +7,22 @@ import com.scalar.db.io.BlobColumn; import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.Column; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.Key; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import com.scalar.db.transaction.consensuscommit.ConsensusCommitOperationAttributes; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.Mock; @@ -26,6 +34,12 @@ public class PutBuilderTest { private static final String TABLE_1 = "table1"; private static final String TABLE_2 = "table2"; + + private static final LocalDate ANY_DATE = 
DateColumn.MAX_VALUE;
+  private static final LocalTime ANY_TIME = TimeColumn.MAX_VALUE;
+  private static final LocalDateTime ANY_TIMESTAMP = TimestampColumn.MAX_VALUE;
+  private static final Instant ANY_TIMESTAMPTZ = TimestampTZColumn.MAX_VALUE;
+
   @Mock private Key partitionKey1;
   @Mock private Key partitionKey2;
   @Mock private Key clusteringKey1;
@@ -87,6 +101,10 @@ public void build_WithAllParameters_ShouldBuildPutCorrectly() {
             .intValue("int1", Integer.MAX_VALUE)
             .intValue("int2", Integer.valueOf(Integer.MAX_VALUE))
             .textValue("text1", "a_value")
+            .dateValue("date", ANY_DATE)
+            .timeValue("time", ANY_TIME)
+            .timestampValue("timestamp", ANY_TIMESTAMP)
+            .timestampTZValue("timestamptz", ANY_TIMESTAMPTZ)
             .value(TextColumn.of("text2", "another_value"))
             .condition(condition1)
             .attribute("a1", "v1")
@@ -130,6 +148,10 @@ public void build_WithAllParameters_ShouldBuildPutCorrectly() {
                 .put("int1", IntColumn.of("int1", Integer.MAX_VALUE))
                 .put("int2", IntColumn.of("int2", Integer.MAX_VALUE))
                 .put("text1", TextColumn.of("text1", "a_value"))
+                .put("date", DateColumn.of("date", ANY_DATE))
+                .put("time", TimeColumn.of("time", ANY_TIME))
+                .put("timestamp", TimestampColumn.of("timestamp", ANY_TIMESTAMP))
+                .put("timestamptz", TimestampTZColumn.of("timestamptz", ANY_TIMESTAMPTZ))
                 .put("text2", TextColumn.of("text2", "another_value"))
                 .build()));
   }
@@ -150,22 +172,37 @@ public void build_WithAllValuesToNull_ShouldBuildPutCorrectly() {
             .floatValue("float", null)
             .intValue("int", null)
             .textValue("text", null)
+            .dateValue("date", null)
+            .timeValue("time", null)
+            .timestampValue("timestamp", null)
+            .timestampTZValue("timestamptz", null)
             .build();
 
     // Assert
     assertThat(put)
         .isEqualTo(
-            new Put(partitionKey1)
-                .forNamespace(NAMESPACE_1)
-                .forTable(TABLE_1)
-                .withBigIntValue("bigint", null)
-                .withBlobValue("blob1", (byte[]) null)
-                .withBlobValue("blob2", (ByteBuffer) null)
-                .withBooleanValue("bool", null)
-                .withDoubleValue("double", null)
-                .withFloatValue("float", null)
-                .withIntValue("int", null)
-                .withTextValue("text", null));
+            new Put(
+                NAMESPACE_1,
+                TABLE_1,
+                partitionKey1,
+                null,
+                null,
+                ImmutableMap.of(),
+                null,
+                ImmutableMap.<String, Column<?>>builder()
+                    .put("bigint", BigIntColumn.ofNull("bigint"))
+                    .put("blob1", BlobColumn.ofNull("blob1"))
+                    .put("blob2", BlobColumn.ofNull("blob2"))
+                    .put("bool", BooleanColumn.ofNull("bool"))
+                    .put("double", DoubleColumn.ofNull("double"))
+                    .put("float", FloatColumn.ofNull("float"))
+                    .put("int", IntColumn.ofNull("int"))
+                    .put("text", TextColumn.ofNull("text"))
+                    .put("date", DateColumn.ofNull("date"))
+                    .put("time", TimeColumn.ofNull("time"))
+                    .put("timestamp", TimestampColumn.ofNull("timestamp"))
+                    .put("timestamptz", TimestampTZColumn.ofNull("timestamptz"))
+                    .build()));
   }
 
   @Test
@@ -216,6 +253,10 @@ public void build_FromExistingWithoutChange_ShouldCopy() {
                 .put("int2", IntColumn.of("int2", Integer.MAX_VALUE))
                 .put("text1", TextColumn.of("text1", "a_value"))
                 .put("text2", TextColumn.of("text2", "another_value"))
+                .put("date", DateColumn.of("date", ANY_DATE))
+                .put("time", TimeColumn.of("time", ANY_TIME))
+                .put("timestamp", TimestampColumn.of("timestamp", ANY_TIMESTAMP))
+                .put("timestamptz", TimestampTZColumn.of("timestamptz", ANY_TIMESTAMPTZ))
                 .build());
 
     // Act
@@ -252,6 +293,11 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildPutWithUpdatedPa
                 .put("int2", IntColumn.of("int2", Integer.MAX_VALUE))
                 .put("text1", TextColumn.of("text1", "a_value"))
                 .put("text2", TextColumn.of("text2", "another_value"))
+                .put("date", DateColumn.of("date", DateColumn.MAX_VALUE))
+                .put("time", TimeColumn.of("time", TimeColumn.MAX_VALUE))
+                .put("timestamp", TimestampColumn.of("timestamp", TimestampColumn.MAX_VALUE))
+                .put(
+                    "timestamptz", TimestampTZColumn.of("timestamptz", TimestampTZColumn.MAX_VALUE))
                 .build());
 
     // Act
@@ -276,6 +322,10 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildPutWithUpdatedPa
             .intValue("int1", Integer.MIN_VALUE)
             .intValue("int2", Integer.valueOf(Integer.MIN_VALUE))
             .textValue("text1", "another_value")
+            .dateValue("date", LocalDate.ofEpochDay(0))
+            .timeValue("time", LocalTime.NOON)
+            .timestampValue("timestamp", LocalDateTime.of(LocalDate.ofEpochDay(0), LocalTime.NOON))
+            .timestampTZValue("timestamptz", Instant.EPOCH)
             .value(TextColumn.of("text2", "foo"))
            .condition(condition2)
             .clearAttributes()
@@ -322,6 +372,13 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildPutWithUpdatedPa
                 .put("int2", IntColumn.of("int2", Integer.MIN_VALUE))
                 .put("text1", TextColumn.of("text1", "another_value"))
                 .put("text2", TextColumn.of("text2", "foo"))
+                .put("date", DateColumn.of("date", LocalDate.ofEpochDay(0)))
+                .put("time", TimeColumn.of("time", LocalTime.NOON))
+                .put(
+                    "timestamp",
+                    TimestampColumn.of(
+                        "timestamp", LocalDateTime.of(LocalDate.ofEpochDay(0), LocalTime.NOON)))
+                .put("timestamptz", TimestampTZColumn.of("timestamptz", Instant.EPOCH))
                 .build()));
   }
diff --git a/core/src/test/java/com/scalar/db/api/PutTest.java b/core/src/test/java/com/scalar/db/api/PutTest.java
index a6106a06e8..207d6d6277 100644
--- a/core/src/test/java/com/scalar/db/api/PutTest.java
+++ b/core/src/test/java/com/scalar/db/api/PutTest.java
@@ -12,6 +12,7 @@
 import com.scalar.db.io.BooleanColumn;
 import com.scalar.db.io.BooleanValue;
 import com.scalar.db.io.Column;
+import com.scalar.db.io.DateColumn;
 import com.scalar.db.io.DoubleColumn;
 import com.scalar.db.io.DoubleValue;
 import com.scalar.db.io.FloatColumn;
@@ -21,10 +22,17 @@
 import com.scalar.db.io.IntValue;
 import com.scalar.db.io.Key;
 import com.scalar.db.io.TextColumn;
 import com.scalar.db.io.TextValue;
+import com.scalar.db.io.TimeColumn;
+import com.scalar.db.io.TimestampColumn;
+import com.scalar.db.io.TimestampTZColumn;
 import com.scalar.db.io.Value;
 import com.scalar.db.util.ScalarDbUtils;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
+import java.time.Instant;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
 import java.util.Arrays;
 import java.util.Map;
 import java.util.Optional;
@@ -583,4 +591,125 @@ public void getAttribute_ShouldReturnProperValues() {
     assertThat(put.getAttribute("a3")).hasValue("v3");
     assertThat(put.getAttributes()).isEqualTo(ImmutableMap.of("a1", "v1", "a2", "v2", "a3", "v3"));
   }
+
+  @Test
+  public void getDateValue_ProperValueGiven_ShouldReturnWhatsSet() {
+    // Arrange
+    LocalDate anyDate = LocalDate.of(1234, 5, 6);
+    DateColumn dateCol = DateColumn.of("date_col", anyDate);
+    TextColumn key = TextColumn.of("foo_col", "foo");
+    Put put =
+        new Put(
+            "ns",
+            "tbl",
+            Key.of(),
+            null,
+            null,
+            ImmutableMap.of(),
+            null,
+            ImmutableMap.of(
+                "foo_col",
+                key,
+                "date_col",
+                dateCol,
+                "bool_col",
+                BooleanColumn.of("bool_col", true)));
+
+    // Act
+    LocalDate actualDate = put.getDateValue("date_col");
+
+    // Assert
+    assertThat(actualDate).isEqualTo(anyDate);
+  }
+
+  @Test
+  public void getTimeValue_ProperValueGiven_ShouldReturnWhatsSet() {
+    // Arrange
+    LocalTime anyTime = LocalTime.ofSecondOfDay(12345);
+    TimeColumn timeCol = TimeColumn.of("time_col", anyTime);
+
+    Put put =
+        new Put(
+            "ns",
+            "tbl",
+            Key.of(),
+            null,
+            null,
+            ImmutableMap.of(),
+            null,
+            ImmutableMap.of(
+                "foo_col",
+                TextColumn.of("foo_col", "foo"),
+                "time_col",
+                timeCol,
+                "bool_col",
+                BooleanColumn.of("bool_col", true)));
+
+    // Act
+    LocalTime actualTime = put.getTimeValue("time_col");
+
+    // Assert
+    assertThat(actualTime).isEqualTo(anyTime);
+  }
+
+  @Test
+  public void getTimestampValue_ProperValueGiven_ShouldReturnWhatsSet() {
+    // Arrange
+    LocalDateTime anyTimestamp =
+        LocalDateTime.of(LocalDate.of(1234, 5, 6), LocalTime.ofSecondOfDay(12345));
+    TimestampColumn timestampColumn = TimestampColumn.of("timestamp_col", anyTimestamp);
+    TextColumn key = TextColumn.of("foo_col", "foo");
+    Put put =
+        new Put(
+            "ns",
+            "tbl",
+            Key.of(),
+            null,
+            null,
+            ImmutableMap.of(),
+            null,
+            ImmutableMap.of(
+                "foo_col",
+                key,
+                "timestamp_col",
+                timestampColumn,
+                "bool_col",
+                BooleanColumn.of("bool_col", true)));
+
+    // Act
+    LocalDateTime actualTimestamp = put.getTimestampValue("timestamp_col");
+
+    // Assert
+    assertThat(actualTimestamp).isEqualTo(anyTimestamp);
+  }
+
+  @Test
+  public void getTimestampTZValue_ProperValueGiven_ShouldReturnWhatsSet() {
+    // Arrange
+    Instant anyInstant = Instant.ofEpochSecond(12355);
+    TimestampTZColumn timestampTZColumn = TimestampTZColumn.of("timestampTZ_col", anyInstant);
+    TextColumn key = TextColumn.of("foo_col", "foo");
+    Put put =
+        new Put(
+            "ns",
+            "tbl",
+            Key.of(),
+            null,
+            null,
+            ImmutableMap.of(),
+            null,
+            ImmutableMap.of(
+                "foo_col",
+                key,
+                "timestampTZ_col",
+                timestampTZColumn,
+                "bool_col",
+                BooleanColumn.of("bool_col", true)));
+
+    // Act
+    Instant actualTimestampTZ = put.getTimestampTZValue("timestampTZ_col");
+
+    // Assert
+    assertThat(actualTimestampTZ).isEqualTo(anyInstant);
+  }
 }
diff --git a/core/src/test/java/com/scalar/db/api/UpdateBuilderTest.java b/core/src/test/java/com/scalar/db/api/UpdateBuilderTest.java
index 1d2691138b..6a8eb8462a 100644
--- a/core/src/test/java/com/scalar/db/api/UpdateBuilderTest.java
+++ b/core/src/test/java/com/scalar/db/api/UpdateBuilderTest.java
@@ -4,10 +4,18 @@
 import com.google.common.collect.ImmutableMap;
 import com.scalar.db.io.BigIntColumn;
+import com.scalar.db.io.DateColumn;
 import com.scalar.db.io.Key;
 import com.scalar.db.io.TextColumn;
+import com.scalar.db.io.TimeColumn;
+import com.scalar.db.io.TimestampColumn;
+import com.scalar.db.io.TimestampTZColumn;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
+import java.time.Instant;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
 import org.assertj.core.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -20,6 +28,12 @@ public class UpdateBuilderTest {
   private static final String TABLE_1 = "table1";
   private static final String TABLE_2 = "table2";
+
+  private static final LocalDate ANY_DATE = DateColumn.MAX_VALUE;
+  private static final LocalTime ANY_TIME = TimeColumn.MAX_VALUE;
+  private static final LocalDateTime ANY_TIMESTAMP = TimestampColumn.MAX_VALUE;
+  private static final Instant ANY_TIMESTAMPTZ = TimestampTZColumn.MAX_VALUE;
+
   @Mock private Key partitionKey1;
   @Mock private Key partitionKey2;
   @Mock private Key clusteringKey1;
@@ -88,6 +102,10 @@ public void build_WithAllParameters_ShouldBuildUpdateCorrectly() {
             .intValue("int1", Integer.MAX_VALUE)
             .intValue("int2", Integer.valueOf(Integer.MAX_VALUE))
             .textValue("text", "a_value")
+            .dateValue("date1", ANY_DATE)
+            .timeValue("time1", ANY_TIME)
.timestampValue("timestamp1", ANY_TIMESTAMP) + .timestampTZValue("timestampTZ1", ANY_TIMESTAMPTZ) .value(TextColumn.of("text2", "another_value")) .condition(condition1) .attribute("a1", "v1") @@ -101,7 +119,7 @@ public void build_WithAllParameters_ShouldBuildUpdateCorrectly() { assertThat(actual.forTable()).hasValue(TABLE_1); Assertions.assertThat(actual.getPartitionKey()).isEqualTo(partitionKey1); assertThat(actual.getClusteringKey()).hasValue(clusteringKey1); - assertThat(actual.getColumns().size()).isEqualTo(14); + assertThat(actual.getColumns().size()).isEqualTo(18); assertThat(actual.getColumns().get("bigint1").getBigIntValue()) .isEqualTo(BigIntColumn.MAX_VALUE); assertThat(actual.getColumns().get("bigint2").getBigIntValue()) @@ -123,6 +141,11 @@ public void build_WithAllParameters_ShouldBuildUpdateCorrectly() { .isEqualTo(Integer.valueOf(Integer.MAX_VALUE)); assertThat(actual.getColumns().get("text").getTextValue()).isEqualTo("a_value"); assertThat(actual.getColumns().get("text2").getTextValue()).isEqualTo("another_value"); + assertThat(actual.getColumns().get("date1").getDateValue()).isEqualTo(ANY_DATE); + assertThat(actual.getColumns().get("time1").getTimeValue()).isEqualTo(ANY_TIME); + assertThat(actual.getColumns().get("timestamp1").getTimestampValue()).isEqualTo(ANY_TIMESTAMP); + assertThat(actual.getColumns().get("timestampTZ1").getTimestampTZValue()) + .isEqualTo(ANY_TIMESTAMPTZ); assertThat(actual.getCondition()).hasValue(condition1); assertThat(actual.getAttributes()) .isEqualTo( @@ -155,6 +178,10 @@ public void build_WithAllValuesToNull_ShouldBuildUpdateCorrectly() { .floatValue("float", null) .intValue("int", null) .textValue("text", null) + .dateValue("date", null) + .timeValue("time", null) + .timestampValue("timestamp", null) + .timestampTZValue("timestampTZ", null) .build(); // Assert @@ -162,7 +189,7 @@ public void build_WithAllValuesToNull_ShouldBuildUpdateCorrectly() { assertThat(actual.forTable()).hasValue(TABLE_1); Assertions.assertThat(actual.getPartitionKey()).isEqualTo(partitionKey1); assertThat(actual.getClusteringKey()).isEmpty(); - assertThat(actual.getColumns().size()).isEqualTo(8); + assertThat(actual.getColumns().size()).isEqualTo(12); assertThat(actual.getColumns().get("bigint").hasNullValue()).isTrue(); assertThat(actual.getColumns().get("blob1").hasNullValue()).isTrue(); assertThat(actual.getColumns().get("blob2").hasNullValue()).isTrue(); @@ -171,6 +198,10 @@ public void build_WithAllValuesToNull_ShouldBuildUpdateCorrectly() { assertThat(actual.getColumns().get("float").hasNullValue()).isTrue(); assertThat(actual.getColumns().get("int").hasNullValue()).isTrue(); assertThat(actual.getColumns().get("text").hasNullValue()).isTrue(); + assertThat(actual.getColumns().get("date").hasNullValue()).isTrue(); + assertThat(actual.getColumns().get("time").hasNullValue()).isTrue(); + assertThat(actual.getColumns().get("timestamp").hasNullValue()).isTrue(); + assertThat(actual.getColumns().get("timestampTZ").hasNullValue()).isTrue(); assertThat(actual.getCondition()).isEmpty(); } @@ -218,7 +249,15 @@ public void build_FromExistingWithoutChange_ShouldCopy() { .intValue("int1", Integer.MAX_VALUE) .intValue("int2", Integer.valueOf(Integer.MAX_VALUE)) .textValue("text", "a_value") + .dateValue("date1", ANY_DATE) + .timeValue("time1", ANY_TIME) + .timestampValue("timestamp1", ANY_TIMESTAMP) + .timestampTZValue("timestampTZ1", ANY_TIMESTAMPTZ) .value(TextColumn.of("text2", "another_value")) + .dateValue("date1", ANY_DATE) + .timeValue("time1", ANY_TIME) + 
.timestampValue("timestamp1", ANY_TIMESTAMP) + .timestampTZValue("timestampTZ1", ANY_TIMESTAMPTZ) .condition(condition1) .build(); @@ -251,6 +290,10 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildUpdateWithUpdate .intValue("int1", Integer.MAX_VALUE) .intValue("int2", Integer.valueOf(Integer.MAX_VALUE)) .textValue("text", "a_value") + .dateValue("date1", DateColumn.MAX_VALUE) + .timeValue("time1", TimeColumn.MAX_VALUE) + .timestampValue("timestamp1", TimestampColumn.MAX_VALUE) + .timestampTZValue("timestampTZ1", TimestampTZColumn.MAX_VALUE) .value(TextColumn.of("text2", "another_value")) .condition(condition1) .attribute("a1", "v1") @@ -288,6 +331,11 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildUpdateWithUpdate .intValue("int1", Integer.MIN_VALUE) .intValue("int2", Integer.valueOf(Integer.MIN_VALUE)) .textValue("text", "another_value") + .dateValue("date1", LocalDate.ofEpochDay(123)) + .timeValue("time1", LocalTime.ofSecondOfDay(456)) + .timestampValue( + "timestamp1", LocalDateTime.of(LocalDate.ofEpochDay(12354), LocalTime.NOON)) + .timestampTZValue("timestampTZ1", Instant.ofEpochSecond(-12)) .value(TextColumn.of("text2", "foo")) .condition(condition2) .clearAttributes() @@ -308,7 +356,7 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildUpdateWithUpdate assertThat(newUpdate1.forTable()).hasValue(TABLE_2); Assertions.assertThat(newUpdate1.getPartitionKey()).isEqualTo(partitionKey2); assertThat(newUpdate1.getClusteringKey()).hasValue(clusteringKey2); - assertThat(newUpdate1.getColumns().size()).isEqualTo(14); + assertThat(newUpdate1.getColumns().size()).isEqualTo(18); assertThat(newUpdate1.getColumns().get("bigint1").getBigIntValue()) .isEqualTo(BigIntColumn.MIN_VALUE); assertThat(newUpdate1.getColumns().get("bigint2").getBigIntValue()) @@ -329,6 +377,14 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildUpdateWithUpdate assertThat(newUpdate1.getColumns().get("int2").getIntValue()) .isEqualTo(Integer.valueOf(Integer.MIN_VALUE)); assertThat(newUpdate1.getColumns().get("text").getTextValue()).isEqualTo("another_value"); + assertThat(newUpdate1.getColumns().get("date1").getDateValue()) + .isEqualTo(LocalDate.ofEpochDay(123)); + assertThat(newUpdate1.getColumns().get("time1").getTimeValue()) + .isEqualTo(LocalTime.ofSecondOfDay(456)); + assertThat(newUpdate1.getColumns().get("timestamp1").getTimestampValue()) + .isEqualTo(LocalDateTime.of(LocalDate.ofEpochDay(12354), LocalTime.NOON)); + assertThat(newUpdate1.getColumns().get("timestampTZ1").getTimestampTZValue()) + .isEqualTo(Instant.ofEpochSecond(-12)); assertThat(newUpdate1.getColumns().get("text2").getTextValue()).isEqualTo("foo"); assertThat(newUpdate1.getCondition()).hasValue(condition2); assertThat(newUpdate1.getAttributes()) diff --git a/core/src/test/java/com/scalar/db/api/UpsertBuilderTest.java b/core/src/test/java/com/scalar/db/api/UpsertBuilderTest.java index 7d43a9c878..ef3063e1c2 100644 --- a/core/src/test/java/com/scalar/db/api/UpsertBuilderTest.java +++ b/core/src/test/java/com/scalar/db/api/UpsertBuilderTest.java @@ -4,10 +4,18 @@ import com.google.common.collect.ImmutableMap; import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.Key; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.time.Instant; +import 
java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import org.assertj.core.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -20,6 +28,12 @@ public class UpsertBuilderTest { private static final String TABLE_1 = "table1"; private static final String TABLE_2 = "table2"; + + private static final LocalDate ANY_DATE = DateColumn.MAX_VALUE; + private static final LocalTime ANY_TIME = TimeColumn.MAX_VALUE; + private static final LocalDateTime ANY_TIMESTAMP = TimestampColumn.MAX_VALUE; + private static final Instant ANY_TIMESTAMPTZ = TimestampTZColumn.MAX_VALUE; + @Mock private Key partitionKey1; @Mock private Key partitionKey2; @Mock private Key clusteringKey1; @@ -84,6 +98,10 @@ public void build_WithAllParameters_ShouldBuildUpsertCorrectly() { .intValue("int1", Integer.MAX_VALUE) .intValue("int2", Integer.valueOf(Integer.MAX_VALUE)) .textValue("text", "a_value") + .dateValue("date", ANY_DATE) + .timeValue("time", ANY_TIME) + .timestampValue("timestamp", ANY_TIMESTAMP) + .timestampTZValue("timestamptz", ANY_TIMESTAMPTZ) .value(TextColumn.of("text2", "another_value")) .attribute("a1", "v1") .attributes(ImmutableMap.of("a2", "v2", "a3", "v3")) @@ -96,7 +114,7 @@ public void build_WithAllParameters_ShouldBuildUpsertCorrectly() { assertThat(actual.forTable()).hasValue(TABLE_1); Assertions.assertThat(actual.getPartitionKey()).isEqualTo(partitionKey1); assertThat(actual.getClusteringKey()).hasValue(clusteringKey1); - assertThat(actual.getColumns().size()).isEqualTo(14); + assertThat(actual.getColumns().size()).isEqualTo(18); assertThat(actual.getColumns().get("bigint1").getBigIntValue()) .isEqualTo(BigIntColumn.MAX_VALUE); assertThat(actual.getColumns().get("bigint2").getBigIntValue()) @@ -117,6 +135,11 @@ public void build_WithAllParameters_ShouldBuildUpsertCorrectly() { assertThat(actual.getColumns().get("int2").getIntValue()) .isEqualTo(Integer.valueOf(Integer.MAX_VALUE)); assertThat(actual.getColumns().get("text").getTextValue()).isEqualTo("a_value"); + assertThat(actual.getColumns().get("date").getDateValue()).isEqualTo(ANY_DATE); + assertThat(actual.getColumns().get("time").getTimeValue()).isEqualTo(ANY_TIME); + assertThat(actual.getColumns().get("timestamp").getTimestampValue()).isEqualTo(ANY_TIMESTAMP); + assertThat(actual.getColumns().get("timestamptz").getTimestampTZValue()) + .isEqualTo(ANY_TIMESTAMPTZ); assertThat(actual.getColumns().get("text2").getTextValue()).isEqualTo("another_value"); assertThat(actual.getAttributes()) .isEqualTo( @@ -149,6 +172,10 @@ public void build_WithAllValuesToNull_ShouldBuildUpsertCorrectly() { .floatValue("float", null) .intValue("int", null) .textValue("text", null) + .dateValue("date", null) + .timeValue("time", null) + .timestampValue("timestamp", null) + .timestampTZValue("timestamptz", null) .build(); // Assert @@ -156,7 +183,7 @@ public void build_WithAllValuesToNull_ShouldBuildUpsertCorrectly() { assertThat(actual.forTable()).hasValue(TABLE_1); Assertions.assertThat(actual.getPartitionKey()).isEqualTo(partitionKey1); assertThat(actual.getClusteringKey()).isEmpty(); - assertThat(actual.getColumns().size()).isEqualTo(8); + assertThat(actual.getColumns().size()).isEqualTo(12); assertThat(actual.getColumns().get("bigint").hasNullValue()).isTrue(); assertThat(actual.getColumns().get("blob1").hasNullValue()).isTrue(); assertThat(actual.getColumns().get("blob2").hasNullValue()).isTrue(); @@ -165,6 +192,10 @@ public void build_WithAllValuesToNull_ShouldBuildUpsertCorrectly() { 
assertThat(actual.getColumns().get("float").hasNullValue()).isTrue();
     assertThat(actual.getColumns().get("int").hasNullValue()).isTrue();
     assertThat(actual.getColumns().get("text").hasNullValue()).isTrue();
+    assertThat(actual.getColumns().get("date").hasNullValue()).isTrue();
+    assertThat(actual.getColumns().get("time").hasNullValue()).isTrue();
+    assertThat(actual.getColumns().get("timestamp").hasNullValue()).isTrue();
+    assertThat(actual.getColumns().get("timestamptz").hasNullValue()).isTrue();
   }
 
   @Test
@@ -210,6 +241,10 @@ public void build_FromExistingWithoutChange_ShouldCopy() {
         .intValue("int1", Integer.MAX_VALUE)
         .intValue("int2", Integer.valueOf(Integer.MAX_VALUE))
         .textValue("text", "a_value")
+        .dateValue("date", ANY_DATE)
+        .timeValue("time", ANY_TIME)
+        .timestampValue("timestamp", ANY_TIMESTAMP)
+        .timestampTZValue("timestamptz", ANY_TIMESTAMPTZ)
         .value(TextColumn.of("text2", "another_value"))
         .build();
 
@@ -242,6 +277,10 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildUpsertWithUpdate
         .intValue("int1", Integer.MAX_VALUE)
         .intValue("int2", Integer.valueOf(Integer.MAX_VALUE))
         .textValue("text", "a_value")
+        .dateValue("date", DateColumn.MAX_VALUE)
+        .timeValue("time", TimeColumn.MAX_VALUE)
+        .timestampValue("timestamp", TimestampColumn.MAX_VALUE)
+        .timestampTZValue("timestamptz", TimestampTZColumn.MAX_VALUE)
         .value(TextColumn.of("text2", "another_value"))
         .attribute("a1", "v1")
         .attributes(ImmutableMap.of("a2", "v2", "a3", "v3"))
@@ -278,6 +317,11 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildUpsertWithUpdate
         .intValue("int1", Integer.MIN_VALUE)
         .intValue("int2", Integer.valueOf(Integer.MIN_VALUE))
         .textValue("text", "another_value")
+        .dateValue("date", LocalDate.ofEpochDay(123))
+        .timeValue("time", LocalTime.ofSecondOfDay(456))
+        .timestampValue(
+            "timestamp", LocalDateTime.of(LocalDate.ofEpochDay(12354), LocalTime.NOON))
+        .timestampTZValue("timestamptz", Instant.ofEpochSecond(-12))
         .value(TextColumn.of("text2", "foo"))
         .clearAttributes()
         .attribute("a4", "v4")
@@ -297,7 +341,7 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildUpsertWithUpdate
     assertThat(newUpsert1.forTable()).hasValue(TABLE_2);
     Assertions.assertThat(newUpsert1.getPartitionKey()).isEqualTo(partitionKey2);
     assertThat(newUpsert1.getClusteringKey()).hasValue(clusteringKey2);
-    assertThat(newUpsert1.getColumns().size()).isEqualTo(14);
+    assertThat(newUpsert1.getColumns().size()).isEqualTo(18);
     assertThat(newUpsert1.getColumns().get("bigint1").getBigIntValue())
         .isEqualTo(BigIntColumn.MIN_VALUE);
     assertThat(newUpsert1.getColumns().get("bigint2").getBigIntValue())
@@ -319,6 +363,14 @@ public void build_FromExistingAndUpdateAllParameters_ShouldBuildUpsertWithUpdate
         .isEqualTo(Integer.valueOf(Integer.MIN_VALUE));
     assertThat(newUpsert1.getColumns().get("text").getTextValue()).isEqualTo("another_value");
     assertThat(newUpsert1.getColumns().get("text2").getTextValue()).isEqualTo("foo");
+    assertThat(newUpsert1.getColumns().get("date").getDateValue())
+        .isEqualTo(LocalDate.ofEpochDay(123));
+    assertThat(newUpsert1.getColumns().get("time").getTimeValue())
+        .isEqualTo(LocalTime.ofSecondOfDay(456));
+    assertThat(newUpsert1.getColumns().get("timestamp").getTimestampValue())
+        .isEqualTo(LocalDateTime.of(LocalDate.ofEpochDay(12354), LocalTime.NOON));
+    assertThat(newUpsert1.getColumns().get("timestamptz").getTimestampTZValue())
+        .isEqualTo(Instant.ofEpochSecond(-12));
     assertThat(newUpsert1.getAttributes())
         .isEqualTo(
             ImmutableMap.of(
diff --git 
a/core/src/test/java/com/scalar/db/common/ResultImplTest.java b/core/src/test/java/com/scalar/db/common/ResultImplTest.java index 8f06cdb147..82aa9708ac 100644 --- a/core/src/test/java/com/scalar/db/common/ResultImplTest.java +++ b/core/src/test/java/com/scalar/db/common/ResultImplTest.java @@ -14,6 +14,7 @@ import com.scalar.db.io.BooleanValue; import com.scalar.db.io.Column; import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.DoubleValue; import com.scalar.db.io.FloatColumn; @@ -23,9 +24,15 @@ import com.scalar.db.io.Key; import com.scalar.db.io.TextColumn; import com.scalar.db.io.TextValue; -import com.scalar.db.io.Value; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -47,7 +54,14 @@ public class ResultImplTest { private static final String ANY_COLUMN_NAME_5 = "col5"; private static final String ANY_COLUMN_NAME_6 = "col6"; private static final String ANY_COLUMN_NAME_7 = "col7"; - + private static final String ANY_COLUMN_NAME_8 = "col8"; + private static final String ANY_COLUMN_NAME_9 = "col9"; + private static final String ANY_COLUMN_NAME_10 = "col10"; + private static final String ANY_COLUMN_NAME_11 = "col11"; + private static final LocalDate ANY_DATE = DateColumn.MAX_VALUE; + private static final LocalTime ANY_TIME = TimeColumn.MAX_VALUE; + private static final LocalDateTime ANY_TIMESTAMP = TimestampColumn.MAX_VALUE; + private static final Instant ANY_TIMESTAMPTZ = TimestampTZColumn.MAX_VALUE; private static final TableMetadata TABLE_METADATA = TableMetadata.newBuilder() .addColumn(ANY_NAME_1, DataType.TEXT) @@ -59,6 +73,10 @@ public class ResultImplTest { .addColumn(ANY_COLUMN_NAME_5, DataType.DOUBLE) .addColumn(ANY_COLUMN_NAME_6, DataType.TEXT) .addColumn(ANY_COLUMN_NAME_7, DataType.BLOB) + .addColumn(ANY_COLUMN_NAME_8, DataType.DATE) + .addColumn(ANY_COLUMN_NAME_9, DataType.TIME) + .addColumn(ANY_COLUMN_NAME_10, DataType.TIMESTAMP) + .addColumn(ANY_COLUMN_NAME_11, DataType.TIMESTAMPTZ) .addPartitionKey(ANY_NAME_1) .addClusteringKey(ANY_NAME_2) .build(); @@ -80,6 +98,10 @@ public void setUp() { .put( ANY_COLUMN_NAME_7, BlobColumn.of(ANY_COLUMN_NAME_7, "bytes".getBytes(StandardCharsets.UTF_8))) + .put(ANY_COLUMN_NAME_8, DateColumn.of(ANY_COLUMN_NAME_8, ANY_DATE)) + .put(ANY_COLUMN_NAME_9, TimeColumn.of(ANY_COLUMN_NAME_9, ANY_TIME)) + .put(ANY_COLUMN_NAME_10, TimestampColumn.of(ANY_COLUMN_NAME_10, ANY_TIMESTAMP)) + .put(ANY_COLUMN_NAME_11, TimestampTZColumn.of(ANY_COLUMN_NAME_11, ANY_TIMESTAMPTZ)) .build(); } @@ -122,7 +144,11 @@ public void getValue_ProperValuesGivenInConstructor_ShouldReturnWhatsSet() { ANY_COLUMN_NAME_4, ANY_COLUMN_NAME_5, ANY_COLUMN_NAME_6, - ANY_COLUMN_NAME_7))); + ANY_COLUMN_NAME_7, + ANY_COLUMN_NAME_8, + ANY_COLUMN_NAME_9, + ANY_COLUMN_NAME_10, + ANY_COLUMN_NAME_11))); assertThat(result.contains(ANY_NAME_1)).isTrue(); assertThat(result.isNull(ANY_NAME_1)).isFalse(); @@ -174,6 +200,26 @@ public void getValue_ProperValuesGivenInConstructor_ShouldReturnWhatsSet() { .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); assertThat(result.getAsObject(ANY_COLUMN_NAME_7)) .isEqualTo(ByteBuffer.wrap("bytes".getBytes(StandardCharsets.UTF_8))); + + 
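// Editor's aside (illustrative sketch, not part of the original patch): the new
+    // temporal getters return java.time values directly, so no manual conversion is
+    // needed when reading a Result, e.g.:
+    LocalDate loadedDate = result.getDate(ANY_COLUMN_NAME_8);
+    assertThat(loadedDate).isEqualTo(ANY_DATE);
+
+    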
assertThat(result.contains(ANY_COLUMN_NAME_8)).isTrue();
+    assertThat(result.isNull(ANY_COLUMN_NAME_8)).isFalse();
+    assertThat(result.getDate(ANY_COLUMN_NAME_8)).isEqualTo(ANY_DATE);
+    assertThat(result.getAsObject(ANY_COLUMN_NAME_8)).isEqualTo(ANY_DATE);
+
+    assertThat(result.contains(ANY_COLUMN_NAME_9)).isTrue();
+    assertThat(result.isNull(ANY_COLUMN_NAME_9)).isFalse();
+    assertThat(result.getTime(ANY_COLUMN_NAME_9)).isEqualTo(ANY_TIME);
+    assertThat(result.getAsObject(ANY_COLUMN_NAME_9)).isEqualTo(ANY_TIME);
+
+    assertThat(result.contains(ANY_COLUMN_NAME_10)).isTrue();
+    assertThat(result.isNull(ANY_COLUMN_NAME_10)).isFalse();
+    assertThat(result.getTimestamp(ANY_COLUMN_NAME_10)).isEqualTo(ANY_TIMESTAMP);
+    assertThat(result.getAsObject(ANY_COLUMN_NAME_10)).isEqualTo(ANY_TIMESTAMP);
+
+    assertThat(result.contains(ANY_COLUMN_NAME_11)).isTrue();
+    assertThat(result.isNull(ANY_COLUMN_NAME_11)).isFalse();
+    assertThat(result.getTimestampTZ(ANY_COLUMN_NAME_11)).isEqualTo(ANY_TIMESTAMPTZ);
+    assertThat(result.getAsObject(ANY_COLUMN_NAME_11)).isEqualTo(ANY_TIMESTAMPTZ);
   }
 
   @Test
@@ -383,14 +429,14 @@ public void getValues_ProperValuesGivenInConstructor_ShouldReturnWhatsSet() {
     ResultImpl result = new ResultImpl(columns, TABLE_METADATA);
 
     // Act
-    Map<String, Value<?>> actual = result.getValues();
+    Map<String, Column<?>> actual = result.getColumns();
 
     // Assert
-    assertThat(actual.get(ANY_NAME_1)).isEqualTo(new TextValue(ANY_NAME_1, ANY_TEXT_1));
-    assertThat(actual.get(ANY_NAME_2)).isEqualTo(new TextValue(ANY_NAME_2, ANY_TEXT_2));
-    assertThat(actual.get(ANY_COLUMN_NAME_1)).isEqualTo(new BooleanValue(ANY_COLUMN_NAME_1, true));
+    assertThat(actual.get(ANY_NAME_1)).isEqualTo(TextColumn.of(ANY_NAME_1, ANY_TEXT_1));
+    assertThat(actual.get(ANY_NAME_2)).isEqualTo(TextColumn.of(ANY_NAME_2, ANY_TEXT_2));
+    assertThat(actual.get(ANY_COLUMN_NAME_1)).isEqualTo(BooleanColumn.of(ANY_COLUMN_NAME_1, true));
     assertThat(actual.get(ANY_COLUMN_NAME_7))
-        .isEqualTo(new BlobValue(ANY_COLUMN_NAME_7, "bytes".getBytes(StandardCharsets.UTF_8)));
+        .isEqualTo(BlobColumn.of(ANY_COLUMN_NAME_7, "bytes".getBytes(StandardCharsets.UTF_8)));
   }
 
   @Test
@@ -400,7 +446,7 @@ public void getValues_NoValuesGivenInConstructor_ShouldReturnEmptyValues() {
     // Arrange
     ResultImpl result = new ResultImpl(emptyValues, TABLE_METADATA);
 
     // Act
-    Map<String, Value<?>> actual = result.getValues();
+    Map<String, Column<?>> actual = result.getColumns();
 
     // Assert
     assertThat(actual.isEmpty()).isTrue();
@@ -410,10 +456,10 @@ public void getValues_TryToModifyReturned_ShouldThrowException() {
     // Arrange
     ResultImpl result = new ResultImpl(columns, TABLE_METADATA);
-    Map<String, Value<?>> values = result.getValues();
+    Map<String, Column<?>> values = result.getColumns();
 
     // Act Assert
-    assertThatThrownBy(() -> values.put("new", new TextValue(ANY_NAME_1, ANY_TEXT_1)))
+    assertThatThrownBy(() -> values.put("new", TextColumn.of(ANY_NAME_1, ANY_TEXT_1)))
         .isInstanceOf(UnsupportedOperationException.class);
   }
 
@@ -426,7 +472,7 @@ public void getColumns_ProperValuesGivenInConstructor_ShouldReturnWhatsSet() {
     Map<String, Column<?>> columns = result.getColumns();
 
     // Assert
-    assertThat(columns.size()).isEqualTo(9);
+    assertThat(columns.size()).isEqualTo(13);
     assertThat(columns.get(ANY_NAME_1).hasNullValue()).isFalse();
     assertThat(columns.get(ANY_NAME_1).getTextValue()).isEqualTo(ANY_TEXT_1);
     assertThat(columns.get(ANY_NAME_2).hasNullValue()).isFalse();
@@ -446,6 +492,14 @@ 
assertThat(columns.get(ANY_COLUMN_NAME_7).hasNullValue()).isFalse(); assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValue()) .isEqualTo(ByteBuffer.wrap("bytes".getBytes(StandardCharsets.UTF_8))); + assertThat(columns.get(ANY_COLUMN_NAME_8).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_8).getDateValue()).isEqualTo(ANY_DATE); + assertThat(columns.get(ANY_COLUMN_NAME_9).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_9).getTimeValue()).isEqualTo(ANY_TIME); + assertThat(columns.get(ANY_COLUMN_NAME_10).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_10).getTimestampValue()).isEqualTo(ANY_TIMESTAMP); + assertThat(columns.get(ANY_COLUMN_NAME_11).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_11).getTimestampTZValue()).isEqualTo(ANY_TIMESTAMPTZ); } @Test diff --git a/core/src/test/java/com/scalar/db/io/BigIntColumnTest.java b/core/src/test/java/com/scalar/db/io/BigIntColumnTest.java index 3cb8aae0ab..d9df170b74 100644 --- a/core/src/test/java/com/scalar/db/io/BigIntColumnTest.java +++ b/core/src/test/java/com/scalar/db/io/BigIntColumnTest.java @@ -33,6 +33,11 @@ public void of_ProperValueGiven_ShouldReturnWhatsSet() { .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getBlobValueAsBytes) .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test @@ -72,6 +77,11 @@ public void ofNull_ProperValueGiven_ShouldReturnWhatsSet() { .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getBlobValueAsBytes) .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test @@ -99,6 +109,11 @@ public void copyWith_ProperValueGiven_ShouldReturnSameValueButDifferentName() { .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getBlobValueAsBytes) .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test diff --git a/core/src/test/java/com/scalar/db/io/BlobColumnTest.java b/core/src/test/java/com/scalar/db/io/BlobColumnTest.java index 4ebc7f86e2..16418cf497 100644 --- a/core/src/test/java/com/scalar/db/io/BlobColumnTest.java +++ b/core/src/test/java/com/scalar/db/io/BlobColumnTest.java @@ -37,6 +37,11 @@ public void of_ProperByteBufferValueGiven_ShouldReturnWhatsSet() { assertThatThrownBy(column::getFloatValue).isInstanceOf(UnsupportedOperationException.class); 
assertThatThrownBy(column::getDoubleValue).isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getTextValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test @@ -66,6 +71,11 @@ public void of_ProperByteArrayValueGiven_ShouldReturnWhatsSet() { assertThatThrownBy(column::getFloatValue).isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getDoubleValue).isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getTextValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test @@ -90,6 +100,11 @@ public void ofNull_ProperValueGiven_ShouldReturnWhatsSet() { assertThatThrownBy(column::getFloatValue).isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getDoubleValue).isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getTextValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test @@ -121,6 +136,11 @@ public void copyWith_ProperValueGiven_ShouldReturnSameValueButDifferentName() { assertThatThrownBy(column::getFloatValue).isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getDoubleValue).isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getTextValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test diff --git a/core/src/test/java/com/scalar/db/io/BooleanColumnTest.java b/core/src/test/java/com/scalar/db/io/BooleanColumnTest.java index e5f7975442..5daa9d529b 100644 --- a/core/src/test/java/com/scalar/db/io/BooleanColumnTest.java +++ b/core/src/test/java/com/scalar/db/io/BooleanColumnTest.java @@ -32,6 +32,11 @@ public void of_ProperValueGiven_ShouldReturnWhatsSet() { .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getBlobValueAsBytes) .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + 
assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test @@ -58,6 +63,11 @@ public void ofNull_ProperValueGiven_ShouldReturnWhatsSet() { .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getBlobValueAsBytes) .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test @@ -85,6 +95,11 @@ public void copyWith_ProperValueGiven_ShouldReturnSameValueButDifferentName() { .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getBlobValueAsBytes) .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test diff --git a/core/src/test/java/com/scalar/db/io/DateColumnTest.java b/core/src/test/java/com/scalar/db/io/DateColumnTest.java new file mode 100644 index 0000000000..f8465607f1 --- /dev/null +++ b/core/src/test/java/com/scalar/db/io/DateColumnTest.java @@ -0,0 +1,137 @@ +package com.scalar.db.io; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assertions.*; + +import java.time.Clock; +import java.time.LocalDate; +import org.junit.jupiter.api.Test; + +class DateColumnTest { + private static final LocalDate ANY_DATE = LocalDate.now(Clock.systemUTC()); + + @Test + public void of_ProperValueGiven_ShouldReturnWhatsSet() { + // Arrange + + // Act + DateColumn column = DateColumn.of("col", ANY_DATE); + + // Assert + assertThat(column.getName()).isEqualTo("col"); + assertThat(column.getValue()).isPresent(); + assertThat(column.getValue().get()).isEqualTo(ANY_DATE); + assertThat(column.getDateValue()).isEqualTo(ANY_DATE); + assertThat(column.getDataType()).isEqualTo(DataType.DATE); + assertThat(column.hasNullValue()).isFalse(); + assertThat(column.getValueAsObject()).isEqualTo(ANY_DATE); + assertThatThrownBy(column::getIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBigIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getFloatValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDoubleValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTextValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsByteBuffer) + .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsBytes) + 
.isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); + } + + @Test + public void ofNull_ProperValueGiven_ShouldReturnWhatsSet() { + // Arrange + + // Act + DateColumn column = DateColumn.ofNull("col"); + + // Assert + assertThat(column.getName()).isEqualTo("col"); + assertThat(column.getValue()).isNotPresent(); + assertThat(column.getDateValue()).isNull(); + assertThat(column.getDataType()).isEqualTo(DataType.DATE); + assertThat(column.hasNullValue()).isTrue(); + assertThat(column.getValueAsObject()).isNull(); + assertThatThrownBy(column::getIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBigIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getFloatValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDoubleValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTextValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsByteBuffer) + .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsBytes) + .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); + } + + @Test + public void copyWith_ProperValueGiven_ShouldReturnSameValueButDifferentName() { + // Arrange + + // Act + DateColumn column = DateColumn.of("col", ANY_DATE).copyWith("col2"); + + // Assert + assertThat(column.getName()).isEqualTo("col2"); + assertThat(column.getValue()).isPresent(); + assertThat(column.getValue().get()).isEqualTo(ANY_DATE); + assertThat(column.getDateValue()).isEqualTo(ANY_DATE); + assertThat(column.getDataType()).isEqualTo(DataType.DATE); + assertThat(column.hasNullValue()).isFalse(); + assertThat(column.getValueAsObject()).isEqualTo(ANY_DATE); + assertThatThrownBy(column::getIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBigIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getFloatValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDoubleValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTextValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsByteBuffer) + .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsBytes) + .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); + 
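// Editor's aside (illustrative sketch, not part of the original patch): copyWith()
+    // is the idiomatic way to reuse a value under another column name, e.g. when
+    // projecting a column onto a differently named schema:
+    DateColumn renamed = column.copyWith("dob");
+    assertThat(renamed.getName()).isEqualTo("dob");
+    assertThat(renamed.getDateValue()).isEqualTo(ANY_DATE);
+  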
} + + @Test + public void compareTo_ShouldReturnProperResults() { + // Arrange + DateColumn column = DateColumn.of("col", ANY_DATE); + + // Act Assert + assertThat(column.compareTo(DateColumn.of("col", ANY_DATE))).isZero(); + assertThat(column.compareTo(DateColumn.of("col", ANY_DATE.minusDays(1)))).isPositive(); + assertThat(column.compareTo(DateColumn.of("col", ANY_DATE.plusDays(1)))).isNegative(); + assertThat(column.compareTo(DateColumn.ofNull("col"))).isPositive(); + } + + @Test + public void constructor_valueInsideRange_ShouldNotThrowException() { + // Act Assert + assertDoesNotThrow(() -> DateColumn.of("col", DateColumn.MIN_VALUE)); + assertDoesNotThrow(() -> DateColumn.of("col", DateColumn.MAX_VALUE)); + } + + @Test + public void constructor_valueOutOfRange_ShouldThrowIllegalArgumentException() { + // Arrange + LocalDate dateBeforeRangeMin = DateColumn.MIN_VALUE.minusDays(1); + LocalDate dateAfterRangeMax = DateColumn.MAX_VALUE.plusDays(1); + + // Act Assert + assertThatThrownBy(() -> DateColumn.of("col", dateBeforeRangeMin)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> DateColumn.of("col", dateAfterRangeMax)) + .isInstanceOf(IllegalArgumentException.class); + } +} diff --git a/core/src/test/java/com/scalar/db/io/DoubleColumnTest.java b/core/src/test/java/com/scalar/db/io/DoubleColumnTest.java index bc3a260720..c7b668a4b3 100644 --- a/core/src/test/java/com/scalar/db/io/DoubleColumnTest.java +++ b/core/src/test/java/com/scalar/db/io/DoubleColumnTest.java @@ -32,6 +32,11 @@ public void of_ProperValueGiven_ShouldReturnWhatsSet() { .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getBlobValueAsBytes) .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test @@ -58,6 +63,11 @@ public void ofNull_ProperValueGiven_ShouldReturnWhatsSet() { .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getBlobValueAsBytes) .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test @@ -85,6 +95,11 @@ public void copyWith_ProperValueGiven_ShouldReturnSameValueButDifferentName() { .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getBlobValueAsBytes) .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test diff --git a/core/src/test/java/com/scalar/db/io/FloatColumnTest.java b/core/src/test/java/com/scalar/db/io/FloatColumnTest.java 
index 3119cda8ff..7d5f3724db 100644 --- a/core/src/test/java/com/scalar/db/io/FloatColumnTest.java +++ b/core/src/test/java/com/scalar/db/io/FloatColumnTest.java @@ -32,6 +32,11 @@ public void of_ProperValueGiven_ShouldReturnWhatsSet() { .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getBlobValueAsBytes) .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test @@ -58,6 +63,11 @@ public void ofNull_ProperValueGiven_ShouldReturnWhatsSet() { .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getBlobValueAsBytes) .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test @@ -85,6 +95,11 @@ public void copyWith_ProperValueGiven_ShouldReturnSameValueButDifferentName() { .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getBlobValueAsBytes) .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test diff --git a/core/src/test/java/com/scalar/db/io/IntColumnTest.java b/core/src/test/java/com/scalar/db/io/IntColumnTest.java index 243c14c42f..3c93cd2923 100644 --- a/core/src/test/java/com/scalar/db/io/IntColumnTest.java +++ b/core/src/test/java/com/scalar/db/io/IntColumnTest.java @@ -32,6 +32,11 @@ public void of_ProperValueGiven_ShouldReturnWhatsSet() { .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getBlobValueAsBytes) .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test @@ -58,6 +63,11 @@ public void ofNull_ProperValueGiven_ShouldReturnWhatsSet() { .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getBlobValueAsBytes) .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + 
.isInstanceOf(UnsupportedOperationException.class); } @Test @@ -85,6 +95,11 @@ public void copyWith_ProperValueGiven_ShouldReturnSameValueButDifferentName() { .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getBlobValueAsBytes) .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test diff --git a/core/src/test/java/com/scalar/db/io/KeyTest.java b/core/src/test/java/com/scalar/db/io/KeyTest.java index 8cf2ac9abd..8b264043ff 100644 --- a/core/src/test/java/com/scalar/db/io/KeyTest.java +++ b/core/src/test/java/com/scalar/db/io/KeyTest.java @@ -5,6 +5,10 @@ import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.Arrays; import java.util.List; import org.junit.jupiter.api.Test; @@ -20,6 +24,10 @@ public class KeyTest { private static final String ANY_TEXT_4 = "text4"; private static final int ANY_INT_1 = 10; private static final int ANY_INT_2 = 20; + private static final LocalDate ANY_DATE = DateColumn.MAX_VALUE; + private static final LocalTime ANY_TIME = TimeColumn.MAX_VALUE; + private static final LocalDateTime ANY_TIMESTAMP = TimestampColumn.MAX_VALUE; + private static final Instant ANY_TIMESTAMPTZ = TimestampTZColumn.MAX_VALUE; @Test public void constructor_WithSingleBooleanValue_ShouldReturnWhatsSet() { @@ -185,6 +193,18 @@ public void constructor_WithMultipleNamesAndValues_ShouldReturnWhatsSet() { "key9", 2468); Key key4 = new Key("key1", true, "key2", 5678, "key3", 1234L, "key4", 4.56f, "key5", 1.23); + Key key5 = + new Key( + "key1", + ANY_DATE, + "key2", + ANY_TIME, + "key3", + ANY_TIMESTAMP, + "key4", + ANY_TIMESTAMPTZ, + "key5", + 1.23); // Act Assert List> values1 = key1.get(); @@ -249,16 +269,36 @@ public void constructor_WithMultipleNamesAndValues_ShouldReturnWhatsSet() { assertThat(values4.get(4)).isEqualTo(new DoubleValue("key5", 1.23)); assertThat(key4.size()).isEqualTo(5); - assertThat(key1.getColumnName(0)).isEqualTo("key1"); - assertThat(key1.getBooleanValue(0)).isEqualTo(true); - assertThat(key1.getColumnName(1)).isEqualTo("key2"); - assertThat(key1.getIntValue(1)).isEqualTo(5678); + assertThat(key4.getColumnName(0)).isEqualTo("key1"); + assertThat(key4.getBooleanValue(0)).isEqualTo(true); + assertThat(key4.getColumnName(1)).isEqualTo("key2"); + assertThat(key4.getIntValue(1)).isEqualTo(5678); assertThat(key4.getColumnName(2)).isEqualTo("key3"); assertThat(key4.getBigIntValue(2)).isEqualTo(1234L); assertThat(key4.getColumnName(3)).isEqualTo("key4"); assertThat(key4.getFloatValue(3)).isEqualTo(4.56f); assertThat(key4.getColumnName(4)).isEqualTo("key5"); assertThat(key4.getDoubleValue(4)).isEqualTo(1.23); + + List> columns5 = key5.getColumns(); + assertThat(columns5.size()).isEqualTo(5); + assertThat(columns5.get(0)).isEqualTo(DateColumn.of("key1", ANY_DATE)); + assertThat(columns5.get(1)).isEqualTo(TimeColumn.of("key2", ANY_TIME)); + assertThat(columns5.get(2)).isEqualTo(TimestampColumn.of("key3", ANY_TIMESTAMP)); + assertThat(columns5.get(3)).isEqualTo(TimestampTZColumn.of("key4", ANY_TIMESTAMPTZ)); + 
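// Editor's aside (illustrative sketch, not part of the original patch): the varargs
+    // constructor infers each column type from the value's runtime type, so LocalDate,
+    // LocalTime, LocalDateTime, and Instant map to DATE, TIME, TIMESTAMP, and TIMESTAMPTZ:
+    Key inferred = new Key("d", ANY_DATE, "tz", ANY_TIMESTAMPTZ);
+    assertThat(inferred.getColumns().get(0)).isEqualTo(DateColumn.of("d", ANY_DATE));
+
+    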
assertThat(columns5.get(4)).isEqualTo(DoubleColumn.of("key5", 1.23)); + + assertThat(key5.size()).isEqualTo(5); + assertThat(key5.getColumnName(0)).isEqualTo("key1"); + assertThat(key5.getDateValue(0)).isEqualTo(ANY_DATE); + assertThat(key5.getColumnName(1)).isEqualTo("key2"); + assertThat(key5.getTimeValue(1)).isEqualTo(ANY_TIME); + assertThat(key5.getColumnName(2)).isEqualTo("key3"); + assertThat(key5.getTimestampValue(2)).isEqualTo(ANY_TIMESTAMP); + assertThat(key5.getColumnName(3)).isEqualTo("key4"); + assertThat(key5.getTimestampTZValue(3)).isEqualTo(ANY_TIMESTAMPTZ); + assertThat(key5.getColumnName(4)).isEqualTo("key5"); + assertThat(key5.getDoubleValue(4)).isEqualTo(1.23); } @Test @@ -412,6 +452,74 @@ public void ofBlob_ByteBufferValueGiven_ShouldReturnWhatsSet() { assertThat(key.getBlobValueAsBytes(0)).isEqualTo(value); } + @Test + public void ofDate_ShouldReturnWhatsSet() { + // Arrange + String name = ANY_NAME_1; + Key key = Key.ofDate(name, ANY_DATE); + + // Act Assert + List> columns = key.getColumns(); + assertThat(columns.size()).isEqualTo(1); + assertThat(columns.get(0).getName()).isEqualTo(name); + assertThat(columns.get(0).getDateValue()).isEqualTo(ANY_DATE); + + assertThat(key.size()).isEqualTo(1); + assertThat(key.getColumnName(0)).isEqualTo(name); + assertThat(key.getDateValue(0)).isEqualTo(ANY_DATE); + } + + @Test + public void ofTime_ShouldReturnWhatsSet() { + // Arrange + String name = ANY_NAME_1; + Key key = Key.ofTime(name, ANY_TIME); + + // Act Assert + List> columns = key.getColumns(); + assertThat(columns.size()).isEqualTo(1); + assertThat(columns.get(0).getName()).isEqualTo(name); + assertThat(columns.get(0).getTimeValue()).isEqualTo(ANY_TIME); + + assertThat(key.size()).isEqualTo(1); + assertThat(key.getColumnName(0)).isEqualTo(name); + assertThat(key.getTimeValue(0)).isEqualTo(ANY_TIME); + } + + @Test + public void ofTimestamp_ShouldReturnWhatsSet() { + // Arrange + String name = ANY_NAME_1; + Key key = Key.ofTimestamp(name, ANY_TIMESTAMP); + + // Act Assert + List> columns = key.getColumns(); + assertThat(columns.size()).isEqualTo(1); + assertThat(columns.get(0).getName()).isEqualTo(name); + assertThat(columns.get(0).getTimestampValue()).isEqualTo(ANY_TIMESTAMP); + + assertThat(key.size()).isEqualTo(1); + assertThat(key.getColumnName(0)).isEqualTo(name); + assertThat(key.getTimestampValue(0)).isEqualTo(ANY_TIMESTAMP); + } + + @Test + public void ofTimestampTZ_ShouldReturnWhatsSet() { + // Arrange + String name = ANY_NAME_1; + Key key = Key.ofTimestampTZ(name, ANY_TIMESTAMPTZ); + + // Act Assert + List> columns = key.getColumns(); + assertThat(columns.size()).isEqualTo(1); + assertThat(columns.get(0).getName()).isEqualTo(name); + assertThat(columns.get(0).getTimestampTZValue()).isEqualTo(ANY_TIMESTAMPTZ); + + assertThat(key.size()).isEqualTo(1); + assertThat(key.getColumnName(0)).isEqualTo(name); + assertThat(key.getTimestampTZValue(0)).isEqualTo(ANY_TIMESTAMPTZ); + } + @Test public void of_ShouldReturnWhatsSet() { // Arrange @@ -428,6 +536,18 @@ public void of_ShouldReturnWhatsSet() { "key9", 2468); Key key4 = Key.of("key1", true, "key2", 5678, "key3", 1234L, "key4", 4.56f, "key5", 1.23); + Key key5 = + Key.of( + "key1", + ANY_DATE, + "key2", + ANY_TIME, + "key3", + ANY_TIMESTAMP, + "key4", + ANY_TIMESTAMPTZ, + "key5", + 1.23); // Act Assert List> values1 = key1.get(); @@ -502,6 +622,26 @@ public void of_ShouldReturnWhatsSet() { assertThat(key4.getFloatValue(3)).isEqualTo(4.56f); assertThat(key4.getColumnName(4)).isEqualTo("key5"); 
assertThat(key4.getDoubleValue(4)).isEqualTo(1.23); + + List> columns5 = key5.getColumns(); + assertThat(columns5.size()).isEqualTo(5); + assertThat(columns5.get(0)).isEqualTo(DateColumn.of("key1", ANY_DATE)); + assertThat(columns5.get(1)).isEqualTo(TimeColumn.of("key2", ANY_TIME)); + assertThat(columns5.get(2)).isEqualTo(TimestampColumn.of("key3", ANY_TIMESTAMP)); + assertThat(columns5.get(3)).isEqualTo(TimestampTZColumn.of("key4", ANY_TIMESTAMPTZ)); + assertThat(columns5.get(4)).isEqualTo(DoubleColumn.of("key5", 1.23)); + + assertThat(key5.size()).isEqualTo(5); + assertThat(key5.getColumnName(0)).isEqualTo("key1"); + assertThat(key5.getDateValue(0)).isEqualTo(ANY_DATE); + assertThat(key5.getColumnName(1)).isEqualTo("key2"); + assertThat(key5.getTimeValue(1)).isEqualTo(ANY_TIME); + assertThat(key5.getColumnName(2)).isEqualTo("key3"); + assertThat(key5.getTimestampValue(2)).isEqualTo(ANY_TIMESTAMP); + assertThat(key5.getColumnName(3)).isEqualTo("key4"); + assertThat(key5.getTimestampTZValue(3)).isEqualTo(ANY_TIMESTAMPTZ); + assertThat(key5.getColumnName(4)).isEqualTo("key5"); + assertThat(key5.getDoubleValue(4)).isEqualTo(1.23); } @Test @@ -599,13 +739,17 @@ public void getColumns_ProperKeysGivenInBuilder_ShouldReturnWhatsSet() { .add(DoubleColumn.of("key5", 1.23)) .add(TextColumn.of("key6", "string_key")) .add(BlobColumn.of("key7", "blob_key".getBytes(StandardCharsets.UTF_8))) + .addTimestampTZ("key8", ANY_TIMESTAMPTZ) + .addTime("key9", ANY_TIME) + .addDate("key10", ANY_DATE) + .addTimestamp("key11", ANY_TIMESTAMP) .build(); // Act List> columns = key.getColumns(); // Assert - assertThat(columns.size()).isEqualTo(7); + assertThat(columns.size()).isEqualTo(11); assertThat(columns.get(0).getName()).isEqualTo("key1"); assertThat(columns.get(0).getBooleanValue()).isEqualTo(true); assertThat(columns.get(1).getName()).isEqualTo("key2"); @@ -621,6 +765,14 @@ public void getColumns_ProperKeysGivenInBuilder_ShouldReturnWhatsSet() { assertThat(columns.get(6).getName()).isEqualTo("key7"); assertThat(columns.get(6).getBlobValue()) .isEqualTo(ByteBuffer.wrap("blob_key".getBytes(StandardCharsets.UTF_8))); + assertThat(columns.get(7).getName()).isEqualTo("key8"); + assertThat(columns.get(7).getTimestampTZValue()).isEqualTo(ANY_TIMESTAMPTZ); + assertThat(columns.get(8).getName()).isEqualTo("key9"); + assertThat(columns.get(8).getTimeValue()).isEqualTo(ANY_TIME); + assertThat(columns.get(9).getName()).isEqualTo("key10"); + assertThat(columns.get(9).getDateValue()).isEqualTo(ANY_DATE); + assertThat(columns.get(10).getName()).isEqualTo("key11"); + assertThat(columns.get(10).getTimestampValue()).isEqualTo(ANY_TIMESTAMP); } @Test diff --git a/core/src/test/java/com/scalar/db/io/TextColumnTest.java b/core/src/test/java/com/scalar/db/io/TextColumnTest.java index f499835471..8830dd3142 100644 --- a/core/src/test/java/com/scalar/db/io/TextColumnTest.java +++ b/core/src/test/java/com/scalar/db/io/TextColumnTest.java @@ -32,6 +32,11 @@ public void of_ProperValueGiven_ShouldReturnWhatsSet() { .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getBlobValueAsBytes) .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + 
.isInstanceOf(UnsupportedOperationException.class); } @Test @@ -58,6 +63,11 @@ public void ofNull_ProperValueGiven_ShouldReturnWhatsSet() { .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getBlobValueAsBytes) .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test @@ -85,6 +95,11 @@ public void copyWith_ProperValueGiven_ShouldReturnSameValueButDifferentName() { .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(column::getBlobValueAsBytes) .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); } @Test diff --git a/core/src/test/java/com/scalar/db/io/TimeColumnTest.java b/core/src/test/java/com/scalar/db/io/TimeColumnTest.java new file mode 100644 index 0000000000..013aa7b1e6 --- /dev/null +++ b/core/src/test/java/com/scalar/db/io/TimeColumnTest.java @@ -0,0 +1,155 @@ +package com.scalar.db.io; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; + +import java.time.Clock; +import java.time.LocalDate; +import java.time.LocalTime; +import org.junit.jupiter.api.Test; + +class TimeColumnTest { + private static final LocalTime ANY_TIME = LocalTime.NOON; + + @Test + public void of_ProperValueGiven_ShouldReturnWhatsSet() { + // Arrange + + // Act + TimeColumn column = TimeColumn.of("col", ANY_TIME); + + // Assert + assertThat(column.getName()).isEqualTo("col"); + assertThat(column.getValue()).isPresent(); + assertThat(column.getValue().get()).isEqualTo(ANY_TIME); + assertThat(column.getTimeValue()).isEqualTo(ANY_TIME); + assertThat(column.getDataType()).isEqualTo(DataType.TIME); + assertThat(column.hasNullValue()).isFalse(); + assertThat(column.getValueAsObject()).isEqualTo(ANY_TIME); + assertThatThrownBy(column::getIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBigIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getFloatValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDoubleValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTextValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsByteBuffer) + .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsBytes) + .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + 
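// Editor's aside (illustrative sketch, not part of the original patch): TIME keeps
+    // at most microsecond precision, so values must be built accordingly (see the
+    // sub-microsecond test below):
+    assertDoesNotThrow(() -> TimeColumn.of("col", LocalTime.of(1, 2, 3, 123_456_000)));
+    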
assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); + } + + @Test + public void ofNull_ProperValueGiven_ShouldReturnWhatsSet() { + // Arrange + + // Act + TimeColumn column = TimeColumn.ofNull("col"); + + // Assert + assertThat(column.getName()).isEqualTo("col"); + assertThat(column.getValue()).isNotPresent(); + assertThat(column.getTimeValue()).isNull(); + assertThat(column.getDataType()).isEqualTo(DataType.TIME); + assertThat(column.hasNullValue()).isTrue(); + assertThat(column.getValueAsObject()).isNull(); + assertThatThrownBy(column::getIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBigIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getFloatValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDoubleValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTextValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsByteBuffer) + .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsBytes) + .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); + } + + @Test + public void copyWith_ProperValueGiven_ShouldReturnSameValueButDifferentName() { + // Arrange + + // Act + TimeColumn column = TimeColumn.of("col", ANY_TIME).copyWith("col2"); + + // Assert + assertThat(column.getName()).isEqualTo("col2"); + assertThat(column.getValue()).isPresent(); + assertThat(column.getValue().get()).isEqualTo(ANY_TIME); + assertThat(column.getTimeValue()).isEqualTo(ANY_TIME); + assertThat(column.getDataType()).isEqualTo(DataType.TIME); + assertThat(column.hasNullValue()).isFalse(); + assertThat(column.getValueAsObject()).isEqualTo(ANY_TIME); + assertThatThrownBy(column::getIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBigIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getFloatValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDoubleValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTextValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsByteBuffer) + .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsBytes) + .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); + } + + @Test + public void compareTo_ShouldReturnProperResults() { + // Arrange + TimeColumn column = TimeColumn.of("col", ANY_TIME); + + // Act 
Assert
+    assertThat(column.compareTo(TimeColumn.of("col", ANY_TIME))).isZero();
+    assertThat(column.compareTo(TimeColumn.of("col", ANY_TIME.minusHours(1)))).isPositive();
+    assertThat(column.compareTo(TimeColumn.of("col", ANY_TIME.plusMinutes(1)))).isNegative();
+    assertThat(column.compareTo(TimeColumn.ofNull("col"))).isPositive();
+  }
+
+  @Test
+  public void constructor_valueInsideRange_ShouldNotThrowException() {
+
+    // Act Assert
+    assertDoesNotThrow(() -> TimeColumn.of("col", TimeColumn.MIN_VALUE));
+    assertDoesNotThrow(() -> TimeColumn.of("col", TimeColumn.MAX_VALUE));
+  }
+
+  @Test
+  public void constructor_valueWithSubMicrosecondPrecision_ShouldThrowIllegalArgumentException() {
+    // Arrange
+    LocalTime timeWithThreeDigitNano = LocalTime.now(Clock.systemUTC()).withNano(123_456_789);
+    LocalTime timeWithTwoDigitNano = LocalTime.now(Clock.systemUTC()).withNano(123_456_780);
+    LocalTime timeWithOneDigitNano = LocalTime.now(Clock.systemUTC()).withNano(123_456_700);
+
+    // Act Assert
+    assertThatThrownBy(() -> TimeColumn.of("col", timeWithThreeDigitNano))
+        .isInstanceOf(IllegalArgumentException.class);
+    assertThatThrownBy(() -> TimeColumn.of("col", timeWithTwoDigitNano))
+        .isInstanceOf(IllegalArgumentException.class);
+    assertThatThrownBy(() -> TimeColumn.of("col", timeWithOneDigitNano))
+        .isInstanceOf(IllegalArgumentException.class);
+  }
+}
diff --git a/core/src/test/java/com/scalar/db/io/TimestampColumnTest.java b/core/src/test/java/com/scalar/db/io/TimestampColumnTest.java
new file mode 100644
index 0000000000..8781e3c5f4
--- /dev/null
+++ b/core/src/test/java/com/scalar/db/io/TimestampColumnTest.java
@@ -0,0 +1,170 @@
+package com.scalar.db.io;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+
+import java.time.Clock;
+import java.time.LocalDateTime;
+import org.junit.jupiter.api.Test;
+
+class TimestampColumnTest {
+  private static final LocalDateTime ANY_TIMESTAMP =
+      LocalDateTime.now(Clock.systemUTC()).withNano(123_000_000);
+
+  @Test
+  public void of_ProperValueGiven_ShouldReturnWhatsSet() {
+    // Arrange
+
+    // Act
+    TimestampColumn column = TimestampColumn.of("col", ANY_TIMESTAMP);
+
+    // Assert
+    assertThat(column.getName()).isEqualTo("col");
+    assertThat(column.getValue()).isPresent();
+    assertThat(column.getValue().get()).isEqualTo(ANY_TIMESTAMP);
+    assertThat(column.getTimestampValue()).isEqualTo(ANY_TIMESTAMP);
+    assertThat(column.getDataType()).isEqualTo(DataType.TIMESTAMP);
+    assertThat(column.hasNullValue()).isFalse();
+    assertThat(column.getValueAsObject()).isEqualTo(ANY_TIMESTAMP);
+    assertThatThrownBy(column::getIntValue).isInstanceOf(UnsupportedOperationException.class);
+    assertThatThrownBy(column::getBigIntValue).isInstanceOf(UnsupportedOperationException.class);
+    assertThatThrownBy(column::getFloatValue).isInstanceOf(UnsupportedOperationException.class);
+    
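// Editor's aside (illustrative sketch, not part of the original patch): TIMESTAMP is
+    // zone-less wall-clock time; pair it with TIMESTAMPTZ when a point on the global
+    // timeline is needed, e.g.:
+    assertThat(ANY_TIMESTAMP.toInstant(java.time.ZoneOffset.UTC)).isNotNull();
+    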
assertThatThrownBy(column::getDoubleValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTextValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsByteBuffer) + .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsBytes) + .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); + } + + @Test + public void ofNull_ProperValueGiven_ShouldReturnWhatsSet() { + // Arrange + + // Act + TimestampColumn column = TimestampColumn.ofNull("col"); + + // Assert + assertThat(column.getName()).isEqualTo("col"); + assertThat(column.getValue()).isNotPresent(); + assertThat(column.getTimestampValue()).isNull(); + assertThat(column.getDataType()).isEqualTo(DataType.TIMESTAMP); + assertThat(column.hasNullValue()).isTrue(); + assertThat(column.getValueAsObject()).isNull(); + assertThatThrownBy(column::getIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBigIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getFloatValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDoubleValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTextValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsByteBuffer) + .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsBytes) + .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampTZValue) + .isInstanceOf(UnsupportedOperationException.class); + } + + @Test + public void copyWith_ProperValueGiven_ShouldReturnSameValueButDifferentName() { + // Arrange + + // Act + TimestampColumn column = TimestampColumn.of("col", ANY_TIMESTAMP).copyWith("col2"); + + // Assert + assertThat(column.getName()).isEqualTo("col2"); + assertThat(column.getValue()).isPresent(); + assertThat(column.getValue().get()).isEqualTo(ANY_TIMESTAMP); + assertThat(column.getTimestampValue()).isEqualTo(ANY_TIMESTAMP); + assertThat(column.getDataType()).isEqualTo(DataType.TIMESTAMP); + assertThat(column.hasNullValue()).isFalse(); + assertThat(column.getValueAsObject()).isEqualTo(ANY_TIMESTAMP); + assertThatThrownBy(column::getIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBigIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getFloatValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDoubleValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTextValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValue).isInstanceOf(UnsupportedOperationException.class); + 
assertThatThrownBy(column::getBlobValueAsByteBuffer)
+ .isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(column::getBlobValueAsBytes)
+ .isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(column::getTimestampTZValue)
+ .isInstanceOf(UnsupportedOperationException.class);
+ }
+
+ @Test
+ public void compareTo_ShouldReturnProperResults() {
+ // Arrange
+ TimestampColumn column = TimestampColumn.of("col", ANY_TIMESTAMP);
+
+ // Act Assert
+ assertThat(column.compareTo(TimestampColumn.of("col", ANY_TIMESTAMP))).isZero();
+ assertThat(column.compareTo(TimestampColumn.of("col", ANY_TIMESTAMP.minusHours(1))))
+ .isPositive();
+ assertThat(column.compareTo(TimestampColumn.of("col", ANY_TIMESTAMP.plusDays(1)))).isNegative();
+ assertThat(column.compareTo(TimestampColumn.ofNull("col"))).isPositive();
+ }
+
+ @Test
+ public void constructor_valueInsideRange_ShouldNotThrowException() {
+ // Act Assert
+ assertDoesNotThrow(() -> TimestampColumn.of("col", TimestampColumn.MIN_VALUE));
+ assertDoesNotThrow(() -> TimestampColumn.of("col", TimestampColumn.MAX_VALUE));
+ }
+
+ @Test
+ public void constructor_valueOutOfRange_ShouldThrowIllegalArgumentException() {
+ // Arrange
+ LocalDateTime timestampBeforeRangeMin = TimestampColumn.MIN_VALUE.minusSeconds(1);
+ LocalDateTime timestampAfterRangeMax = TimestampColumn.MAX_VALUE.plusSeconds(1);
+
+ // Act Assert
+ assertThatThrownBy(() -> TimestampColumn.of("col", timestampBeforeRangeMin))
+ .isInstanceOf(IllegalArgumentException.class);
+ assertThatThrownBy(() -> TimestampColumn.of("col", timestampAfterRangeMax))
+ .isInstanceOf(IllegalArgumentException.class);
+ }
+
+ @Test
+ public void constructor_valueWithSubMillisecondPrecision_ShouldThrowIllegalArgumentException() {
+ // Arrange
+ LocalDateTime timestampWithThreeDigitNano =
+ LocalDateTime.now(Clock.systemUTC()).withNano(123_456_789);
+ LocalDateTime timestampWithTwoDigitNano =
+ LocalDateTime.now(Clock.systemUTC()).withNano(123_456_780);
+ LocalDateTime timestampWithOneDigitNano =
+ LocalDateTime.now(Clock.systemUTC()).withNano(123_456_700);
+ LocalDateTime timestampWithThreeDigitMicro =
+ LocalDateTime.now(Clock.systemUTC()).withNano(123_456_000);
+ LocalDateTime timestampWithTwoDigitMicro =
+ LocalDateTime.now(Clock.systemUTC()).withNano(123_450_000);
+ LocalDateTime timestampWithOneDigitMicro =
+ LocalDateTime.now(Clock.systemUTC()).withNano(123_400_000);
+
+ // Act Assert
+ assertThatThrownBy(() -> TimestampColumn.of("col", timestampWithThreeDigitNano))
+ .isInstanceOf(IllegalArgumentException.class);
+ assertThatThrownBy(() -> TimestampColumn.of("col", timestampWithTwoDigitNano))
+ .isInstanceOf(IllegalArgumentException.class);
+ assertThatThrownBy(() -> TimestampColumn.of("col", timestampWithOneDigitNano))
+ .isInstanceOf(IllegalArgumentException.class);
+ assertThatThrownBy(() -> TimestampColumn.of("col", timestampWithThreeDigitMicro))
+ .isInstanceOf(IllegalArgumentException.class);
+ assertThatThrownBy(() -> TimestampColumn.of("col", timestampWithTwoDigitMicro))
+ .isInstanceOf(IllegalArgumentException.class);
+ assertThatThrownBy(() -> TimestampColumn.of("col", timestampWithOneDigitMicro))
+ .isInstanceOf(IllegalArgumentException.class);
+ }
+}
diff --git a/core/src/test/java/com/scalar/db/io/TimestampTZColumnTest.java b/core/src/test/java/com/scalar/db/io/TimestampTZColumnTest.java new
file mode 100644 index 0000000000..4bd04f613a --- /dev/null +++ b/core/src/test/java/com/scalar/db/io/TimestampTZColumnTest.java @@ -0,0 +1,165 @@ +package com.scalar.db.io; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; + +import java.time.Instant; +import java.time.temporal.ChronoField; +import org.junit.jupiter.api.Test; + +class TimestampTZColumnTest { + private static final Instant ANY_TIMESTAMPTZ = Instant.ofEpochSecond(12354); + + @Test + public void of_ProperValueGiven_ShouldReturnWhatsSet() { + // Arrange + + // Act + TimestampTZColumn column = TimestampTZColumn.of("col", ANY_TIMESTAMPTZ); + + // Assert + assertThat(column.getName()).isEqualTo("col"); + assertThat(column.getValue()).isPresent(); + assertThat(column.getValue().get()).isEqualTo(ANY_TIMESTAMPTZ); + assertThat(column.getTimestampTZValue()).isEqualTo(ANY_TIMESTAMPTZ); + assertThat(column.getDataType()).isEqualTo(DataType.TIMESTAMPTZ); + assertThat(column.hasNullValue()).isFalse(); + assertThat(column.getValueAsObject()).isEqualTo(ANY_TIMESTAMPTZ); + assertThatThrownBy(column::getIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBigIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getFloatValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDoubleValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTextValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsByteBuffer) + .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsBytes) + .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class); + } + + @Test + public void ofNull_ProperValueGiven_ShouldReturnWhatsSet() { + // Arrange + + // Act + TimestampTZColumn column = TimestampTZColumn.ofNull("col"); + + // Assert + assertThat(column.getName()).isEqualTo("col"); + assertThat(column.getValue()).isNotPresent(); + assertThat(column.getTimestampTZValue()).isNull(); + assertThat(column.getDataType()).isEqualTo(DataType.TIMESTAMPTZ); + assertThat(column.hasNullValue()).isTrue(); + assertThat(column.getValueAsObject()).isNull(); + assertThatThrownBy(column::getIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBigIntValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getFloatValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getDoubleValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getTextValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValue).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsByteBuffer) + .isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(column::getBlobValueAsBytes) + .isInstanceOf(UnsupportedOperationException.class); + 
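// An editorial note, an assumption rather than verified Column behavior: the type-mismatched
+ // getters below are expected to throw even though the column holds null, because the check on
+ // the column's declared data type presumably happens before any null handling.
+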
assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class);
+ }
+
+ @Test
+ public void copyWith_ProperValueGiven_ShouldReturnSameValueButDifferentName() {
+ // Arrange
+
+ // Act
+ TimestampTZColumn column = TimestampTZColumn.of("col", ANY_TIMESTAMPTZ).copyWith("col2");
+
+ // Assert
+ assertThat(column.getName()).isEqualTo("col2");
+ assertThat(column.getValue()).isPresent();
+ assertThat(column.getValue().get()).isEqualTo(ANY_TIMESTAMPTZ);
+ assertThat(column.getTimestampTZValue()).isEqualTo(ANY_TIMESTAMPTZ);
+ assertThat(column.getDataType()).isEqualTo(DataType.TIMESTAMPTZ);
+ assertThat(column.hasNullValue()).isFalse();
+ assertThat(column.getValueAsObject()).isEqualTo(ANY_TIMESTAMPTZ);
+ assertThatThrownBy(column::getIntValue).isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(column::getBigIntValue).isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(column::getFloatValue).isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(column::getDoubleValue).isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(column::getTextValue).isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(column::getBlobValue).isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(column::getBlobValueAsByteBuffer)
+ .isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(column::getBlobValueAsBytes)
+ .isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(column::getDateValue).isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(column::getTimeValue).isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(column::getTimestampValue).isInstanceOf(UnsupportedOperationException.class);
+ }
+
+ @Test
+ public void compareTo_ShouldReturnProperResults() {
+ // Arrange
+ TimestampTZColumn column = TimestampTZColumn.of("col", ANY_TIMESTAMPTZ);
+
+ // Act Assert
+ assertThat(column.compareTo(TimestampTZColumn.of("col", ANY_TIMESTAMPTZ))).isZero();
+ assertThat(column.compareTo(TimestampTZColumn.of("col", ANY_TIMESTAMPTZ.minusSeconds(1))))
+ .isPositive();
+ assertThat(column.compareTo(TimestampTZColumn.of("col", ANY_TIMESTAMPTZ.plusSeconds(1))))
+ .isNegative();
+ assertThat(column.compareTo(TimestampTZColumn.ofNull("col"))).isPositive();
+ }
+
+ @Test
+ public void constructor_valueInsideRange_ShouldNotThrowException() {
+ // Act Assert
+ assertDoesNotThrow(() -> TimestampTZColumn.of("col", TimestampTZColumn.MIN_VALUE));
+ assertDoesNotThrow(() -> TimestampTZColumn.of("col", TimestampTZColumn.MAX_VALUE));
+ }
+
+ @Test
+ public void constructor_valueOutOfRange_ShouldThrowIllegalArgumentException() {
+ // Arrange
+ Instant timestampBeforeRangeMin = TimestampTZColumn.MIN_VALUE.minusMillis(1);
+ Instant timestampAfterRangeMax = TimestampTZColumn.MAX_VALUE.plusMillis(1);
+
+ // Act Assert
+ assertThatThrownBy(() -> TimestampTZColumn.of("col", timestampBeforeRangeMin))
+ .isInstanceOf(IllegalArgumentException.class);
+ assertThatThrownBy(() -> TimestampTZColumn.of("col", timestampAfterRangeMax))
+ .isInstanceOf(IllegalArgumentException.class);
+ }
+
+ @Test
+ public void constructor_valueWithSubMillisecondPrecision_ShouldThrowIllegalArgumentException() {
+ // Arrange
+ Instant timestampWithThreeDigitNano =
+ Instant.now().with(ChronoField.NANO_OF_SECOND, 123_456_789);
+ Instant timestampWithTwoDigitNano = Instant.now().with(ChronoField.NANO_OF_SECOND, 123_456_780);
+ Instant timestampWithOneDigitNano = Instant.now().with(ChronoField.NANO_OF_SECOND, 123_456_700);
+ Instant timestampWithThreeDigitMicro =
+ Instant.now().with(ChronoField.NANO_OF_SECOND, 123_456_000);
+ Instant timestampWithTwoDigitMicro =
+ Instant.now().with(ChronoField.NANO_OF_SECOND, 123_450_000);
+ Instant timestampWithOneDigitMicro =
+ Instant.now().with(ChronoField.NANO_OF_SECOND, 123_400_000);
+
+ // Act Assert
+ assertThatThrownBy(() -> TimestampTZColumn.of("col", timestampWithThreeDigitNano))
+ .isInstanceOf(IllegalArgumentException.class);
+ assertThatThrownBy(() -> TimestampTZColumn.of("col", timestampWithTwoDigitNano))
+ .isInstanceOf(IllegalArgumentException.class);
+ assertThatThrownBy(() -> TimestampTZColumn.of("col", timestampWithOneDigitNano))
+ .isInstanceOf(IllegalArgumentException.class);
+ assertThatThrownBy(() -> TimestampTZColumn.of("col", timestampWithThreeDigitMicro))
+ .isInstanceOf(IllegalArgumentException.class);
+ assertThatThrownBy(() -> TimestampTZColumn.of("col", timestampWithTwoDigitMicro))
+ .isInstanceOf(IllegalArgumentException.class);
+ assertThatThrownBy(() -> TimestampTZColumn.of("col", timestampWithOneDigitMicro))
+ .isInstanceOf(IllegalArgumentException.class);
+ }
+}
diff --git a/core/src/test/java/com/scalar/db/storage/TimeRelatedColumnEncodingUtilsTest.java b/core/src/test/java/com/scalar/db/storage/TimeRelatedColumnEncodingUtilsTest.java new file mode 100644 index 0000000000..b226cf1cf5 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/TimeRelatedColumnEncodingUtilsTest.java
@@ -0,0 +1,296 @@
+package com.scalar.db.storage;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.scalar.db.io.DateColumn;
+import com.scalar.db.io.TimeColumn;
+import com.scalar.db.io.TimestampColumn;
+import com.scalar.db.io.TimestampTZColumn;
+import com.scalar.db.util.TimeRelatedColumnEncodingUtils;
+import java.time.Instant;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.time.ZoneOffset;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Random;
+import org.junit.jupiter.api.Test;
+
+class TimeRelatedColumnEncodingUtilsTest {
+ @Test
+ public void encodeDate_ShouldWorkProperly() {
+ // Arrange
+ DateColumn column = DateColumn.of("date", LocalDate.of(2023, 10, 1));
+
+ // Act
+ long encoded = TimeRelatedColumnEncodingUtils.encode(column);
+
+ // Assert
+ assertThat(encoded).isEqualTo(LocalDate.of(2023, 10, 1).toEpochDay());
+ }
+
+ @Test
+ public void encodeTime_ShouldWorkProperly() {
+ // Arrange
+ TimeColumn column = TimeColumn.of("time", LocalTime.of(12, 34, 56, 123_456_000));
+
+ // Act
+ long encoded = TimeRelatedColumnEncodingUtils.encode(column);
+
+ // Assert
+ assertThat(encoded).isEqualTo(LocalTime.of(12, 34, 56, 123_456_000).toNanoOfDay());
+ }
+
+ @Test
+ public void encodeTimestamp_ShouldWorkProperly() {
+ // Arrange
+ TimestampColumn positiveEpochSecondWithNano =
+ TimestampColumn.of("timestamp", LocalDateTime.of(2023, 10, 1, 12, 34, 56, 789_000_000));
+ TimestampColumn positiveEpochSecondWithZeroNano =
+ TimestampColumn.of("timestamp", LocalDateTime.of(2023, 10, 1, 12, 34, 56, 0));
+ TimestampColumn negativeEpochSecondWithNano =
+ TimestampColumn.of("timestamp", LocalDateTime.of(1234, 10, 1, 12, 34, 56, 456_000_000));
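+ // A note on the expected longs asserted below (an inferred formula, not taken from the
+ // implementation): with s the UTC epoch second and m the millisecond-of-second, the encoding
+ // appears to be s * 1000 + m for s >= 0 and s * 1000 + (m - 999) for s < 0, which keeps the
+ // encoded order consistent across the epoch. For example, 2023-10-01T12:34:56.789 gives
+ // 1_696_163_696 * 1000 + 789 = 1696163696789, and 1234-10-01T12:34:56.456 gives
+ // -23_202_242_704 * 1000 + 456 - 999 = -23202242704543.
+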
TimestampColumn negativeEpochSecondWithZeroNano = + TimestampColumn.of("timestamp", LocalDateTime.of(1234, 10, 1, 12, 34, 56, 0)); + TimestampColumn epoch = + TimestampColumn.of( + "timestamp", LocalDateTime.of(LocalDate.ofEpochDay(0), LocalTime.of(0, 0))); + + // Act + long actualPositiveEpochSecondWithNano = + TimeRelatedColumnEncodingUtils.encode(positiveEpochSecondWithNano); + long actualPositiveEpochSecondWithZeroNano = + TimeRelatedColumnEncodingUtils.encode(positiveEpochSecondWithZeroNano); + long actualNegativeEpochSecondWithNano = + TimeRelatedColumnEncodingUtils.encode(negativeEpochSecondWithNano); + long actualNegativeEpochSecondWithZeroNano = + TimeRelatedColumnEncodingUtils.encode(negativeEpochSecondWithZeroNano); + long actualEpoch = TimeRelatedColumnEncodingUtils.encode(epoch); + + // Assert + assertThat(actualPositiveEpochSecondWithNano).isEqualTo(1696163696789L); + assertThat(actualPositiveEpochSecondWithZeroNano).isEqualTo(1696163696000L); + assertThat(actualNegativeEpochSecondWithNano).isEqualTo(-23202242704543L); + assertThat(actualNegativeEpochSecondWithZeroNano).isEqualTo(-23202242704999L); + assertThat(actualEpoch).isEqualTo(0L); + } + + @Test + public void encodeTimestampTZ_ShouldWorkProperly() { + // Arrange + TimestampTZColumn positiveEpochSecondWithNano = + TimestampTZColumn.of( + "timestamptz", + LocalDateTime.of(2023, 10, 1, 12, 34, 56, 789_000_000).toInstant(ZoneOffset.UTC)); + TimestampTZColumn positiveEpochSecondWithZeroNano = + TimestampTZColumn.of( + "timestamptz", LocalDateTime.of(2023, 10, 1, 12, 34, 56, 0).toInstant(ZoneOffset.UTC)); + TimestampTZColumn negativeEpochSecondWithNano = + TimestampTZColumn.of( + "timestamptz", + LocalDateTime.of(1234, 10, 1, 12, 34, 56, 456_000_000).toInstant(ZoneOffset.UTC)); + TimestampTZColumn negativeEpochSecondWithZeroNano = + TimestampTZColumn.of( + "timestamptz", LocalDateTime.of(1234, 10, 1, 12, 34, 56, 0).toInstant(ZoneOffset.UTC)); + TimestampTZColumn epoch = + TimestampTZColumn.of( + "timestamptz", + LocalDateTime.of(LocalDate.ofEpochDay(0), LocalTime.of(0, 0)) + .toInstant(ZoneOffset.UTC)); + + // Act + long actualPositiveEpochSecondWithNano = + TimeRelatedColumnEncodingUtils.encode(positiveEpochSecondWithNano); + long actualPositiveEpochSecondWithZeroNano = + TimeRelatedColumnEncodingUtils.encode(positiveEpochSecondWithZeroNano); + long actualNegativeEpochSecondWithNano = + TimeRelatedColumnEncodingUtils.encode(negativeEpochSecondWithNano); + long actualNegativeEpochSecondWithZeroNano = + TimeRelatedColumnEncodingUtils.encode(negativeEpochSecondWithZeroNano); + long actualEpoch = TimeRelatedColumnEncodingUtils.encode(epoch); + + // Assert + assertThat(actualPositiveEpochSecondWithNano).isEqualTo(1696163696789L); + assertThat(actualPositiveEpochSecondWithZeroNano).isEqualTo(1696163696000L); + assertThat(actualNegativeEpochSecondWithNano).isEqualTo(-23202242704543L); + assertThat(actualNegativeEpochSecondWithZeroNano).isEqualTo(-23202242704999L); + assertThat(actualEpoch).isEqualTo(0L); + } + + @Test + public void decodeDate_ShouldWorkProperly() { + // Arrange Act + LocalDate date = + TimeRelatedColumnEncodingUtils.decodeDate(LocalDate.of(2023, 10, 1).toEpochDay()); + + // Assert + assertThat(date).isEqualTo(LocalDate.of(2023, 10, 1)); + } + + @Test + public void decodeTime_ShouldWorkProperly() { + // Arrange Act + LocalTime time = + TimeRelatedColumnEncodingUtils.decodeTime( + LocalTime.of(12, 34, 56, 123_456_000).toNanoOfDay()); + + // Assert + assertThat(time).isEqualTo(LocalTime.of(12, 34, 56, 123_456_000)); 
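+ // Assumption behind decodeDate above and decodeTime here: both are taken to be the plain
+ // inverses of LocalDate.toEpochDay and LocalTime.toNanoOfDay, i.e. LocalDate.ofEpochDay and
+ // LocalTime.ofNanoOfDay applied to the encoded long.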
+ }
+
+ @Test
+ public void decodeTimestamp_ShouldWorkProperly() {
+ // Act
+ LocalDateTime positiveEpochSecondWithNano =
+ TimeRelatedColumnEncodingUtils.decodeTimestamp(1696163696789L);
+ LocalDateTime positiveEpochSecondWithZeroNano =
+ TimeRelatedColumnEncodingUtils.decodeTimestamp(1696163696000L);
+ LocalDateTime negativeEpochSecondWithNano =
+ TimeRelatedColumnEncodingUtils.decodeTimestamp(-23202242704543L);
+ LocalDateTime negativeEpochSecondWithZeroNano =
+ TimeRelatedColumnEncodingUtils.decodeTimestamp(-23202242704999L);
+ LocalDateTime epoch = TimeRelatedColumnEncodingUtils.decodeTimestamp(0L);
+
+ // Assert
+ assertThat(positiveEpochSecondWithNano)
+ .isEqualTo(LocalDateTime.of(2023, 10, 1, 12, 34, 56, 789_000_000));
+ assertThat(positiveEpochSecondWithZeroNano)
+ .isEqualTo(LocalDateTime.of(2023, 10, 1, 12, 34, 56, 0));
+ assertThat(negativeEpochSecondWithNano)
+ .isEqualTo(LocalDateTime.of(1234, 10, 1, 12, 34, 56, 456_000_000));
+ assertThat(negativeEpochSecondWithZeroNano)
+ .isEqualTo(LocalDateTime.of(1234, 10, 1, 12, 34, 56, 0));
+ assertThat(epoch).isEqualTo(LocalDateTime.of(LocalDate.ofEpochDay(0), LocalTime.of(0, 0)));
+ }
+
+ @Test
+ public void decodeTimestampTZ_ShouldWorkProperly() {
+ // Act
+ Instant positiveEpochSecondWithNano =
+ TimeRelatedColumnEncodingUtils.decodeTimestampTZ(1696163696789L);
+ Instant positiveEpochSecondWithZeroNano =
+ TimeRelatedColumnEncodingUtils.decodeTimestampTZ(1696163696000L);
+ Instant negativeEpochSecondWithNano =
+ TimeRelatedColumnEncodingUtils.decodeTimestampTZ(-23202242704543L);
+ Instant negativeEpochSecondWithZeroNano =
+ TimeRelatedColumnEncodingUtils.decodeTimestampTZ(-23202242704999L);
+ Instant epoch = TimeRelatedColumnEncodingUtils.decodeTimestampTZ(0);
+
+ // Assert
+ assertThat(positiveEpochSecondWithNano)
+ .isEqualTo(
+ LocalDateTime.of(2023, 10, 1, 12, 34, 56, 789_000_000).toInstant(ZoneOffset.UTC));
+ assertThat(positiveEpochSecondWithZeroNano)
+ .isEqualTo(LocalDateTime.of(2023, 10, 1, 12, 34, 56, 0).toInstant(ZoneOffset.UTC));
+ assertThat(negativeEpochSecondWithNano)
+ .isEqualTo(
+ LocalDateTime.of(1234, 10, 1, 12, 34, 56, 456_000_000).toInstant(ZoneOffset.UTC));
+ assertThat(negativeEpochSecondWithZeroNano)
+ .isEqualTo(LocalDateTime.of(1234, 10, 1, 12, 34, 56, 0).toInstant(ZoneOffset.UTC));
+ assertThat(epoch).isEqualTo(Instant.EPOCH);
+ }
+
+ @Test
+ public void encodeThenDecodeTimestamp_ShouldPreserveDataIntegrity() {
+ // Arrange
+ TimestampColumn min = TimestampColumn.of("timestamp", TimestampColumn.MIN_VALUE);
+ TimestampColumn max = TimestampColumn.of("timestamp", TimestampColumn.MAX_VALUE);
+
+ // Act Assert
+ assertThat(
+ TimeRelatedColumnEncodingUtils.decodeTimestamp(
+ TimeRelatedColumnEncodingUtils.encode(min)))
+ .isEqualTo(TimestampColumn.MIN_VALUE);
+ assertThat(
+ TimeRelatedColumnEncodingUtils.decodeTimestamp(
+ TimeRelatedColumnEncodingUtils.encode(max)))
+ .isEqualTo(TimestampColumn.MAX_VALUE);
+ LocalDateTime start = LocalDateTime.ofEpochSecond(-2, 0, ZoneOffset.UTC);
+ LocalDateTime end = LocalDateTime.ofEpochSecond(3, 0, ZoneOffset.UTC);
+ for (LocalDateTime dt = start; dt.isBefore(end); dt = dt.plusNanos(1_000_000)) {
+ assertThat(
+ TimeRelatedColumnEncodingUtils.decodeTimestamp(
+ TimeRelatedColumnEncodingUtils.encode(TimestampColumn.of("ts", dt))))
+ .isEqualTo(dt);
+ }
+ }
+
+ @Test
+ public void encodeThenDecodeTimestampTZ_ShouldPreserveDataIntegrity() {
+ // Arrange
+ TimestampTZColumn min = TimestampTZColumn.of("timestampTZ", TimestampTZColumn.MIN_VALUE);
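+ // Besides the two range extremes, the loop below sweeps a five-second window around the
+ // epoch in one-millisecond steps, since the sign change at epoch second zero is where an
+ // order-preserving second/millisecond packing is most easily broken.
+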
TimestampTZColumn max = TimestampTZColumn.of("timestampTZ", TimestampTZColumn.MAX_VALUE); + + // Act Assert + assertThat( + TimeRelatedColumnEncodingUtils.decodeTimestampTZ( + TimeRelatedColumnEncodingUtils.encode(min))) + .isEqualTo(TimestampTZColumn.MIN_VALUE); + assertThat( + TimeRelatedColumnEncodingUtils.decodeTimestampTZ( + TimeRelatedColumnEncodingUtils.encode(max))) + .isEqualTo(TimestampTZColumn.MAX_VALUE); + Instant start = Instant.ofEpochSecond(-2, 0); + Instant end = Instant.ofEpochSecond(3, 0); + for (Instant instant = start; instant.isBefore(end); instant = instant.plusNanos(1_000_000)) { + assertThat( + TimeRelatedColumnEncodingUtils.decodeTimestampTZ( + TimeRelatedColumnEncodingUtils.encode(TimestampTZColumn.of("ts", instant)))) + .isEqualTo(instant); + } + } + + @Test + public void encodeTimestamp_ShouldPreserveOrder() { + // Arrange + List expectedTimestamps = new ArrayList<>(); + LocalDateTime start = LocalDateTime.ofEpochSecond(-2, 0, ZoneOffset.UTC); + LocalDateTime end = LocalDateTime.ofEpochSecond(3, 0, ZoneOffset.UTC); + for (LocalDateTime dt = start; dt.isBefore(end); dt = dt.plusNanos(1_000_000)) { + expectedTimestamps.add(TimeRelatedColumnEncodingUtils.encode(TimestampColumn.of("ts", dt))); + } + long seed = System.currentTimeMillis(); + System.out.printf( + "The seed used in the %s.%s unit test is %s%n", + this.getClass().getSimpleName(), "encodeTimestamp_ShouldPreserveOrder", seed); + ThreadLocal random = ThreadLocal.withInitial(Random::new); + random.get().setSeed(seed); + + // Act + List shuffledThenSorted = new ArrayList<>(expectedTimestamps); + Collections.shuffle(shuffledThenSorted, random.get()); + shuffledThenSorted.sort(Comparator.naturalOrder()); + + // Assert + assertThat(shuffledThenSorted).containsExactlyElementsOf(expectedTimestamps); + } + + @Test + public void encodeTimestampTZ_ShouldPreserveOrder() { + // Arrange + List expectedTimestamps = new ArrayList<>(); + Instant start = Instant.ofEpochSecond(-2, 0); + Instant end = Instant.ofEpochSecond(3, 0); + for (Instant instant = start; instant.isBefore(end); instant = instant.plusNanos(1_000_000)) { + expectedTimestamps.add( + TimeRelatedColumnEncodingUtils.encode(TimestampTZColumn.of("ts", instant))); + } + long seed = System.currentTimeMillis(); + System.out.printf( + "The seed used in the %s.%s unit test is %s%n", + this.getClass().getSimpleName(), "encodeTimestampTZ_ShouldPreserveOrder", seed); + ThreadLocal random = ThreadLocal.withInitial(Random::new); + random.get().setSeed(seed); + + // Act + List shuffledThenSorted = new ArrayList<>(expectedTimestamps); + Collections.shuffle(shuffledThenSorted, random.get()); + shuffledThenSorted.sort(Comparator.naturalOrder()); + + // Assert + assertThat(shuffledThenSorted).containsExactlyElementsOf(expectedTimestamps); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/cassandra/CassandraAdminTest.java b/core/src/test/java/com/scalar/db/storage/cassandra/CassandraAdminTest.java index bfb0da5d87..e1f216f77d 100644 --- a/core/src/test/java/com/scalar/db/storage/cassandra/CassandraAdminTest.java +++ b/core/src/test/java/com/scalar/db/storage/cassandra/CassandraAdminTest.java @@ -247,8 +247,13 @@ private void verifyInsertIntoKeyspacesTableQuery(String keyspace) { .addColumn("c1", DataType.INT) .addColumn("c2", DataType.TEXT) .addColumn("c3", DataType.BLOB) - .addColumn("c4", DataType.INT) + .addColumn("c4", DataType.BIGINT) .addColumn("c5", DataType.BOOLEAN) + .addColumn("c6", DataType.DOUBLE) + .addColumn("c7", DataType.FLOAT) + .addColumn("c8", 
DataType.DATE) + .addColumn("c9", DataType.TIME) + .addColumn("c10", DataType.TIMESTAMPTZ) .addSecondaryIndex("c2") .addSecondaryIndex("c4") .build(); @@ -260,10 +265,15 @@ private void verifyInsertIntoKeyspacesTableQuery(String keyspace) { TableOptions createTableStatement = SchemaBuilder.createTable(namespace, table) .addPartitionKey("c1", com.datastax.driver.core.DataType.cint()) - .addClusteringColumn("c4", com.datastax.driver.core.DataType.cint()) + .addClusteringColumn("c4", com.datastax.driver.core.DataType.bigint()) .addColumn("c2", com.datastax.driver.core.DataType.text()) .addColumn("c3", com.datastax.driver.core.DataType.blob()) .addColumn("c5", com.datastax.driver.core.DataType.cboolean()) + .addColumn("c6", com.datastax.driver.core.DataType.cdouble()) + .addColumn("c7", com.datastax.driver.core.DataType.cfloat()) + .addColumn("c8", com.datastax.driver.core.DataType.date()) + .addColumn("c9", com.datastax.driver.core.DataType.time()) + .addColumn("c10", com.datastax.driver.core.DataType.timestamp()) .withOptions() .clusteringOrder("c4", Direction.ASC) .compactionOptions(SchemaBuilder.sizedTieredStategy()); @@ -283,6 +293,7 @@ private void verifyInsertIntoKeyspacesTableQuery(String keyspace) { .addPartitionKey("c7") .addClusteringKey("c4") .addClusteringKey("c6", Order.DESC) + .addClusteringKey("c9", Order.ASC) .addColumn("c1", DataType.INT) .addColumn("c2", DataType.TEXT) .addColumn("c3", DataType.BLOB) @@ -290,6 +301,9 @@ private void verifyInsertIntoKeyspacesTableQuery(String keyspace) { .addColumn("c5", DataType.BIGINT) .addColumn("c6", DataType.BOOLEAN) .addColumn("c7", DataType.TEXT) + .addColumn("c8", DataType.DATE) + .addColumn("c9", DataType.TIME) + .addColumn("c10", DataType.TIMESTAMPTZ) .addSecondaryIndex("c2") .addSecondaryIndex("c4") .build(); @@ -306,12 +320,16 @@ private void verifyInsertIntoKeyspacesTableQuery(String keyspace) { .addPartitionKey("c7", com.datastax.driver.core.DataType.text()) .addClusteringColumn("c4", com.datastax.driver.core.DataType.cdouble()) .addClusteringColumn("c6", com.datastax.driver.core.DataType.cboolean()) + .addClusteringColumn("c9", com.datastax.driver.core.DataType.time()) .addColumn("c2", com.datastax.driver.core.DataType.text()) .addColumn("c3", com.datastax.driver.core.DataType.blob()) .addColumn("c5", com.datastax.driver.core.DataType.bigint()) + .addColumn("c8", com.datastax.driver.core.DataType.date()) + .addColumn("c10", com.datastax.driver.core.DataType.timestamp()) .withOptions() .clusteringOrder("c4", Direction.ASC) .clusteringOrder("c6", Direction.DESC) + .clusteringOrder("c9", Direction.ASC) .compactionOptions(SchemaBuilder.leveledStrategy()); verify(cassandraSession).execute(createTableStatement.getQueryString()); } @@ -861,12 +879,16 @@ public void unsupportedOperations_ShouldThrowUnsupportedException() { // Act Throwable thrown1 = - catchThrowable(() -> cassandraAdmin.getImportTableMetadata(namespace, table)); + catchThrowable( + () -> cassandraAdmin.getImportTableMetadata(namespace, table, Collections.emptyMap())); Throwable thrown2 = catchThrowable( () -> cassandraAdmin.addRawColumnToTable(namespace, table, column, DataType.INT)); Throwable thrown3 = - catchThrowable(() -> cassandraAdmin.importTable(namespace, table, Collections.emptyMap())); + catchThrowable( + () -> + cassandraAdmin.importTable( + namespace, table, Collections.emptyMap(), Collections.emptyMap())); // Assert assertThat(thrown1).isInstanceOf(UnsupportedOperationException.class); diff --git 
a/core/src/test/java/com/scalar/db/storage/cassandra/ResultInterpreterTest.java b/core/src/test/java/com/scalar/db/storage/cassandra/ResultInterpreterTest.java index d1a9f1faeb..31ef8bbdc4 100644 --- a/core/src/test/java/com/scalar/db/storage/cassandra/ResultInterpreterTest.java +++ b/core/src/test/java/com/scalar/db/storage/cassandra/ResultInterpreterTest.java @@ -1,17 +1,26 @@ package com.scalar.db.storage.cassandra; import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.spy; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.mockito.Mockito.when; import com.datastax.driver.core.Row; import com.scalar.db.api.Result; import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.BigIntColumn; import com.scalar.db.io.BigIntValue; -import com.scalar.db.io.Value; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampTZColumn; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalTime; import java.util.Collections; +import java.util.Date; import java.util.List; import java.util.Map; import org.junit.jupiter.api.BeforeEach; @@ -32,22 +41,32 @@ public class ResultInterpreterTest { private static final String ANY_COLUMN_NAME_5 = "col5"; private static final String ANY_COLUMN_NAME_6 = "col6"; private static final String ANY_COLUMN_NAME_7 = "col7"; + private static final String ANY_COLUMN_NAME_8 = "col8"; + private static final String ANY_COLUMN_NAME_9 = "col9"; + private static final String ANY_COLUMN_NAME_10 = "col10"; private static final TableMetadata TABLE_METADATA = TableMetadata.newBuilder() - .addColumn(ANY_NAME_1, com.scalar.db.io.DataType.TEXT) - .addColumn(ANY_NAME_2, com.scalar.db.io.DataType.TEXT) - .addColumn(ANY_COLUMN_NAME_1, com.scalar.db.io.DataType.BOOLEAN) - .addColumn(ANY_COLUMN_NAME_2, com.scalar.db.io.DataType.INT) - .addColumn(ANY_COLUMN_NAME_3, com.scalar.db.io.DataType.BIGINT) - .addColumn(ANY_COLUMN_NAME_4, com.scalar.db.io.DataType.FLOAT) - .addColumn(ANY_COLUMN_NAME_5, com.scalar.db.io.DataType.DOUBLE) - .addColumn(ANY_COLUMN_NAME_6, com.scalar.db.io.DataType.TEXT) - .addColumn(ANY_COLUMN_NAME_7, com.scalar.db.io.DataType.BLOB) + .addColumn(ANY_NAME_1, DataType.TEXT) + .addColumn(ANY_NAME_2, DataType.TEXT) + .addColumn(ANY_COLUMN_NAME_1, DataType.BOOLEAN) + .addColumn(ANY_COLUMN_NAME_2, DataType.INT) + .addColumn(ANY_COLUMN_NAME_3, DataType.BIGINT) + .addColumn(ANY_COLUMN_NAME_4, DataType.FLOAT) + .addColumn(ANY_COLUMN_NAME_5, DataType.DOUBLE) + .addColumn(ANY_COLUMN_NAME_6, DataType.TEXT) + .addColumn(ANY_COLUMN_NAME_7, DataType.BLOB) + .addColumn(ANY_COLUMN_NAME_8, DataType.DATE) + .addColumn(ANY_COLUMN_NAME_9, DataType.TIME) + .addColumn(ANY_COLUMN_NAME_10, DataType.TIMESTAMPTZ) .addPartitionKey(ANY_NAME_1) .addClusteringKey(ANY_NAME_2) .build(); + private static final LocalDate ANY_DATE = DateColumn.MAX_VALUE; + private static final LocalTime ANY_TIME = TimeColumn.MAX_VALUE; + private static final Instant ANY_TIMESTAMPTZ = TimestampTZColumn.MAX_VALUE; + @Mock private Row row; @BeforeEach @@ -69,86 +88,100 @@ public void interpret_ShouldReturnWhatsSet() { byte[] bytesValue = "bytes".getBytes(StandardCharsets.UTF_8); when(row.getBytes(ANY_COLUMN_NAME_7)) .thenReturn((ByteBuffer) ByteBuffer.allocate(bytesValue.length).put(bytesValue).flip()); + when(row.getDate(ANY_COLUMN_NAME_8)) + 
.thenReturn( + com.datastax.driver.core.LocalDate.fromDaysSinceEpoch((int) ANY_DATE.toEpochDay())); + when(row.getTime(ANY_COLUMN_NAME_9)).thenReturn(ANY_TIME.toNanoOfDay()); + when(row.getTimestamp(ANY_COLUMN_NAME_10)).thenReturn(java.util.Date.from(ANY_TIMESTAMPTZ)); List projections = Collections.emptyList(); - ResultInterpreter spy = spy(new ResultInterpreter(projections, TABLE_METADATA)); + ResultInterpreter interpreter = new ResultInterpreter(projections, TABLE_METADATA); // Act - Result result = spy.interpret(row); + Result result = interpreter.interpret(row); // Assert - assertThat(result.getValue(ANY_NAME_1).isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_1).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_1).get().getAsString().get()).isEqualTo(ANY_TEXT_1); - assertThat(result.getValue(ANY_NAME_2).isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_2).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_2).get().getAsString().get()).isEqualTo(ANY_TEXT_2); - assertThat(result.getValue(ANY_COLUMN_NAME_1).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_1).get().getAsBoolean()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_2).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_2).get().getAsInt()).isEqualTo(Integer.MAX_VALUE); - assertThat(result.getValue(ANY_COLUMN_NAME_3).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_3).get().getAsLong()) - .isEqualTo(BigIntValue.MAX_VALUE); - assertThat(result.getValue(ANY_COLUMN_NAME_4).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_4).get().getAsFloat()).isEqualTo(Float.MAX_VALUE); - assertThat(result.getValue(ANY_COLUMN_NAME_5).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_5).get().getAsDouble()).isEqualTo(Double.MAX_VALUE); - assertThat(result.getValue(ANY_COLUMN_NAME_6).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_6).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_6).get().getAsString().get()).isEqualTo("string"); - assertThat(result.getValue(ANY_COLUMN_NAME_7).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_7).get().getAsBytes().isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_7).get().getAsBytes().get()) - .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); - - Map> values = result.getValues(); - assertThat(values.containsKey(ANY_NAME_1)).isTrue(); - assertThat(values.get(ANY_NAME_1).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_NAME_1).getAsString().get()).isEqualTo(ANY_TEXT_1); - assertThat(values.containsKey(ANY_NAME_2)).isTrue(); - assertThat(values.get(ANY_NAME_2).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_NAME_2).getAsString().get()).isEqualTo(ANY_TEXT_2); - assertThat(values.containsKey(ANY_COLUMN_NAME_1)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_1).getAsBoolean()).isTrue(); - assertThat(values.containsKey(ANY_COLUMN_NAME_2)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_2).getAsInt()).isEqualTo(Integer.MAX_VALUE); - assertThat(values.containsKey(ANY_COLUMN_NAME_3)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_3).getAsLong()).isEqualTo(BigIntValue.MAX_VALUE); - assertThat(values.containsKey(ANY_COLUMN_NAME_4)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_4).getAsFloat()).isEqualTo(Float.MAX_VALUE); - assertThat(values.containsKey(ANY_COLUMN_NAME_5)).isTrue(); - 
assertThat(values.get(ANY_COLUMN_NAME_5).getAsDouble()).isEqualTo(Double.MAX_VALUE); - assertThat(values.containsKey(ANY_COLUMN_NAME_6)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_6).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_6).getAsString().get()).isEqualTo("string"); - assertThat(values.containsKey(ANY_COLUMN_NAME_7)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_7).getAsBytes().isPresent()).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_7).getAsBytes().get()) - .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); - + assertThat(result.contains(ANY_NAME_1)).isTrue(); assertThat(result.isNull(ANY_NAME_1)).isFalse(); assertThat(result.getText(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); + assertThat(result.contains(ANY_NAME_2)).isTrue(); assertThat(result.isNull(ANY_NAME_2)).isFalse(); assertThat(result.getText(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); + assertThat(result.contains(ANY_COLUMN_NAME_1)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_1)).isFalse(); assertThat(result.getBoolean(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(result.contains(ANY_COLUMN_NAME_2)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_2)).isFalse(); assertThat(result.getInt(ANY_COLUMN_NAME_2)).isEqualTo(Integer.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_3)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_3)).isFalse(); - assertThat(result.getBigInt(ANY_COLUMN_NAME_3)).isEqualTo(BigIntValue.MAX_VALUE); + assertThat(result.getBigInt(ANY_COLUMN_NAME_3)).isEqualTo(BigIntColumn.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_4)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_4)).isFalse(); assertThat(result.getFloat(ANY_COLUMN_NAME_4)).isEqualTo(Float.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_5)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_5)).isFalse(); assertThat(result.getDouble(ANY_COLUMN_NAME_5)).isEqualTo(Double.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_6)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_6)).isFalse(); assertThat(result.getText(ANY_COLUMN_NAME_6)).isEqualTo("string"); + assertThat(result.contains(ANY_COLUMN_NAME_7)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_7)).isFalse(); assertThat(result.getBlob(ANY_COLUMN_NAME_7)) .isEqualTo(ByteBuffer.wrap("bytes".getBytes(StandardCharsets.UTF_8))); assertThat(result.getBlobAsBytes(ANY_COLUMN_NAME_7)) .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); + assertThat(result.contains(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_8)).isFalse(); + assertThat(result.getDate(ANY_COLUMN_NAME_8)).isEqualTo(ANY_DATE); + assertThat(result.contains(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_9)).isFalse(); + assertThat(result.getTime(ANY_COLUMN_NAME_9)).isEqualTo(ANY_TIME); + assertThat(result.contains(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_10)).isFalse(); + assertThat(result.getTimestampTZ(ANY_COLUMN_NAME_10)).isEqualTo(ANY_TIMESTAMPTZ); + + Map> columns = result.getColumns(); + assertThat(columns.containsKey(ANY_NAME_1)).isTrue(); + assertThat(columns.get(ANY_NAME_1).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_1).getTextValue()).isEqualTo(ANY_TEXT_1); + assertThat(columns.containsKey(ANY_NAME_2)).isTrue(); + assertThat(columns.get(ANY_NAME_2).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_2).getTextValue()).isEqualTo(ANY_TEXT_2); + assertThat(columns.containsKey(ANY_COLUMN_NAME_1)).isTrue(); + 
assertThat(columns.get(ANY_COLUMN_NAME_1).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_1).getBooleanValue()).isTrue(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_2)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_2).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_2).getIntValue()).isEqualTo(Integer.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_3)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_3).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_3).getBigIntValue()).isEqualTo(BigIntColumn.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_4)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_4).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_4).getFloatValue()).isEqualTo(Float.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_5)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_5).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_5).getDoubleValue()).isEqualTo(Double.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_6)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_6).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_6).getTextValue()).isEqualTo("string"); + assertThat(columns.containsKey(ANY_COLUMN_NAME_7)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_7).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsBytes()) + .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsByteBuffer()) + .isEqualTo(ByteBuffer.wrap("bytes".getBytes(StandardCharsets.UTF_8))); + assertThat(columns.containsKey(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_8).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_8).getDateValue()).isEqualTo(ANY_DATE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_9).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_9).getTimeValue()).isEqualTo(ANY_TIME); + assertThat(columns.containsKey(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_10).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_10).getTimestampTZValue()).isEqualTo(ANY_TIMESTAMPTZ); } @Test @@ -170,78 +203,116 @@ public void interpret_ShouldReturnWhatsSetWithNullValues() { when(row.getString(ANY_COLUMN_NAME_6)).thenReturn(null); when(row.isNull(ANY_COLUMN_NAME_7)).thenReturn(true); when(row.getBytes(ANY_COLUMN_NAME_7)).thenReturn(null); - + when(row.isNull(ANY_COLUMN_NAME_8)).thenReturn(true); + when(row.getDate(ANY_COLUMN_NAME_8)).thenReturn(null); + when(row.isNull(ANY_COLUMN_NAME_9)).thenReturn(true); + when(row.getTime(ANY_COLUMN_NAME_9)).thenReturn(0L); + when(row.isNull(ANY_COLUMN_NAME_10)).thenReturn(true); + when(row.getTimestamp(ANY_COLUMN_NAME_10)).thenReturn(null); List projections = Collections.emptyList(); - ResultInterpreter spy = spy(new ResultInterpreter(projections, TABLE_METADATA)); + ResultInterpreter interpreter = new ResultInterpreter(projections, TABLE_METADATA); // Act - Result result = spy.interpret(row); + Result result = interpreter.interpret(row); // Assert - assertThat(result.getValue(ANY_NAME_1).isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_1).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_1).get().getAsString().get()).isEqualTo(ANY_TEXT_1); - assertThat(result.getValue(ANY_NAME_2).isPresent()).isTrue(); - 
assertThat(result.getValue(ANY_NAME_2).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_2).get().getAsString().get()).isEqualTo(ANY_TEXT_2); - - assertThat(result.getValue(ANY_COLUMN_NAME_1).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_1).get().getAsBoolean()).isFalse(); - assertThat(result.getValue(ANY_COLUMN_NAME_2).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_2).get().getAsInt()).isEqualTo(0); - assertThat(result.getValue(ANY_COLUMN_NAME_3).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_3).get().getAsLong()).isEqualTo(0L); - assertThat(result.getValue(ANY_COLUMN_NAME_4).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_4).get().getAsFloat()).isEqualTo(0.0F); - assertThat(result.getValue(ANY_COLUMN_NAME_5).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_5).get().getAsDouble()).isEqualTo(0.0D); - assertThat(result.getValue(ANY_COLUMN_NAME_6).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_6).get().getAsString()).isNotPresent(); - assertThat(result.getValue(ANY_COLUMN_NAME_7).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_7).get().getAsBytes()).isNotPresent(); - - Map> values = result.getValues(); - assertThat(values.containsKey(ANY_NAME_1)).isTrue(); - assertThat(values.get(ANY_NAME_1).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_NAME_1).getAsString().get()).isEqualTo(ANY_TEXT_1); - assertThat(values.containsKey(ANY_NAME_2)).isTrue(); - assertThat(values.get(ANY_NAME_2).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_NAME_2).getAsString().get()).isEqualTo(ANY_TEXT_2); - - assertThat(values.containsKey(ANY_COLUMN_NAME_1)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_1).getAsBoolean()).isFalse(); - assertThat(values.containsKey(ANY_COLUMN_NAME_2)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_2).getAsInt()).isEqualTo(0); - assertThat(values.containsKey(ANY_COLUMN_NAME_3)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_3).getAsLong()).isEqualTo(0L); - assertThat(values.containsKey(ANY_COLUMN_NAME_4)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_4).getAsFloat()).isEqualTo(0.0F); - assertThat(values.containsKey(ANY_COLUMN_NAME_5)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_5).getAsDouble()).isEqualTo(0.0D); - assertThat(values.containsKey(ANY_COLUMN_NAME_6)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_6).getAsString()).isNotPresent(); - assertThat(values.containsKey(ANY_COLUMN_NAME_7)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_7).getAsBytes()).isNotPresent(); - + assertThat(result.contains(ANY_NAME_1)).isTrue(); assertThat(result.isNull(ANY_NAME_1)).isFalse(); assertThat(result.getText(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); + assertThat(result.contains(ANY_NAME_2)).isTrue(); assertThat(result.isNull(ANY_NAME_2)).isFalse(); assertThat(result.getText(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); + assertThat(result.contains(ANY_COLUMN_NAME_1)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_1)).isTrue(); assertThat(result.getBoolean(ANY_COLUMN_NAME_1)).isFalse(); + assertThat(result.contains(ANY_COLUMN_NAME_2)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_2)).isTrue(); assertThat(result.getInt(ANY_COLUMN_NAME_2)).isEqualTo(0); + assertThat(result.contains(ANY_COLUMN_NAME_3)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_3)).isTrue(); assertThat(result.getBigInt(ANY_COLUMN_NAME_3)).isEqualTo(0L); + 
assertThat(result.contains(ANY_COLUMN_NAME_4)).isTrue();
assertThat(result.isNull(ANY_COLUMN_NAME_4)).isTrue();
assertThat(result.getFloat(ANY_COLUMN_NAME_4)).isEqualTo(0.0F);
+ assertThat(result.contains(ANY_COLUMN_NAME_5)).isTrue();
assertThat(result.isNull(ANY_COLUMN_NAME_5)).isTrue();
assertThat(result.getDouble(ANY_COLUMN_NAME_5)).isEqualTo(0.0D);
+ assertThat(result.contains(ANY_COLUMN_NAME_6)).isTrue();
assertThat(result.isNull(ANY_COLUMN_NAME_6)).isTrue();
assertThat(result.getText(ANY_COLUMN_NAME_6)).isNull();
+ assertThat(result.contains(ANY_COLUMN_NAME_7)).isTrue();
assertThat(result.isNull(ANY_COLUMN_NAME_7)).isTrue();
assertThat(result.getBlob(ANY_COLUMN_NAME_7)).isNull();
- assertThat(result.getBlobAsBytes(ANY_COLUMN_NAME_7)).isNull();
+ assertThat(result.contains(ANY_COLUMN_NAME_8)).isTrue();
+ assertThat(result.isNull(ANY_COLUMN_NAME_8)).isTrue();
+ assertThat(result.getDate(ANY_COLUMN_NAME_8)).isNull();
+ assertThat(result.contains(ANY_COLUMN_NAME_9)).isTrue();
+ assertThat(result.isNull(ANY_COLUMN_NAME_9)).isTrue();
+ assertThat(result.getTime(ANY_COLUMN_NAME_9)).isNull();
+ assertThat(result.contains(ANY_COLUMN_NAME_10)).isTrue();
+ assertThat(result.isNull(ANY_COLUMN_NAME_10)).isTrue();
+ assertThat(result.getTimestampTZ(ANY_COLUMN_NAME_10)).isNull();
+
+ Map<String, Column<?>> columns = result.getColumns();
+ assertThat(columns.containsKey(ANY_NAME_1)).isTrue();
+ assertThat(columns.get(ANY_NAME_1).hasNullValue()).isFalse();
+ assertThat(columns.get(ANY_NAME_1).getTextValue()).isEqualTo(ANY_TEXT_1);
+ assertThat(columns.containsKey(ANY_NAME_2)).isTrue();
+ assertThat(columns.get(ANY_NAME_2).hasNullValue()).isFalse();
+ assertThat(columns.get(ANY_NAME_2).getTextValue()).isEqualTo(ANY_TEXT_2);
+ assertThat(columns.containsKey(ANY_COLUMN_NAME_1)).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_1).hasNullValue()).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_1).getBooleanValue()).isFalse();
+ assertThat(columns.containsKey(ANY_COLUMN_NAME_2)).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_2).hasNullValue()).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_2).getIntValue()).isEqualTo(0);
+ assertThat(columns.containsKey(ANY_COLUMN_NAME_3)).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_3).hasNullValue()).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_3).getBigIntValue()).isEqualTo(0L);
+ assertThat(columns.containsKey(ANY_COLUMN_NAME_4)).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_4).hasNullValue()).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_4).getFloatValue()).isEqualTo(0.0F);
+ assertThat(columns.containsKey(ANY_COLUMN_NAME_5)).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_5).hasNullValue()).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_5).getDoubleValue()).isEqualTo(0.0D);
+ assertThat(columns.containsKey(ANY_COLUMN_NAME_6)).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_6).hasNullValue()).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_6).getTextValue()).isNull();
+ assertThat(columns.containsKey(ANY_COLUMN_NAME_7)).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_7).hasNullValue()).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsBytes()).isNull();
+ assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsByteBuffer()).isNull();
+ assertThat(columns.containsKey(ANY_COLUMN_NAME_8)).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_8).hasNullValue()).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_8).getDateValue()).isNull();
+ assertThat(columns.containsKey(ANY_COLUMN_NAME_9)).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_9).hasNullValue()).isTrue();
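+ // Assumption about ResultInterpreter, not verified here: the Arrange stub returns 0L for
+ // row.getTime, but the interpreter presumably consults row.isNull first, so the TIME column
+ // still surfaces as null:
+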
assertThat(columns.get(ANY_COLUMN_NAME_9).getTimeValue()).isNull();
+ assertThat(columns.containsKey(ANY_COLUMN_NAME_10)).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_10).hasNullValue()).isTrue();
+ assertThat(columns.get(ANY_COLUMN_NAME_10).getTimestampTZValue()).isNull();
+ }
+
+ @Test
+ public void interpret_TimestampType_ShouldThrowUnsupportedOperationException() {
+ // Arrange
+ TableMetadata tableMetadata =
+ TableMetadata.newBuilder()
+ .addColumn(ANY_NAME_1, DataType.TEXT)
+ .addColumn(ANY_NAME_2, DataType.TEXT)
+ .addColumn(ANY_COLUMN_NAME_1, DataType.TIMESTAMP)
+ .addPartitionKey(ANY_NAME_1)
+ .addClusteringKey(ANY_NAME_2)
+ .build();
+
+ when(row.getString(ANY_NAME_1)).thenReturn(ANY_TEXT_1);
+ when(row.getString(ANY_NAME_2)).thenReturn(ANY_TEXT_2);
+ when(row.getTimestamp(ANY_COLUMN_NAME_1)).thenReturn(Date.from(TimestampTZColumn.MAX_VALUE));
+
+ ResultInterpreter interpreter = new ResultInterpreter(Collections.emptyList(), tableMetadata);
+
+ // Act Assert
+ assertThatThrownBy(() -> interpreter.interpret(row))
+ .isInstanceOf(UnsupportedOperationException.class);
+ }
}
diff --git a/core/src/test/java/com/scalar/db/storage/cassandra/ValueBinderTest.java b/core/src/test/java/com/scalar/db/storage/cassandra/ValueBinderTest.java index 0dc8ba0821..9f89beaf8c 100644 --- a/core/src/test/java/com/scalar/db/storage/cassandra/ValueBinderTest.java +++ b/core/src/test/java/com/scalar/db/storage/cassandra/ValueBinderTest.java
@@ -7,12 +7,21 @@
import com.scalar.db.io.BigIntColumn;
import com.scalar.db.io.BlobColumn;
import com.scalar.db.io.BooleanColumn;
+import com.scalar.db.io.DateColumn;
import com.scalar.db.io.DoubleColumn;
import com.scalar.db.io.FloatColumn;
import com.scalar.db.io.IntColumn;
import com.scalar.db.io.TextColumn;
+import com.scalar.db.io.TimeColumn;
+import com.scalar.db.io.TimestampColumn;
+import com.scalar.db.io.TimestampTZColumn;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
+import java.time.Instant;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.util.Date;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.Mock;
@@ -26,6 +35,11 @@ public class ValueBinderTest {
private static final float ANY_FLOAT = 1.0f;
private static final double ANY_DOUBLE = 1.0;
private static final String ANY_STRING = "1";
+ private static final LocalDate ANY_DATE = LocalDate.ofEpochDay(1);
+ private static final LocalTime ANY_TIME = LocalTime.ofSecondOfDay(1);
+ private static final LocalDateTime ANY_TIMESTAMP =
+ LocalDateTime.of(LocalDate.ofEpochDay(1), LocalTime.ofSecondOfDay(1));
+ private static final Instant ANY_TIMESTAMPTZ = Instant.ofEpochSecond(1);
+
@Mock private BoundStatement bound;
@@ -222,6 +236,110 @@ public void visit_BlobColumnWithNullValueAcceptCalled_ShouldCallSetToNull() { verify(bound).setToNull(0); }
+ @Test
+ public void visit_DateColumnAcceptCalled_ShouldCallSetDate() {
+ // Arrange
+ DateColumn column = DateColumn.of(ANY_NAME, ANY_DATE);
+ ValueBinder binder = new ValueBinder(bound);
+
+ // Act
+ column.accept(binder);
+
+ // Assert
+ verify(bound)
+ .setDate(
+ 0,
+ com.datastax.driver.core.LocalDate.fromYearMonthDay(
+ ANY_DATE.getYear(), ANY_DATE.getMonthValue(), ANY_DATE.getDayOfMonth()));
+ }
+
+ @Test
+ public void visit_DateColumnWithNullValueAcceptCalled_ShouldCallSetToNull() {
+ // Arrange
+ DateColumn column = DateColumn.ofNull(ANY_NAME);
+ ValueBinder binder = new ValueBinder(bound);
+
+ // Act
+ column.accept(binder);
+
+ // Assert
+ verify(bound).setToNull(0);
+ }
+
+ @Test
+ public void visit_TimeColumnAcceptCalled_ShouldCallSetTime() {
+ // Arrange
+ TimeColumn column = TimeColumn.of(ANY_NAME, ANY_TIME);
+ ValueBinder binder = new ValueBinder(bound);
+
+ // Act
+ column.accept(binder);
+
+ // Assert
+ verify(bound).setTime(0, ANY_TIME.toNanoOfDay());
+ }
+
+ @Test
+ public void visit_TimeColumnWithNullValueAcceptCalled_ShouldCallSetToNull() {
+ // Arrange
+ TimeColumn column = TimeColumn.ofNull(ANY_NAME);
+ ValueBinder binder = new ValueBinder(bound);
+
+ // Act
+ column.accept(binder);
+
+ // Assert
+ verify(bound).setToNull(0);
+ }
+
+ @Test
+ public void visit_TimestampColumnAcceptCalled_ShouldThrowUnsupportedOperationException() {
+ // Arrange
+ TimestampColumn column = TimestampColumn.of(ANY_NAME, ANY_TIMESTAMP);
+ ValueBinder binder = new ValueBinder(bound);
+
+ // Act Assert
+ assertThatThrownBy(() -> column.accept(binder))
+ .isInstanceOf(UnsupportedOperationException.class);
+ }
+
+ @Test
+ public void visit_TimestampColumnWithNullValueAcceptCalled_ShouldThrowUnsupportedOperationException() {
+ // Arrange
+ TimestampColumn column = TimestampColumn.ofNull(ANY_NAME);
+ ValueBinder binder = new ValueBinder(bound);
+
+ // Act Assert
+ assertThatThrownBy(() -> column.accept(binder))
+ .isInstanceOf(UnsupportedOperationException.class);
+ }
+
+ @Test
+ public void visit_TimestampTZColumnAcceptCalled_ShouldCallSetTimestamp() {
+ // Arrange
+ TimestampTZColumn column = TimestampTZColumn.of(ANY_NAME, ANY_TIMESTAMPTZ);
+ ValueBinder binder = new ValueBinder(bound);
+
+ // Act
+ column.accept(binder);
+
+ // Assert
+ verify(bound).setTimestamp(0, Date.from(ANY_TIMESTAMPTZ));
+ }
+
+ @Test
+ public void visit_TimestampTZColumnWithNullValueAcceptCalled_ShouldCallSetToNull() {
+ // Arrange
+ TimestampTZColumn column = TimestampTZColumn.ofNull(ANY_NAME);
+ ValueBinder binder = new ValueBinder(bound);
+
+ // Act
+ column.accept(binder);
+
+ // Assert
+ verify(bound).setToNull(0);
+ }
+
@Test public void visit_AcceptCalledMultipleTimes_ShouldCallSetWithIncremented() { // Arrange
diff --git a/core/src/test/java/com/scalar/db/storage/cosmos/ConcatenationVisitorTest.java b/core/src/test/java/com/scalar/db/storage/cosmos/ConcatenationVisitorTest.java index f05b3238cc..249d893767 100644 --- a/core/src/test/java/com/scalar/db/storage/cosmos/ConcatenationVisitorTest.java +++ b/core/src/test/java/com/scalar/db/storage/cosmos/ConcatenationVisitorTest.java
@@ -3,13 +3,17 @@
import static org.assertj.core.api.Assertions.assertThat;
import com.scalar.db.io.BigIntColumn;
-import com.scalar.db.io.BigIntValue;
import com.scalar.db.io.BlobColumn;
import com.scalar.db.io.BooleanColumn;
+import com.scalar.db.io.DateColumn;
import com.scalar.db.io.DoubleColumn;
import com.scalar.db.io.FloatColumn;
import com.scalar.db.io.IntColumn;
import com.scalar.db.io.TextColumn;
+import com.scalar.db.io.TimeColumn;
+import com.scalar.db.io.TimestampColumn;
+import com.scalar.db.io.TimestampTZColumn;
+import com.scalar.db.util.TimeRelatedColumnEncodingUtils;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import org.junit.jupiter.api.BeforeEach;
@@ -21,7 +25,7 @@ public class ConcatenationVisitorTest {
BooleanColumn.of("any_boolean", ANY_BOOLEAN);
private static final int ANY_INT = Integer.MIN_VALUE;
private static final IntColumn ANY_INT_COLUMN = IntColumn.of("any_int", ANY_INT);
- private static final long ANY_BIGINT = BigIntValue.MAX_VALUE;
+ private static final long ANY_BIGINT = BigIntColumn.MAX_VALUE;
private static final BigIntColumn ANY_BIGINT_COLUMN =
BigIntColumn.of("any_bigint", ANY_BIGINT); private static final float ANY_FLOAT = Float.MIN_NORMAL; private static final FloatColumn ANY_FLOAT_COLUMN = FloatColumn.of("any_float", ANY_FLOAT); @@ -31,6 +35,12 @@ public class ConcatenationVisitorTest { private static final TextColumn ANY_TEXT_COLUMN = TextColumn.of("any_text", ANY_TEXT); private static final byte[] ANY_BLOB = "scalar".getBytes(StandardCharsets.UTF_8); private static final BlobColumn ANY_BLOB_COLUMN = BlobColumn.of("any_blob", ANY_BLOB); + private static final DateColumn ANY_DATE_COLUMN = DateColumn.of("any_date", DateColumn.MAX_VALUE); + private static final TimeColumn ANY_TIME_COLUMN = TimeColumn.of("any_time", TimeColumn.MAX_VALUE); + private static final TimestampColumn ANY_TIMESTAMP_COLUMN = + TimestampColumn.of("any_timestamp", TimestampColumn.MAX_VALUE); + private static final TimestampTZColumn ANY_TIMESTAMPTZ_COLUMN = + TimestampTZColumn.of("any_timestamp_tz", TimestampTZColumn.MAX_VALUE); private ConcatenationVisitor visitor; @BeforeEach @@ -48,11 +58,15 @@ public void build_AllTypesGiven_ShouldBuildString() { visitor.visit(ANY_DOUBLE_COLUMN); visitor.visit(ANY_TEXT_COLUMN); visitor.visit(ANY_BLOB_COLUMN); + visitor.visit(ANY_DATE_COLUMN); + visitor.visit(ANY_TIME_COLUMN); + visitor.visit(ANY_TIMESTAMP_COLUMN); + visitor.visit(ANY_TIMESTAMPTZ_COLUMN); String actual = visitor.build(); // Assert String[] values = actual.split(":", -1); - assertThat(values.length).isEqualTo(7); + assertThat(values.length).isEqualTo(11); assertThat(values[0]).isEqualTo(String.valueOf(ANY_BOOLEAN)); assertThat(values[1]).isEqualTo(String.valueOf(ANY_INT)); assertThat(values[2]).isEqualTo(String.valueOf(ANY_BIGINT)); @@ -61,6 +75,14 @@ public void build_AllTypesGiven_ShouldBuildString() { assertThat(values[5]).isEqualTo(ANY_TEXT); assertThat(values[6]) .isEqualTo(Base64.getUrlEncoder().withoutPadding().encodeToString(ANY_BLOB)); + assertThat(values[7]) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_DATE_COLUMN))); + assertThat(values[8]) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_TIME_COLUMN))); + assertThat(values[9]) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_TIMESTAMP_COLUMN))); + assertThat(values[10]) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_TIMESTAMPTZ_COLUMN))); } @Test @@ -126,4 +148,44 @@ public void visit_BlobColumnAcceptCalled_ShouldBuildBlobAsString() { assertThat(visitor.build()) .isEqualTo(Base64.getUrlEncoder().withoutPadding().encodeToString(ANY_BLOB)); } + + @Test + public void visit_DateColumnAcceptCalled_ShouldBuildDateAsString() { + // Act + ANY_DATE_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_DATE_COLUMN))); + } + + @Test + public void visit_TimeColumnAcceptCalled_ShouldBuildTimeAsString() { + // Act + ANY_TIME_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_TIME_COLUMN))); + } + + @Test + public void visit_TimestampColumnAcceptCalled_ShouldBuildTimestampAsString() { + // Act + ANY_TIMESTAMP_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_TIMESTAMP_COLUMN))); + } + + @Test + public void visit_TimestampTZColumnAcceptCalled_ShouldBuildTimestampTZAsString() { + // Act + ANY_TIMESTAMPTZ_COLUMN.accept(visitor); + + // Assert + 
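// build() joins every visited value with ':' into one key string, using
// URL-safe unpadded Base64 for blobs and the TimeRelatedColumnEncodingUtils
// long for the four time-related types, which is why the all-types test above
// now expects 11 segments. Usage, as exercised in these tests:
//
//   visitor.visit(ANY_TIMESTAMPTZ_COLUMN);
//   String key = visitor.build();
//   String[] segments = key.split(":", -1); // one segment per visited column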
assertThat(visitor.build()) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_TIMESTAMPTZ_COLUMN))); + } } diff --git a/core/src/test/java/com/scalar/db/storage/cosmos/CosmosAdminTest.java b/core/src/test/java/com/scalar/db/storage/cosmos/CosmosAdminTest.java index e9a84509b4..bfeb562889 100644 --- a/core/src/test/java/com/scalar/db/storage/cosmos/CosmosAdminTest.java +++ b/core/src/test/java/com/scalar/db/storage/cosmos/CosmosAdminTest.java @@ -41,6 +41,7 @@ import com.scalar.db.io.DataType; import java.util.Collections; import java.util.Iterator; +import java.util.Map; import java.util.Set; import java.util.function.Consumer; import java.util.stream.Stream; @@ -92,11 +93,27 @@ public void getTableMetadata_ShouldReturnCorrectTableMetadata() throws Execution any(PartitionKey.class), ArgumentMatchers.>any())) .thenReturn(response); - + Map columnsMap = + new ImmutableMap.Builder() + .put("c1", "int") + .put("c2", "text") + .put("c3", "bigint") + .put("c4", "boolean") + .put("c5", "blob") + .put("c6", "float") + .put("c7", "double") + .put("c8", "date") + .put("c9", "time") + .put("c10", "timestamp") + .put("c11", "timestamptz") + .build(); CosmosTableMetadata cosmosTableMetadata = CosmosTableMetadata.newBuilder() .partitionKeyNames(Sets.newLinkedHashSet("c1")) - .columns(ImmutableMap.of("c1", "int", "c2", "text", "c3", "bigint")) + .clusteringKeyNames(Sets.newLinkedHashSet("c2", "c3")) + .clusteringOrders(ImmutableMap.of("c2", "ASC", "c3", "DESC")) + .secondaryIndexNames(ImmutableSet.of("c4", "c9")) + .columns(columnsMap) .build(); when(response.getItem()).thenReturn(cosmosTableMetadata); @@ -108,10 +125,22 @@ public void getTableMetadata_ShouldReturnCorrectTableMetadata() throws Execution assertThat(actual) .isEqualTo( TableMetadata.newBuilder() + .addPartitionKey("c1") + .addClusteringKey("c2", Order.ASC) + .addClusteringKey("c3", Order.DESC) + .addSecondaryIndex("c4") + .addSecondaryIndex("c9") .addColumn("c1", DataType.INT) .addColumn("c2", DataType.TEXT) .addColumn("c3", DataType.BIGINT) - .addPartitionKey("c1") + .addColumn("c4", DataType.BOOLEAN) + .addColumn("c5", DataType.BLOB) + .addColumn("c6", DataType.FLOAT) + .addColumn("c7", DataType.DOUBLE) + .addColumn("c8", DataType.DATE) + .addColumn("c9", DataType.TIME) + .addColumn("c10", DataType.TIMESTAMP) + .addColumn("c11", DataType.TIMESTAMPTZ) .build()); verify(client).getDatabase(METADATA_DATABASE); @@ -242,6 +271,10 @@ public void createTable_ShouldCreateContainer() throws ExecutionException { .addColumn("c5", DataType.INT) .addColumn("c6", DataType.DOUBLE) .addColumn("c7", DataType.FLOAT) + .addColumn("c8", DataType.DATE) + .addColumn("c9", DataType.TIME) + .addColumn("c10", DataType.TIMESTAMP) + .addColumn("c11", DataType.TIMESTAMPTZ) .addSecondaryIndex("c4") .build(); @@ -318,6 +351,10 @@ public void createTable_ShouldCreateContainer() throws ExecutionException { .put("c5", "int") .put("c6", "double") .put("c7", "float") + .put("c8", "date") + .put("c9", "time") + .put("c10", "timestamp") + .put("c11", "timestamptz") .build()) .secondaryIndexNames(ImmutableSet.of("c4")) .build(); @@ -340,6 +377,9 @@ public void createTable_WithoutClusteringKeys_ShouldCreateContainerWithComposite .addColumn("c5", DataType.INT) .addColumn("c6", DataType.DOUBLE) .addColumn("c7", DataType.FLOAT) + .addColumn("c8", DataType.DATE) + .addColumn("c9", DataType.TIME) + .addColumn("c10", DataType.TIMESTAMPTZ) .addSecondaryIndex("c4") .build(); @@ -404,6 +444,9 @@ public void 
createTable_WithoutClusteringKeys_ShouldCreateContainerWithComposite .put("c5", "int") .put("c6", "double") .put("c7", "float") + .put("c8", "date") + .put("c9", "time") + .put("c10", "timestamptz") .build()) .build(); verify(metadataContainer).upsertItem(cosmosTableMetadata); @@ -1032,11 +1075,16 @@ public void unsupportedOperations_ShouldThrowUnsupportedException() { String column = "col"; // Act - Throwable thrown1 = catchThrowable(() -> admin.getImportTableMetadata(namespace, table)); + Throwable thrown1 = + catchThrowable( + () -> admin.getImportTableMetadata(namespace, table, Collections.emptyMap())); Throwable thrown2 = catchThrowable(() -> admin.addRawColumnToTable(namespace, table, column, DataType.INT)); Throwable thrown3 = - catchThrowable(() -> admin.importTable(namespace, table, Collections.emptyMap())); + catchThrowable( + () -> + admin.importTable( + namespace, table, Collections.emptyMap(), Collections.emptyMap())); // Assert assertThat(thrown1).isInstanceOf(UnsupportedOperationException.class); diff --git a/core/src/test/java/com/scalar/db/storage/cosmos/MapVisitorTest.java b/core/src/test/java/com/scalar/db/storage/cosmos/MapVisitorTest.java index a13c26da42..bd45f340c7 100644 --- a/core/src/test/java/com/scalar/db/storage/cosmos/MapVisitorTest.java +++ b/core/src/test/java/com/scalar/db/storage/cosmos/MapVisitorTest.java @@ -5,12 +5,21 @@ import com.scalar.db.io.BigIntColumn; import com.scalar.db.io.BlobColumn; import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -30,6 +39,17 @@ public class MapVisitorTest { private static final TextColumn ANY_TEXT_COLUMN = TextColumn.of("any_text", ANY_TEXT); private static final byte[] ANY_BLOB = ANY_TEXT.getBytes(StandardCharsets.UTF_8); private static final BlobColumn ANY_BLOB_COLUMN = BlobColumn.of("any_blob", ANY_BLOB); + private static final LocalDate ANY_DATE = DateColumn.MAX_VALUE; + private static final DateColumn ANY_DATE_COLUMN = DateColumn.of("any_date", ANY_DATE); + private static final LocalTime ANY_TIME = TimeColumn.MAX_VALUE; + private static final TimeColumn ANY_TIME_COLUMN = TimeColumn.of("any_time", ANY_TIME); + private static final LocalDateTime ANY_TIMESTAMP = TimestampColumn.MAX_VALUE; + private static final TimestampColumn ANY_TIMESTAMP_COLUMN = + TimestampColumn.of("any_timestamp", ANY_TIMESTAMP); + private static final Instant ANY_TIMESTAMPTZ = TimestampTZColumn.MAX_VALUE; + private static final TimestampTZColumn ANY_TIMESTAMPTZ_COLUMN = + TimestampTZColumn.of("any_timestamptz", ANY_TIMESTAMPTZ); + private MapVisitor visitor; @BeforeEach @@ -174,4 +194,84 @@ public void visit_BlobColumnWithNullValueAcceptCalled_ShouldGetMap() { assertThat(visitor.get().containsKey("any_blob")).isTrue(); assertThat(visitor.get().get("any_blob")).isNull(); } + + @Test + public void visit_DateColumnAcceptCalled_ShouldGetMap() { + // Act + ANY_DATE_COLUMN.accept(visitor); + + // Assert + 
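// MapVisitor stores each time-related column as the single long produced by
// TimeRelatedColumnEncodingUtils.encode rather than as an ISO string,
// presumably so the stored numbers compare in the same order as the values.
// The real encoding lives in com.scalar.db.util; a plausible epoch-based
// sketch, hypothetical and for illustration only:
//
//   long encodeDate(LocalDate d) { return d.toEpochDay(); }        // days
//   long encodeTime(LocalTime t) { return t.toNanoOfDay(); }       // nanos
//   long encodeTimestampTZ(Instant i) { return i.toEpochMilli(); } // millis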
assertThat(visitor.get().get(ANY_DATE_COLUMN.getName())) + .isEqualTo(TimeRelatedColumnEncodingUtils.encode(ANY_DATE_COLUMN)); + } + + @Test + public void visit_DateColumnWithNullValueAcceptCalled_ShouldGetMap() { + // Act + DateColumn.ofNull("any_date").accept(visitor); + + // Assert + assertThat(visitor.get().containsKey("any_date")).isTrue(); + assertThat(visitor.get().get("any_date")).isNull(); + } + + @Test + public void visit_TimeColumnAcceptCalled_ShouldGetMap() { + // Act + ANY_TIME_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.get().get(ANY_TIME_COLUMN.getName())) + .isEqualTo(TimeRelatedColumnEncodingUtils.encode(ANY_TIME_COLUMN)); + } + + @Test + public void visit_TimeColumnWithNullValueAcceptCalled_ShouldGetMap() { + // Act + TimeColumn.ofNull("any_time").accept(visitor); + + // Assert + assertThat(visitor.get().containsKey("any_time")).isTrue(); + assertThat(visitor.get().get("any_time")).isNull(); + } + + @Test + public void visit_TimestampColumnAcceptCalled_ShouldGetMap() { + // Act + ANY_TIMESTAMP_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.get().get(ANY_TIMESTAMP_COLUMN.getName())) + .isEqualTo(TimeRelatedColumnEncodingUtils.encode(ANY_TIMESTAMP_COLUMN)); + } + + @Test + public void visit_TimestampColumnWithNullValueAcceptCalled_ShouldGetMap() { + // Act + TimestampColumn.ofNull("any_timestamp").accept(visitor); + + // Assert + assertThat(visitor.get().containsKey("any_timestamp")).isTrue(); + assertThat(visitor.get().get("any_timestamp")).isNull(); + } + + @Test + public void visit_TimestampTZColumnAcceptCalled_ShouldGetMap() { + // Act + ANY_TIMESTAMPTZ_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.get().get(ANY_TIMESTAMPTZ_COLUMN.getName())) + .isEqualTo(TimeRelatedColumnEncodingUtils.encode(ANY_TIMESTAMPTZ_COLUMN)); + } + + @Test + public void visit_TimestampTZColumnWithNullValueAcceptCalled_ShouldGetMap() { + // Act + TimestampTZColumn.ofNull("any_timestamptz").accept(visitor); + + // Assert + assertThat(visitor.get().containsKey("any_timestamptz")).isTrue(); + assertThat(visitor.get().get("any_timestamptz")).isNull(); + } } diff --git a/core/src/test/java/com/scalar/db/storage/cosmos/ResultInterpreterTest.java b/core/src/test/java/com/scalar/db/storage/cosmos/ResultInterpreterTest.java index 64c9316a4b..bb12467e30 100644 --- a/core/src/test/java/com/scalar/db/storage/cosmos/ResultInterpreterTest.java +++ b/core/src/test/java/com/scalar/db/storage/cosmos/ResultInterpreterTest.java @@ -1,15 +1,24 @@ package com.scalar.db.storage.cosmos; import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.spy; import com.google.common.collect.ImmutableMap; import com.scalar.db.api.Result; import com.scalar.db.api.TableMetadata; -import com.scalar.db.io.BigIntValue; -import com.scalar.db.io.Value; +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.Base64; import java.util.Collections; import java.util.HashMap; @@ -30,6 +39,10 @@ public class ResultInterpreterTest { private static final String ANY_COLUMN_NAME_5 = "col5"; private static 
final String ANY_COLUMN_NAME_6 = "col6"; private static final String ANY_COLUMN_NAME_7 = "col7"; + private static final String ANY_COLUMN_NAME_8 = "col8"; + private static final String ANY_COLUMN_NAME_9 = "col9"; + private static final String ANY_COLUMN_NAME_10 = "col10"; + private static final String ANY_COLUMN_NAME_11 = "col11"; private static final String ANY_ID_1 = "id"; private static final TableMetadata TABLE_METADATA = @@ -43,10 +56,19 @@ public class ResultInterpreterTest { .addColumn(ANY_COLUMN_NAME_5, com.scalar.db.io.DataType.DOUBLE) .addColumn(ANY_COLUMN_NAME_6, com.scalar.db.io.DataType.TEXT) .addColumn(ANY_COLUMN_NAME_7, com.scalar.db.io.DataType.BLOB) + .addColumn(ANY_COLUMN_NAME_8, DataType.DATE) + .addColumn(ANY_COLUMN_NAME_9, DataType.TIME) + .addColumn(ANY_COLUMN_NAME_10, DataType.TIMESTAMP) + .addColumn(ANY_COLUMN_NAME_11, DataType.TIMESTAMPTZ) .addPartitionKey(ANY_NAME_1) .addClusteringKey(ANY_NAME_2) .build(); + private static final LocalDate ANY_DATE = DateColumn.MAX_VALUE; + private static final LocalTime ANY_TIME = TimeColumn.MAX_VALUE; + private static final LocalDateTime ANY_TIMESTAMP = TimestampColumn.MAX_VALUE; + private static final Instant ANY_TIMESTAMPTZ = TimestampTZColumn.MAX_VALUE; + @Test public void interpret_ShouldReturnWhatsSet() { // Arrange @@ -54,13 +76,27 @@ public void interpret_ShouldReturnWhatsSet() { ImmutableMap.builder() .put(ANY_COLUMN_NAME_1, true) .put(ANY_COLUMN_NAME_2, Integer.MAX_VALUE) - .put(ANY_COLUMN_NAME_3, BigIntValue.MAX_VALUE) + .put(ANY_COLUMN_NAME_3, BigIntColumn.MAX_VALUE) .put(ANY_COLUMN_NAME_4, Float.MAX_VALUE) .put(ANY_COLUMN_NAME_5, Double.MAX_VALUE) .put(ANY_COLUMN_NAME_6, "string") .put( ANY_COLUMN_NAME_7, Base64.getEncoder().encodeToString("bytes".getBytes(StandardCharsets.UTF_8))) + .put( + ANY_COLUMN_NAME_8, + TimeRelatedColumnEncodingUtils.encode(DateColumn.of(ANY_COLUMN_NAME_8, ANY_DATE))) + .put( + ANY_COLUMN_NAME_9, + TimeRelatedColumnEncodingUtils.encode(TimeColumn.of(ANY_COLUMN_NAME_9, ANY_TIME))) + .put( + ANY_COLUMN_NAME_10, + TimeRelatedColumnEncodingUtils.encode( + TimestampColumn.of(ANY_COLUMN_NAME_10, ANY_TIMESTAMP))) + .put( + ANY_COLUMN_NAME_11, + TimeRelatedColumnEncodingUtils.encode( + TimestampTZColumn.of(ANY_COLUMN_NAME_11, ANY_TIMESTAMPTZ))) .build(); Record record = new Record( @@ -70,83 +106,98 @@ public void interpret_ShouldReturnWhatsSet() { ImmutableMap.of(ANY_NAME_2, ANY_TEXT_2), recordValues); List projections = Collections.emptyList(); - ResultInterpreter spy = spy(new ResultInterpreter(projections, TABLE_METADATA)); + ResultInterpreter interpreter = new ResultInterpreter(projections, TABLE_METADATA); // Act - Result result = spy.interpret(record); + Result result = interpreter.interpret(record); // Assert - assertThat(result.getValue(ANY_NAME_1).isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_1).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_1).get().getAsString().get()).isEqualTo(ANY_TEXT_1); - assertThat(result.getValue(ANY_NAME_2).isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_2).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_2).get().getAsString().get()).isEqualTo(ANY_TEXT_2); - assertThat(result.getValue(ANY_COLUMN_NAME_1).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_1).get().getAsBoolean()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_2).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_2).get().getAsInt()).isEqualTo(Integer.MAX_VALUE); - 
assertThat(result.getValue(ANY_COLUMN_NAME_3).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_3).get().getAsLong()) - .isEqualTo(BigIntValue.MAX_VALUE); - assertThat(result.getValue(ANY_COLUMN_NAME_4).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_4).get().getAsFloat()).isEqualTo(Float.MAX_VALUE); - assertThat(result.getValue(ANY_COLUMN_NAME_5).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_5).get().getAsDouble()).isEqualTo(Double.MAX_VALUE); - assertThat(result.getValue(ANY_COLUMN_NAME_6).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_6).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_6).get().getAsString().get()).isEqualTo("string"); - assertThat(result.getValue(ANY_COLUMN_NAME_7).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_7).get().getAsBytes().isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_7).get().getAsBytes().get()) - .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); - - Map> values = result.getValues(); - assertThat(values.containsKey(ANY_NAME_1)).isTrue(); - assertThat(values.get(ANY_NAME_1).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_NAME_1).getAsString().get()).isEqualTo(ANY_TEXT_1); - assertThat(values.containsKey(ANY_NAME_2)).isTrue(); - assertThat(values.get(ANY_NAME_2).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_NAME_2).getAsString().get()).isEqualTo(ANY_TEXT_2); - assertThat(values.containsKey(ANY_COLUMN_NAME_1)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_1).getAsBoolean()).isTrue(); - assertThat(values.containsKey(ANY_COLUMN_NAME_2)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_2).getAsInt()).isEqualTo(Integer.MAX_VALUE); - assertThat(values.containsKey(ANY_COLUMN_NAME_3)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_3).getAsLong()).isEqualTo(BigIntValue.MAX_VALUE); - assertThat(values.containsKey(ANY_COLUMN_NAME_4)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_4).getAsFloat()).isEqualTo(Float.MAX_VALUE); - assertThat(values.containsKey(ANY_COLUMN_NAME_5)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_5).getAsDouble()).isEqualTo(Double.MAX_VALUE); - assertThat(values.containsKey(ANY_COLUMN_NAME_6)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_6).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_6).getAsString().get()).isEqualTo("string"); - assertThat(values.containsKey(ANY_COLUMN_NAME_7)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_7).getAsBytes().isPresent()).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_7).getAsBytes().get()) - .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); - + assertThat(result.contains(ANY_NAME_1)).isTrue(); assertThat(result.isNull(ANY_NAME_1)).isFalse(); assertThat(result.getText(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); + assertThat(result.contains(ANY_NAME_2)).isTrue(); assertThat(result.isNull(ANY_NAME_2)).isFalse(); assertThat(result.getText(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); + assertThat(result.contains(ANY_COLUMN_NAME_1)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_1)).isFalse(); assertThat(result.getBoolean(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(result.contains(ANY_COLUMN_NAME_2)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_2)).isFalse(); assertThat(result.getInt(ANY_COLUMN_NAME_2)).isEqualTo(Integer.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_3)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_3)).isFalse(); - 
assertThat(result.getBigInt(ANY_COLUMN_NAME_3)).isEqualTo(BigIntValue.MAX_VALUE); + assertThat(result.getBigInt(ANY_COLUMN_NAME_3)).isEqualTo(BigIntColumn.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_4)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_4)).isFalse(); assertThat(result.getFloat(ANY_COLUMN_NAME_4)).isEqualTo(Float.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_5)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_5)).isFalse(); assertThat(result.getDouble(ANY_COLUMN_NAME_5)).isEqualTo(Double.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_6)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_6)).isFalse(); assertThat(result.getText(ANY_COLUMN_NAME_6)).isEqualTo("string"); + assertThat(result.contains(ANY_COLUMN_NAME_7)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_7)).isFalse(); assertThat(result.getBlob(ANY_COLUMN_NAME_7)) .isEqualTo(ByteBuffer.wrap("bytes".getBytes(StandardCharsets.UTF_8))); assertThat(result.getBlobAsBytes(ANY_COLUMN_NAME_7)) .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); + assertThat(result.contains(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_8)).isFalse(); + assertThat(result.getDate(ANY_COLUMN_NAME_8)).isEqualTo(ANY_DATE); + assertThat(result.contains(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_9)).isFalse(); + assertThat(result.getTime(ANY_COLUMN_NAME_9)).isEqualTo(ANY_TIME); + assertThat(result.contains(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_10)).isFalse(); + assertThat(result.getTimestamp(ANY_COLUMN_NAME_10)).isEqualTo(ANY_TIMESTAMP); + assertThat(result.contains(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_11)).isFalse(); + assertThat(result.getTimestampTZ(ANY_COLUMN_NAME_11)).isEqualTo(ANY_TIMESTAMPTZ); + + Map> columns = result.getColumns(); + assertThat(columns.containsKey(ANY_NAME_1)).isTrue(); + assertThat(columns.get(ANY_NAME_1).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_1).getTextValue()).isEqualTo(ANY_TEXT_1); + assertThat(columns.containsKey(ANY_NAME_2)).isTrue(); + assertThat(columns.get(ANY_NAME_2).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_2).getTextValue()).isEqualTo(ANY_TEXT_2); + assertThat(columns.containsKey(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_1).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_1).getBooleanValue()).isTrue(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_2)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_2).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_2).getIntValue()).isEqualTo(Integer.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_3)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_3).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_3).getBigIntValue()).isEqualTo(BigIntColumn.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_4)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_4).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_4).getFloatValue()).isEqualTo(Float.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_5)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_5).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_5).getDoubleValue()).isEqualTo(Double.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_6)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_6).hasNullValue()).isFalse(); + 
assertThat(columns.get(ANY_COLUMN_NAME_6).getTextValue()).isEqualTo("string"); + assertThat(columns.containsKey(ANY_COLUMN_NAME_7)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_7).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsBytes()) + .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsByteBuffer()) + .isEqualTo(ByteBuffer.wrap("bytes".getBytes(StandardCharsets.UTF_8))); + assertThat(columns.containsKey(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_8).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_8).getDateValue()).isEqualTo(ANY_DATE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_9).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_9).getTimeValue()).isEqualTo(ANY_TIME); + assertThat(columns.containsKey(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_10).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_10).getTimestampValue()).isEqualTo(ANY_TIMESTAMP); + assertThat(columns.containsKey(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_11).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_11).getTimestampTZValue()).isEqualTo(ANY_TIMESTAMPTZ); } @Test @@ -160,6 +211,10 @@ public void interpret_ShouldReturnWhatsSetWithNullValues() { recordValues.put(ANY_COLUMN_NAME_5, null); recordValues.put(ANY_COLUMN_NAME_6, null); recordValues.put(ANY_COLUMN_NAME_7, null); + recordValues.put(ANY_COLUMN_NAME_8, null); + recordValues.put(ANY_COLUMN_NAME_9, null); + recordValues.put(ANY_COLUMN_NAME_10, null); + recordValues.put(ANY_COLUMN_NAME_11, null); Record record = new Record( ANY_ID_1, @@ -170,75 +225,91 @@ public void interpret_ShouldReturnWhatsSetWithNullValues() { List projections = Collections.emptyList(); - ResultInterpreter spy = spy(new ResultInterpreter(projections, TABLE_METADATA)); + ResultInterpreter interpreter = new ResultInterpreter(projections, TABLE_METADATA); // Act - Result result = spy.interpret(record); + Result result = interpreter.interpret(record); // Assert - assertThat(result.getValue(ANY_NAME_1).isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_1).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_1).get().getAsString().get()).isEqualTo(ANY_TEXT_1); - assertThat(result.getValue(ANY_NAME_2).isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_2).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_2).get().getAsString().get()).isEqualTo(ANY_TEXT_2); - - assertThat(result.getValue(ANY_COLUMN_NAME_1).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_1).get().getAsBoolean()).isFalse(); - assertThat(result.getValue(ANY_COLUMN_NAME_2).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_2).get().getAsInt()).isEqualTo(0); - assertThat(result.getValue(ANY_COLUMN_NAME_3).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_3).get().getAsLong()).isEqualTo(0L); - assertThat(result.getValue(ANY_COLUMN_NAME_4).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_4).get().getAsFloat()).isEqualTo(0.0F); - assertThat(result.getValue(ANY_COLUMN_NAME_5).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_5).get().getAsDouble()).isEqualTo(0.0D); - assertThat(result.getValue(ANY_COLUMN_NAME_6).isPresent()).isTrue(); - 
assertThat(result.getValue(ANY_COLUMN_NAME_6).get().getAsString()).isNotPresent(); - assertThat(result.getValue(ANY_COLUMN_NAME_7).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_7).get().getAsBytes()).isNotPresent(); - - Map> values = result.getValues(); - assertThat(values.containsKey(ANY_NAME_1)).isTrue(); - assertThat(values.get(ANY_NAME_1).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_NAME_1).getAsString().get()).isEqualTo(ANY_TEXT_1); - assertThat(values.containsKey(ANY_NAME_2)).isTrue(); - assertThat(values.get(ANY_NAME_2).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_NAME_2).getAsString().get()).isEqualTo(ANY_TEXT_2); - - assertThat(values.containsKey(ANY_COLUMN_NAME_1)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_1).getAsBoolean()).isFalse(); - assertThat(values.containsKey(ANY_COLUMN_NAME_2)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_2).getAsInt()).isEqualTo(0); - assertThat(values.containsKey(ANY_COLUMN_NAME_3)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_3).getAsLong()).isEqualTo(0L); - assertThat(values.containsKey(ANY_COLUMN_NAME_4)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_4).getAsFloat()).isEqualTo(0.0F); - assertThat(values.containsKey(ANY_COLUMN_NAME_5)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_5).getAsDouble()).isEqualTo(0.0D); - assertThat(values.containsKey(ANY_COLUMN_NAME_6)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_6).getAsString()).isNotPresent(); - assertThat(values.containsKey(ANY_COLUMN_NAME_7)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_7).getAsBytes()).isNotPresent(); - + assertThat(result.contains(ANY_NAME_1)).isTrue(); assertThat(result.isNull(ANY_NAME_1)).isFalse(); assertThat(result.getText(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); + assertThat(result.contains(ANY_NAME_2)).isTrue(); assertThat(result.isNull(ANY_NAME_2)).isFalse(); assertThat(result.getText(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); + assertThat(result.contains(ANY_COLUMN_NAME_1)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_1)).isTrue(); assertThat(result.getBoolean(ANY_COLUMN_NAME_1)).isFalse(); + assertThat(result.contains(ANY_COLUMN_NAME_2)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_2)).isTrue(); assertThat(result.getInt(ANY_COLUMN_NAME_2)).isEqualTo(0); + assertThat(result.contains(ANY_COLUMN_NAME_3)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_3)).isTrue(); assertThat(result.getBigInt(ANY_COLUMN_NAME_3)).isEqualTo(0L); + assertThat(result.contains(ANY_COLUMN_NAME_4)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_4)).isTrue(); assertThat(result.getFloat(ANY_COLUMN_NAME_4)).isEqualTo(0.0F); + assertThat(result.contains(ANY_COLUMN_NAME_5)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_5)).isTrue(); - assertThat(result.getDouble(ANY_COLUMN_NAME_5)).isEqualTo(0.0D); + assertThat(result.getDouble(ANY_COLUMN_NAME_5)).isEqualTo(0D); + assertThat(result.contains(ANY_COLUMN_NAME_6)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_6)).isTrue(); assertThat(result.getText(ANY_COLUMN_NAME_6)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_7)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_7)).isTrue(); assertThat(result.getBlob(ANY_COLUMN_NAME_7)).isNull(); - assertThat(result.getBlobAsBytes(ANY_COLUMN_NAME_7)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(result.getDate(ANY_COLUMN_NAME_8)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_9)).isTrue(); + 
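// The assertions here pin down the Result null convention that the new types
// follow: isNull(name) reports the stored NULL, the primitive getters then
// return Java defaults (false, 0, 0L, 0.0F, 0D), and the object getters for
// text, blob, date, time, timestamp, and timestamptz return null. For example:
//
//   if (result.isNull(ANY_COLUMN_NAME_10)) {
//     assert result.getTimestamp(ANY_COLUMN_NAME_10) == null; // object getter
//   }
//   if (result.isNull(ANY_COLUMN_NAME_2)) {
//     assert result.getInt(ANY_COLUMN_NAME_2) == 0; // primitive default
//   }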
assertThat(result.isNull(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(result.getTime(ANY_COLUMN_NAME_9)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(result.getTimestamp(ANY_COLUMN_NAME_10)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(result.getTimestampTZ(ANY_COLUMN_NAME_11)).isNull(); + + Map<String, Column<?>> columns = result.getColumns(); + assertThat(columns.containsKey(ANY_NAME_1)).isTrue(); + assertThat(columns.get(ANY_NAME_1).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_1).getTextValue()).isEqualTo(ANY_TEXT_1); + assertThat(columns.containsKey(ANY_NAME_2)).isTrue(); + assertThat(columns.get(ANY_NAME_2).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_2).getTextValue()).isEqualTo(ANY_TEXT_2); + assertThat(columns.containsKey(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_1).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_1).getBooleanValue()).isFalse(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_2)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_2).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_2).getIntValue()).isEqualTo(0); + assertThat(columns.containsKey(ANY_COLUMN_NAME_3)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_3).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_3).getBigIntValue()).isEqualTo(0L); + assertThat(columns.containsKey(ANY_COLUMN_NAME_4)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_4).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_4).getFloatValue()).isEqualTo(0.0F); + assertThat(columns.containsKey(ANY_COLUMN_NAME_5)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_5).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_5).getDoubleValue()).isEqualTo(0D); + assertThat(columns.containsKey(ANY_COLUMN_NAME_6)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_6).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_6).getTextValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_7)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_7).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsBytes()).isNull(); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsByteBuffer()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_8).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_8).getDateValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_9).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_9).getTimeValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_10).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_10).getTimestampValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_11).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_11).getTimestampTZValue()).isNull(); } } diff --git a/core/src/test/java/com/scalar/db/storage/cosmos/ValueBinderTest.java b/core/src/test/java/com/scalar/db/storage/cosmos/ValueBinderTest.java new file mode 100644 index 0000000000..d0df450924 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/cosmos/ValueBinderTest.java @@ -0,0 +1,319 @@ +package
com.scalar.db.storage.cosmos; + +import static org.mockito.Mockito.verify; + +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; +import com.scalar.db.io.IntColumn; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.util.Base64; +import java.util.function.Consumer; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class ValueBinderTest { + + private static final String ANY_NAME = "name"; + private static final boolean ANY_BOOL = true; + private static final int ANY_INT = 1; + private static final long ANY_LONG = 1L; + private static final float ANY_FLOAT = 1.0f; + private static final double ANY_DOUBLE = 1.0; + private static final String ANY_STRING = "1"; + private static final LocalDate ANY_DATE = LocalDate.ofEpochDay(1); + private static final LocalTime ANY_TIME = LocalTime.ofSecondOfDay(1); + private static final LocalDateTime ANY_TIMESTAMP = + LocalDateTime.of(LocalDate.ofEpochDay(1), LocalTime.ofSecondOfDay(1)); + private static final Instant ANY_TIMESTAMPTZ = Instant.ofEpochSecond(1); + + @Mock private Consumer consumer; + + private ValueBinder binder; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + binder = new ValueBinder(); + binder.set(consumer); + } + + @Test + public void visit_BooleanColumn_ShouldCallAccept() { + // Arrange + BooleanColumn column = BooleanColumn.of(ANY_NAME, ANY_BOOL); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(ANY_BOOL); + } + + @Test + public void visit_IntColumn_ShouldCallAccept() { + // Arrange + IntColumn column = IntColumn.of(ANY_NAME, ANY_INT); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(ANY_INT); + } + + @Test + public void visit_BigIntColumn_ShouldCallAccept() { + // Arrange + BigIntColumn column = BigIntColumn.of(ANY_NAME, ANY_LONG); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(ANY_LONG); + } + + @Test + public void visit_FloatColumn_ShouldCallAccept() { + // Arrange + FloatColumn column = FloatColumn.of(ANY_NAME, ANY_FLOAT); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(ANY_FLOAT); + } + + @Test + public void visit_DoubleColumn_ShouldCallAccept() { + // Arrange + DoubleColumn column = DoubleColumn.of(ANY_NAME, ANY_DOUBLE); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(ANY_DOUBLE); + } + + @Test + public void visit_TextColumn_ShouldCallAccept() { + // Arrange + TextColumn column = TextColumn.of(ANY_NAME, ANY_STRING); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(ANY_STRING); + } + + @Test + public void visit_BlobColumn_ShouldCallAccept() { + // Arrange + BlobColumn column = BlobColumn.of(ANY_NAME, ANY_STRING.getBytes(StandardCharsets.UTF_8)); + + // Act + column.accept(binder); + + // Assert + verify(consumer) + 
.accept(Base64.getEncoder().encodeToString(ANY_STRING.getBytes(StandardCharsets.UTF_8))); + } + + @Test + public void visit_DateColumn_ShouldCallAccept() { + // Arrange + DateColumn column = DateColumn.of(ANY_NAME, ANY_DATE); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(TimeRelatedColumnEncodingUtils.encode(column)); + } + + @Test + public void visit_TimeColumn_ShouldCallAccept() { + // Arrange + TimeColumn column = TimeColumn.of(ANY_NAME, ANY_TIME); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(TimeRelatedColumnEncodingUtils.encode(column)); + } + + @Test + public void visit_TimestampColumn_ShouldCallAccept() { + // Arrange + TimestampColumn column = TimestampColumn.of(ANY_NAME, ANY_TIMESTAMP); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(TimeRelatedColumnEncodingUtils.encode(column)); + } + + @Test + public void visit_TimestampTZColumn_ShouldCallAccept() { + // Arrange + TimestampTZColumn column = TimestampTZColumn.of(ANY_NAME, ANY_TIMESTAMPTZ); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(TimeRelatedColumnEncodingUtils.encode(column)); + } + + @Test + public void visit_BooleanColumnWithNullValue_ShouldCallAcceptWithNull() { + // Arrange + BooleanColumn column = BooleanColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(null); + } + + @Test + public void visit_IntColumnWithNullValue_ShouldCallAcceptWithNull() { + // Arrange + IntColumn column = IntColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(null); + } + + @Test + public void visit_BigIntColumnWithNullValue_ShouldCallAcceptWithNull() { + // Arrange + BigIntColumn column = BigIntColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(null); + } + + @Test + public void visit_FloatColumnWithNullValue_ShouldCallAcceptWithNull() { + // Arrange + FloatColumn column = FloatColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(null); + } + + @Test + public void visit_DoubleColumnWithNullValue_ShouldCallAcceptWithNull() { + // Arrange + DoubleColumn column = DoubleColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(null); + } + + @Test + public void visit_TextColumnWithNullValue_ShouldCallAcceptWithNull() { + // Arrange + TextColumn column = TextColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(null); + } + + @Test + public void visit_BlobColumnWithNullValue_ShouldCallAcceptWithNull() { + // Arrange + BlobColumn column = BlobColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(null); + } + + @Test + public void visit_DateColumnWithNullValue_ShouldCallAcceptWithNull() { + // Arrange + DateColumn column = DateColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(null); + } + + @Test + public void visit_TimeColumnWithNullValue_ShouldCallAcceptWithNull() { + // Arrange + TimeColumn column = TimeColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(null); + } + + @Test + public void visit_TimestampColumnWithNullValue_ShouldCallAcceptWithNull() { + // Arrange + TimestampColumn column = TimestampColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(null); 
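// Unlike the Cassandra binder, this Cosmos ValueBinder hands each value to a
// Consumer<Object> instead of a bound statement, so NULL columns surface as
// accept(null) and time-related columns as their encoded long. A usage sketch
// built on the same set(Consumer) wiring as setUp(), with an illustrative
// column value:
//
//   ValueBinder binder = new ValueBinder();
//   List<Object> boundValues = new ArrayList<>();
//   binder.set(boundValues::add);
//   TimeColumn column = TimeColumn.of(ANY_NAME, LocalTime.NOON);
//   column.accept(binder);
//   // boundValues.get(0) now holds TimeRelatedColumnEncodingUtils.encode(column)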
+ } + + @Test + public void visit_TimestampTZColumnWithNullValue_ShouldCallAcceptWithNull() { + // Arrange + TimestampTZColumn column = TimestampTZColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + verify(consumer).accept(null); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/dynamo/DynamoAdminTestBase.java b/core/src/test/java/com/scalar/db/storage/dynamo/DynamoAdminTestBase.java index 86f78558a1..c2072e9445 100644 --- a/core/src/test/java/com/scalar/db/storage/dynamo/DynamoAdminTestBase.java +++ b/core/src/test/java/com/scalar/db/storage/dynamo/DynamoAdminTestBase.java @@ -314,6 +314,10 @@ public void createTable_WhenMetadataTableNotExist_ShouldCreateTableAndMetadataTa .addColumn("c5", DataType.INT) .addColumn("c6", DataType.DOUBLE) .addColumn("c7", DataType.FLOAT) + .addColumn("c8", DataType.DATE) + .addColumn("c9", DataType.TIME) + .addColumn("c10", DataType.TIMESTAMP) + .addColumn("c11", DataType.TIMESTAMPTZ) .addSecondaryIndex("c4") .build(); @@ -453,6 +457,10 @@ public void createTable_WhenMetadataTableNotExist_ShouldCreateTableAndMetadataTa columns.put("c5", AttributeValue.builder().s("int").build()); columns.put("c6", AttributeValue.builder().s("double").build()); columns.put("c7", AttributeValue.builder().s("float").build()); + columns.put("c8", AttributeValue.builder().s("date").build()); + columns.put("c9", AttributeValue.builder().s("time").build()); + columns.put("c10", AttributeValue.builder().s("timestamp").build()); + columns.put("c11", AttributeValue.builder().s("timestamptz").build()); itemValues.put(DynamoAdmin.METADATA_ATTR_COLUMNS, AttributeValue.builder().m(columns).build()); itemValues.put( DynamoAdmin.METADATA_ATTR_PARTITION_KEY, @@ -505,6 +513,10 @@ public void createTable_WhenMetadataTableExists_ShouldCreateOnlyTable() .addColumn("c5", DataType.BLOB) .addColumn("c6", DataType.DOUBLE) .addColumn("c7", DataType.FLOAT) + .addColumn("c8", DataType.DATE) + .addColumn("c9", DataType.TIME) + .addColumn("c10", DataType.TIMESTAMP) + .addColumn("c11", DataType.TIMESTAMPTZ) .addSecondaryIndex("c4") .build(); @@ -606,6 +618,10 @@ public void createTable_WhenMetadataTableExists_ShouldCreateOnlyTable() columns.put("c5", AttributeValue.builder().s("blob").build()); columns.put("c6", AttributeValue.builder().s("double").build()); columns.put("c7", AttributeValue.builder().s("float").build()); + columns.put("c8", AttributeValue.builder().s("date").build()); + columns.put("c9", AttributeValue.builder().s("time").build()); + columns.put("c10", AttributeValue.builder().s("timestamp").build()); + columns.put("c11", AttributeValue.builder().s("timestamptz").build()); itemValues.put(DynamoAdmin.METADATA_ATTR_COLUMNS, AttributeValue.builder().m(columns).build()); itemValues.put( DynamoAdmin.METADATA_ATTR_PARTITION_KEY, @@ -1579,11 +1595,16 @@ public void getNamespacesNames_WithNonExistingNamespacesTable_ShouldReturnEmptyS @Test public void unsupportedOperations_ShouldThrowUnsupportedException() { // Arrange Act - Throwable thrown1 = catchThrowable(() -> admin.getImportTableMetadata(NAMESPACE, TABLE)); + Throwable thrown1 = + catchThrowable( + () -> admin.getImportTableMetadata(NAMESPACE, TABLE, Collections.emptyMap())); Throwable thrown2 = catchThrowable(() -> admin.addRawColumnToTable(NAMESPACE, TABLE, "c1", DataType.INT)); Throwable thrown3 = - catchThrowable(() -> admin.importTable(NAMESPACE, TABLE, Collections.emptyMap())); + catchThrowable( + () -> + admin.importTable( + NAMESPACE, TABLE, Collections.emptyMap(), Collections.emptyMap())); // 
Assert assertThat(thrown1).isInstanceOf(UnsupportedOperationException.class); diff --git a/core/src/test/java/com/scalar/db/storage/dynamo/ResultInterpreterTest.java b/core/src/test/java/com/scalar/db/storage/dynamo/ResultInterpreterTest.java index 379e1f5e5e..a5503002cd 100644 --- a/core/src/test/java/com/scalar/db/storage/dynamo/ResultInterpreterTest.java +++ b/core/src/test/java/com/scalar/db/storage/dynamo/ResultInterpreterTest.java @@ -1,14 +1,23 @@ package com.scalar.db.storage.dynamo; import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.spy; import com.scalar.db.api.Result; import com.scalar.db.api.TableMetadata; -import com.scalar.db.io.BigIntValue; -import com.scalar.db.io.Value; +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -30,6 +39,10 @@ public class ResultInterpreterTest { private static final String ANY_COLUMN_NAME_5 = "col5"; private static final String ANY_COLUMN_NAME_6 = "col6"; private static final String ANY_COLUMN_NAME_7 = "col7"; + private static final String ANY_COLUMN_NAME_8 = "col8"; + private static final String ANY_COLUMN_NAME_9 = "col9"; + private static final String ANY_COLUMN_NAME_10 = "col10"; + private static final String ANY_COLUMN_NAME_11 = "col11"; private static final TableMetadata TABLE_METADATA = TableMetadata.newBuilder() @@ -42,9 +55,17 @@ public class ResultInterpreterTest { .addColumn(ANY_COLUMN_NAME_5, com.scalar.db.io.DataType.DOUBLE) .addColumn(ANY_COLUMN_NAME_6, com.scalar.db.io.DataType.TEXT) .addColumn(ANY_COLUMN_NAME_7, com.scalar.db.io.DataType.BLOB) + .addColumn(ANY_COLUMN_NAME_8, DataType.DATE) + .addColumn(ANY_COLUMN_NAME_9, DataType.TIME) + .addColumn(ANY_COLUMN_NAME_10, DataType.TIMESTAMP) + .addColumn(ANY_COLUMN_NAME_11, DataType.TIMESTAMPTZ) .addPartitionKey(ANY_NAME_1) .addClusteringKey(ANY_NAME_2) .build(); + private static final LocalDate ANY_DATE = DateColumn.MAX_VALUE; + private static final LocalTime ANY_TIME = TimeColumn.MAX_VALUE; + private static final LocalDateTime ANY_TIMESTAMP = TimestampColumn.MAX_VALUE; + private static final Instant ANY_TIMESTAMPTZ = TimestampTZColumn.MAX_VALUE; @Test public void interpret_ShouldReturnWhatsSet() { @@ -58,7 +79,7 @@ public void interpret_ShouldReturnWhatsSet() { ANY_COLUMN_NAME_2, AttributeValue.builder().n(String.valueOf(Integer.MAX_VALUE)).build()); item.put( ANY_COLUMN_NAME_3, - AttributeValue.builder().n(String.valueOf(BigIntValue.MAX_VALUE)).build()); + AttributeValue.builder().n(String.valueOf(BigIntColumn.MAX_VALUE)).build()); item.put( ANY_COLUMN_NAME_4, AttributeValue.builder().n(String.valueOf(Float.MAX_VALUE)).build()); item.put( @@ -69,85 +90,132 @@ public void interpret_ShouldReturnWhatsSet() { AttributeValue.builder() .b(SdkBytes.fromByteArray("bytes".getBytes(StandardCharsets.UTF_8))) .build()); + item.put( + ANY_COLUMN_NAME_8, + AttributeValue.builder() + .n( + String.valueOf( + TimeRelatedColumnEncodingUtils.encode( + DateColumn.of(ANY_COLUMN_NAME_8, ANY_DATE)))) + .build()); + item.put( + 
ANY_COLUMN_NAME_9, + AttributeValue.builder() + .n( + String.valueOf( + TimeRelatedColumnEncodingUtils.encode( + TimeColumn.of(ANY_COLUMN_NAME_9, ANY_TIME)))) + .build()); + item.put( + ANY_COLUMN_NAME_10, + AttributeValue.builder() + .n( + String.valueOf( + TimeRelatedColumnEncodingUtils.encode( + TimestampColumn.of(ANY_COLUMN_NAME_10, ANY_TIMESTAMP)))) + .build()); + item.put( + ANY_COLUMN_NAME_11, + AttributeValue.builder() + .n( + String.valueOf( + TimeRelatedColumnEncodingUtils.encode( + TimestampTZColumn.of(ANY_COLUMN_NAME_11, ANY_TIMESTAMPTZ)))) + .build()); List projections = Collections.emptyList(); - ResultInterpreter spy = spy(new ResultInterpreter(projections, TABLE_METADATA)); + ResultInterpreter interpreter = new ResultInterpreter(projections, TABLE_METADATA); // Act - Result result = spy.interpret(item); + Result result = interpreter.interpret(item); // Assert - assertThat(result.getValue(ANY_NAME_1).isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_1).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_1).get().getAsString().get()).isEqualTo(ANY_TEXT_1); - assertThat(result.getValue(ANY_NAME_2).isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_2).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_2).get().getAsString().get()).isEqualTo(ANY_TEXT_2); - assertThat(result.getValue(ANY_COLUMN_NAME_1).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_1).get().getAsBoolean()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_2).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_2).get().getAsInt()).isEqualTo(Integer.MAX_VALUE); - assertThat(result.getValue(ANY_COLUMN_NAME_3).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_3).get().getAsLong()) - .isEqualTo(BigIntValue.MAX_VALUE); - assertThat(result.getValue(ANY_COLUMN_NAME_4).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_4).get().getAsFloat()).isEqualTo(Float.MAX_VALUE); - assertThat(result.getValue(ANY_COLUMN_NAME_5).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_5).get().getAsDouble()).isEqualTo(Double.MAX_VALUE); - assertThat(result.getValue(ANY_COLUMN_NAME_6).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_6).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_6).get().getAsString().get()).isEqualTo("string"); - assertThat(result.getValue(ANY_COLUMN_NAME_7).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_7).get().getAsBytes().isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_7).get().getAsBytes().get()) - .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); - - Map> values = result.getValues(); - assertThat(values.containsKey(ANY_NAME_1)).isTrue(); - assertThat(values.get(ANY_NAME_1).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_NAME_1).getAsString().get()).isEqualTo(ANY_TEXT_1); - assertThat(values.containsKey(ANY_NAME_2)).isTrue(); - assertThat(values.get(ANY_NAME_2).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_NAME_2).getAsString().get()).isEqualTo(ANY_TEXT_2); - assertThat(values.containsKey(ANY_COLUMN_NAME_1)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_1).getAsBoolean()).isTrue(); - assertThat(values.containsKey(ANY_COLUMN_NAME_2)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_2).getAsInt()).isEqualTo(Integer.MAX_VALUE); - assertThat(values.containsKey(ANY_COLUMN_NAME_3)).isTrue(); - 
assertThat(values.get(ANY_COLUMN_NAME_3).getAsLong()).isEqualTo(BigIntValue.MAX_VALUE); - assertThat(values.containsKey(ANY_COLUMN_NAME_4)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_4).getAsFloat()).isEqualTo(Float.MAX_VALUE); - assertThat(values.containsKey(ANY_COLUMN_NAME_5)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_5).getAsDouble()).isEqualTo(Double.MAX_VALUE); - assertThat(values.containsKey(ANY_COLUMN_NAME_6)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_6).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_6).getAsString().get()).isEqualTo("string"); - assertThat(values.containsKey(ANY_COLUMN_NAME_7)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_7).getAsBytes().isPresent()).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_7).getAsBytes().get()) - .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); - + assertThat(result.contains(ANY_NAME_1)).isTrue(); assertThat(result.isNull(ANY_NAME_1)).isFalse(); assertThat(result.getText(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); + assertThat(result.contains(ANY_NAME_2)).isTrue(); assertThat(result.isNull(ANY_NAME_2)).isFalse(); assertThat(result.getText(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); + assertThat(result.contains(ANY_COLUMN_NAME_1)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_1)).isFalse(); assertThat(result.getBoolean(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(result.contains(ANY_COLUMN_NAME_2)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_2)).isFalse(); assertThat(result.getInt(ANY_COLUMN_NAME_2)).isEqualTo(Integer.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_3)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_3)).isFalse(); - assertThat(result.getBigInt(ANY_COLUMN_NAME_3)).isEqualTo(BigIntValue.MAX_VALUE); + assertThat(result.getBigInt(ANY_COLUMN_NAME_3)).isEqualTo(BigIntColumn.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_4)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_4)).isFalse(); assertThat(result.getFloat(ANY_COLUMN_NAME_4)).isEqualTo(Float.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_5)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_5)).isFalse(); assertThat(result.getDouble(ANY_COLUMN_NAME_5)).isEqualTo(Double.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_6)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_6)).isFalse(); assertThat(result.getText(ANY_COLUMN_NAME_6)).isEqualTo("string"); + assertThat(result.contains(ANY_COLUMN_NAME_7)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_7)).isFalse(); assertThat(result.getBlob(ANY_COLUMN_NAME_7)) .isEqualTo(ByteBuffer.wrap("bytes".getBytes(StandardCharsets.UTF_8))); assertThat(result.getBlobAsBytes(ANY_COLUMN_NAME_7)) .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); + assertThat(result.contains(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_8)).isFalse(); + assertThat(result.getDate(ANY_COLUMN_NAME_8)).isEqualTo(ANY_DATE); + assertThat(result.contains(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_9)).isFalse(); + assertThat(result.getTime(ANY_COLUMN_NAME_9)).isEqualTo(ANY_TIME); + assertThat(result.contains(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_10)).isFalse(); + assertThat(result.getTimestamp(ANY_COLUMN_NAME_10)).isEqualTo(ANY_TIMESTAMP); + assertThat(result.contains(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_11)).isFalse(); + assertThat(result.getTimestampTZ(ANY_COLUMN_NAME_11)).isEqualTo(ANY_TIMESTAMPTZ); + + Map> columns = 
result.getColumns(); + assertThat(columns.containsKey(ANY_NAME_1)).isTrue(); + assertThat(columns.get(ANY_NAME_1).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_1).getTextValue()).isEqualTo(ANY_TEXT_1); + assertThat(columns.containsKey(ANY_NAME_2)).isTrue(); + assertThat(columns.get(ANY_NAME_2).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_2).getTextValue()).isEqualTo(ANY_TEXT_2); + assertThat(columns.containsKey(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_1).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_1).getBooleanValue()).isTrue(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_2)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_2).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_2).getIntValue()).isEqualTo(Integer.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_3)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_3).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_3).getBigIntValue()).isEqualTo(BigIntColumn.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_4)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_4).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_4).getFloatValue()).isEqualTo(Float.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_5)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_5).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_5).getDoubleValue()).isEqualTo(Double.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_6)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_6).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_6).getTextValue()).isEqualTo("string"); + assertThat(columns.containsKey(ANY_COLUMN_NAME_7)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_7).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsBytes()) + .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsByteBuffer()) + .isEqualTo(ByteBuffer.wrap("bytes".getBytes(StandardCharsets.UTF_8))); + assertThat(columns.containsKey(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_8).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_8).getDateValue()).isEqualTo(ANY_DATE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_9).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_9).getTimeValue()).isEqualTo(ANY_TIME); + assertThat(columns.containsKey(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_10).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_10).getTimestampValue()).isEqualTo(ANY_TIMESTAMP); + assertThat(columns.containsKey(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_11).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_11).getTimestampTZValue()).isEqualTo(ANY_TIMESTAMPTZ); } @Test @@ -164,78 +232,98 @@ public void interpret_ShouldReturnWhatsSetWithNullValues() { item.put(ANY_COLUMN_NAME_5, AttributeValue.builder().nul(true).build()); item.put(ANY_COLUMN_NAME_6, AttributeValue.builder().nul(true).build()); item.put(ANY_COLUMN_NAME_7, AttributeValue.builder().nul(true).build()); + item.put(ANY_COLUMN_NAME_8, AttributeValue.builder().nul(true).build()); + item.put(ANY_COLUMN_NAME_9, AttributeValue.builder().nul(true).build()); + item.put(ANY_COLUMN_NAME_10, 
AttributeValue.builder().nul(true).build()); + item.put(ANY_COLUMN_NAME_11, AttributeValue.builder().nul(true).build()); List<String> projections = Collections.emptyList(); - ResultInterpreter spy = spy(new ResultInterpreter(projections, TABLE_METADATA)); + ResultInterpreter interpreter = new ResultInterpreter(projections, TABLE_METADATA); // Act - Result result = spy.interpret(item); + Result result = interpreter.interpret(item); // Assert - assertThat(result.getValue(ANY_NAME_1).isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_1).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_1).get().getAsString().get()).isEqualTo(ANY_TEXT_1); - assertThat(result.getValue(ANY_NAME_2).isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_2).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_2).get().getAsString().get()).isEqualTo(ANY_TEXT_2); - - assertThat(result.getValue(ANY_COLUMN_NAME_1).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_1).get().getAsBoolean()).isFalse(); - assertThat(result.getValue(ANY_COLUMN_NAME_2).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_2).get().getAsInt()).isEqualTo(0); - assertThat(result.getValue(ANY_COLUMN_NAME_3).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_3).get().getAsLong()).isEqualTo(0L); - assertThat(result.getValue(ANY_COLUMN_NAME_4).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_4).get().getAsFloat()).isEqualTo(0.0F); - assertThat(result.getValue(ANY_COLUMN_NAME_5).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_5).get().getAsDouble()).isEqualTo(0.0D); - assertThat(result.getValue(ANY_COLUMN_NAME_6).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_6).get().getAsString()).isNotPresent(); - assertThat(result.getValue(ANY_COLUMN_NAME_7).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_7).get().getAsBytes()).isNotPresent(); - - Map<String, Value<?>> values = result.getValues(); - assertThat(values.containsKey(ANY_NAME_1)).isTrue(); - assertThat(values.get(ANY_NAME_1).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_NAME_1).getAsString().get()).isEqualTo(ANY_TEXT_1); - assertThat(values.containsKey(ANY_NAME_2)).isTrue(); - assertThat(values.get(ANY_NAME_2).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_NAME_2).getAsString().get()).isEqualTo(ANY_TEXT_2); - - assertThat(values.containsKey(ANY_COLUMN_NAME_1)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_1).getAsBoolean()).isFalse(); - assertThat(values.containsKey(ANY_COLUMN_NAME_2)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_2).getAsInt()).isEqualTo(0); - assertThat(values.containsKey(ANY_COLUMN_NAME_3)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_3).getAsLong()).isEqualTo(0L); - assertThat(values.containsKey(ANY_COLUMN_NAME_4)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_4).getAsFloat()).isEqualTo(0.0F); - assertThat(values.containsKey(ANY_COLUMN_NAME_5)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_5).getAsDouble()).isEqualTo(0.0D); - assertThat(values.containsKey(ANY_COLUMN_NAME_6)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_6).getAsString()).isNotPresent(); - assertThat(values.containsKey(ANY_COLUMN_NAME_7)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_7).getAsBytes()).isNotPresent(); - + assertThat(result.contains(ANY_NAME_1)).isTrue(); assertThat(result.isNull(ANY_NAME_1)).isFalse();
assertThat(result.getText(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); + assertThat(result.contains(ANY_NAME_2)).isTrue(); assertThat(result.isNull(ANY_NAME_2)).isFalse(); assertThat(result.getText(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); + assertThat(result.contains(ANY_COLUMN_NAME_1)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_1)).isTrue(); assertThat(result.getBoolean(ANY_COLUMN_NAME_1)).isFalse(); + assertThat(result.contains(ANY_COLUMN_NAME_2)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_2)).isTrue(); assertThat(result.getInt(ANY_COLUMN_NAME_2)).isEqualTo(0); + assertThat(result.contains(ANY_COLUMN_NAME_3)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_3)).isTrue(); assertThat(result.getBigInt(ANY_COLUMN_NAME_3)).isEqualTo(0L); + assertThat(result.contains(ANY_COLUMN_NAME_4)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_4)).isTrue(); assertThat(result.getFloat(ANY_COLUMN_NAME_4)).isEqualTo(0.0F); + assertThat(result.contains(ANY_COLUMN_NAME_5)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_5)).isTrue(); - assertThat(result.getDouble(ANY_COLUMN_NAME_5)).isEqualTo(0.0D); + assertThat(result.getDouble(ANY_COLUMN_NAME_5)).isEqualTo(0D); + assertThat(result.contains(ANY_COLUMN_NAME_6)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_6)).isTrue(); assertThat(result.getText(ANY_COLUMN_NAME_6)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_7)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_7)).isTrue(); assertThat(result.getBlob(ANY_COLUMN_NAME_7)).isNull(); - assertThat(result.getBlobAsBytes(ANY_COLUMN_NAME_7)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(result.getDate(ANY_COLUMN_NAME_8)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(result.getTime(ANY_COLUMN_NAME_9)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(result.getTimestamp(ANY_COLUMN_NAME_10)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(result.getTimestampTZ(ANY_COLUMN_NAME_11)).isNull(); + + Map<String, Column<?>> columns = result.getColumns(); + assertThat(columns.containsKey(ANY_NAME_1)).isTrue(); + assertThat(columns.get(ANY_NAME_1).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_1).getTextValue()).isEqualTo(ANY_TEXT_1); + assertThat(columns.containsKey(ANY_NAME_2)).isTrue(); + assertThat(columns.get(ANY_NAME_2).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_2).getTextValue()).isEqualTo(ANY_TEXT_2); + assertThat(columns.containsKey(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_1).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_1).getBooleanValue()).isFalse(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_2)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_2).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_2).getIntValue()).isEqualTo(0); + assertThat(columns.containsKey(ANY_COLUMN_NAME_3)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_3).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_3).getBigIntValue()).isEqualTo(0L); + assertThat(columns.containsKey(ANY_COLUMN_NAME_4)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_4).hasNullValue()).isTrue(); +
assertThat(columns.get(ANY_COLUMN_NAME_4).getFloatValue()).isEqualTo(0.0F); + assertThat(columns.containsKey(ANY_COLUMN_NAME_5)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_5).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_5).getDoubleValue()).isEqualTo(0D); + assertThat(columns.containsKey(ANY_COLUMN_NAME_6)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_6).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_6).getTextValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_7)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_7).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsBytes()).isNull(); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsByteBuffer()).isNull(); + assertThat(columns.get(ANY_COLUMN_NAME_8).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_8).getDateValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_9).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_9).getTimeValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_10).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_10).getTimestampValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_11).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_11).getTimestampTZValue()).isNull(); } } diff --git a/core/src/test/java/com/scalar/db/storage/dynamo/ValueBinderTest.java b/core/src/test/java/com/scalar/db/storage/dynamo/ValueBinderTest.java new file mode 100644 index 0000000000..fb12948f51 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/dynamo/ValueBinderTest.java @@ -0,0 +1,403 @@ +package com.scalar.db.storage.dynamo; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.entry; + +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; +import com.scalar.db.io.IntColumn; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.MockitoAnnotations; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +public class ValueBinderTest { + private static final String ANY_NAME = "name"; + private static final boolean ANY_BOOL = true; + private static final int ANY_INT = 1; + private static final long ANY_LONG = 1L; + private static final float ANY_FLOAT = 1.0f; + private static final double ANY_DOUBLE = 1.0; + private static final String ANY_STRING = "1"; + private static final LocalDate ANY_DATE = LocalDate.ofEpochDay(1); + private static final LocalTime ANY_TIME = LocalTime.ofSecondOfDay(1); + private static final LocalDateTime ANY_TIMESTAMP = + LocalDateTime.of(LocalDate.ofEpochDay(1), LocalTime.ofSecondOfDay(1)); + 
private static final Instant ANY_TIMESTAMPTZ = Instant.ofEpochSecond(1); + + private ValueBinder binder; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + binder = new ValueBinder(":foo"); + } + + @Test + public void visit_BooleanColumn_ShouldBindAttributeValue() { + // Arrange + BooleanColumn column = BooleanColumn.of(ANY_NAME, ANY_BOOL); + + // Act + column.accept(binder); + Map<String, AttributeValue> values = binder.build(); + + // Assert + + assertThat(values) + .containsOnly(entry(":foo0", AttributeValue.builder().bool(ANY_BOOL).build())); + } + + @Test + public void visit_BooleanColumnWithNullValue_ShouldBindNullAttributeValue() { + // Arrange + BooleanColumn column = BooleanColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + Map<String, AttributeValue> values = binder.build(); + assertThat(values).containsOnly(entry(":foo0", AttributeValue.builder().nul(true).build())); + } + + @Test + public void visit_IntColumn_ShouldBindAttributeValue() { + // Arrange + IntColumn column = IntColumn.of(ANY_NAME, ANY_INT); + + // Act + column.accept(binder); + + // Assert + Map<String, AttributeValue> values = binder.build(); + assertThat(values) + .containsOnly(entry(":foo0", AttributeValue.builder().n(String.valueOf(ANY_INT)).build())); + } + + @Test + public void visit_IntColumnWithNullValue_ShouldBindNullAttributeValue() { + // Arrange + IntColumn column = IntColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + Map<String, AttributeValue> values = binder.build(); + assertThat(values).containsOnly(entry(":foo0", AttributeValue.builder().nul(true).build())); + } + + @Test + public void visit_BigIntColumn_ShouldBindAttributeValue() { + // Arrange + BigIntColumn column = BigIntColumn.of(ANY_NAME, ANY_LONG); + + // Act + column.accept(binder); + + // Assert + Map<String, AttributeValue> values = binder.build(); + assertThat(values) + .containsOnly(entry(":foo0", AttributeValue.builder().n(String.valueOf(ANY_LONG)).build())); + } + + @Test + public void visit_BigIntColumnWithNullValue_ShouldBindNullAttributeValue() { + // Arrange + BigIntColumn column = BigIntColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + Map<String, AttributeValue> values = binder.build(); + assertThat(values).containsOnly(entry(":foo0", AttributeValue.builder().nul(true).build())); + } + + @Test + public void visit_FloatColumn_ShouldBindAttributeValue() { + // Arrange + FloatColumn column = FloatColumn.of(ANY_NAME, ANY_FLOAT); + + // Act + column.accept(binder); + + // Assert + Map<String, AttributeValue> values = binder.build(); + assertThat(values) + .containsOnly( + entry(":foo0", AttributeValue.builder().n(String.valueOf(ANY_FLOAT)).build())); + } + + @Test + public void visit_FloatColumnWithNullValue_ShouldBindNullAttributeValue() { + // Arrange + FloatColumn column = FloatColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + Map<String, AttributeValue> values = binder.build(); + assertThat(values).containsOnly(entry(":foo0", AttributeValue.builder().nul(true).build())); + } + + @Test + public void visit_DoubleColumn_ShouldBindAttributeValue() { + // Arrange + DoubleColumn column = DoubleColumn.of(ANY_NAME, ANY_DOUBLE); + + // Act + column.accept(binder); + + // Assert + Map<String, AttributeValue> values = binder.build(); + assertThat(values) + .containsOnly( + entry(":foo0", AttributeValue.builder().n(String.valueOf(ANY_DOUBLE)).build())); + } + + @Test + public void visit_DoubleColumnWithNullValue_ShouldBindNullAttributeValue() { + // Arrange + DoubleColumn column = DoubleColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + Map<String, AttributeValue> values = binder.build(); +
assertThat(values).containsOnly(entry(":foo0", AttributeValue.builder().nul(true).build())); + } + + @Test + public void visit_TextColumn_ShouldBindAttributeValue() { + // Arrange + TextColumn column = TextColumn.of(ANY_NAME, ANY_STRING); + + // Act + column.accept(binder); + + // Assert + Map values = binder.build(); + assertThat(values).containsOnly(entry(":foo0", AttributeValue.builder().s(ANY_STRING).build())); + } + + @Test + public void visit_TextColumnWithNullValue_ShouldBindNullAttributeValue() { + // Arrange + TextColumn column = TextColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + Map values = binder.build(); + assertThat(values).containsOnly(entry(":foo0", AttributeValue.builder().nul(true).build())); + } + + @Test + public void visit_BlobColumn_ShouldBindAttributeValue() { + // Arrange + BlobColumn column = BlobColumn.of(ANY_NAME, ANY_STRING.getBytes(StandardCharsets.UTF_8)); + + // Act + column.accept(binder); + + // Assert + Map values = binder.build(); + assertThat(values) + .containsOnly( + entry( + ":foo0", + AttributeValue.builder() + .b(SdkBytes.fromByteArray(ANY_STRING.getBytes(StandardCharsets.UTF_8))) + .build())); + } + + @Test + public void visit_BlobColumnWithNullValue_ShouldBindNullAttributeValue() { + // Arrange + BlobColumn column = BlobColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + Map values = binder.build(); + assertThat(values).containsOnly(entry(":foo0", AttributeValue.builder().nul(true).build())); + } + + @Test + public void visit_DateColumn_ShouldBindAttributeValue() { + // Arrange + DateColumn column = DateColumn.of(ANY_NAME, ANY_DATE); + + // Act + column.accept(binder); + + // Assert + Map values = binder.build(); + assertThat(values) + .containsOnly( + entry( + ":foo0", + AttributeValue.builder() + .n(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))) + .build())); + } + + @Test + public void visit_DateColumnWithNullValue_ShouldBindNullAttributeValue() { + // Arrange + DateColumn column = DateColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + Map values = binder.build(); + assertThat(values).containsOnly(entry(":foo0", AttributeValue.builder().nul(true).build())); + } + + @Test + public void visit_TimeColumn_ShouldBindAttributeValue() { + // Arrange + TimeColumn column = TimeColumn.of(ANY_NAME, ANY_TIME); + + // Act + column.accept(binder); + + // Assert + Map values = binder.build(); + assertThat(values) + .containsOnly( + entry( + ":foo0", + AttributeValue.builder() + .n(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))) + .build())); + } + + @Test + public void visit_TimeColumnWithNullValue_ShouldBindNullAttributeValue() { + // Arrange + TimeColumn column = TimeColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + Map values = binder.build(); + assertThat(values).containsOnly(entry(":foo0", AttributeValue.builder().nul(true).build())); + } + + @Test + public void visit_TimestampColumn_ShouldBindAttributeValue() { + // Arrange + TimestampColumn column = TimestampColumn.of(ANY_NAME, ANY_TIMESTAMP); + + // Act + column.accept(binder); + + // Assert + Map values = binder.build(); + assertThat(values) + .containsOnly( + entry( + ":foo0", + AttributeValue.builder() + .n(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))) + .build())); + } + + @Test + public void visit_TimestampColumnWithNullValue_ShouldBindNullAttributeValue() { + // Arrange + TimestampColumn column = TimestampColumn.ofNull(ANY_NAME); + + // Act + 
column.accept(binder); + + // Assert + Map<String, AttributeValue> values = binder.build(); + assertThat(values).containsOnly(entry(":foo0", AttributeValue.builder().nul(true).build())); + } + + @Test + public void visit_TimestampTZColumn_ShouldBindAttributeValue() { + // Arrange + TimestampTZColumn column = TimestampTZColumn.of(ANY_NAME, ANY_TIMESTAMPTZ); + + // Act + column.accept(binder); + + // Assert + Map<String, AttributeValue> values = binder.build(); + assertThat(values) + .containsOnly( + entry( + ":foo0", + AttributeValue.builder() + .n(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))) + .build())); + } + + @Test + public void visit_TimestampTZColumnWithNullValue_ShouldBindNullAttributeValue() { + // Arrange + TimestampTZColumn column = TimestampTZColumn.ofNull(ANY_NAME); + + // Act + column.accept(binder); + + // Assert + Map<String, AttributeValue> values = binder.build(); + assertThat(values).containsOnly(entry(":foo0", AttributeValue.builder().nul(true).build())); + } + + @Test + public void visit_SeveralColumns_ShouldBindAllColumnsCorrectly() { + // Arrange + TimestampTZColumn timestampTZColumn = TimestampTZColumn.of(ANY_NAME, ANY_TIMESTAMPTZ); + BooleanColumn booleanColumn = BooleanColumn.of(ANY_NAME, ANY_BOOL); + FloatColumn floatColumn = FloatColumn.ofNull(ANY_NAME); + BigIntColumn bigIntColumn = BigIntColumn.of(ANY_NAME, ANY_LONG); + + // Act + timestampTZColumn.accept(binder); + booleanColumn.accept(binder); + floatColumn.accept(binder); + bigIntColumn.accept(binder); + + // Assert + Map<String, AttributeValue> values = binder.build(); + assertThat(values) + .containsOnly( + entry( + ":foo0", + AttributeValue.builder() + .n(String.valueOf(TimeRelatedColumnEncodingUtils.encode(timestampTZColumn))) + .build()), + entry(":foo1", AttributeValue.builder().bool(ANY_BOOL).build()), + entry(":foo2", AttributeValue.builder().nul(true).build()), + entry(":foo3", AttributeValue.builder().n(String.valueOf(ANY_LONG)).build())); + } +}
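A note on the binding pattern exercised above: ValueBinder is a column visitor, so callers never switch on column types themselves; each accept() call binds the visited column to the next ":foo<N>" placeholder, and the date/time columns are first turned into numbers via TimeRelatedColumnEncodingUtils. The following is a minimal sketch of that flow, not part of the patch; it assumes only the one-argument constructor and build() shown in the tests, that the code runs inside the com.scalar.db.storage.dynamo package (ValueBinder is not public API), and uses hypothetical column names.

    // Sketch only; mirrors the API usage in ValueBinderTest above.
    // Assumed imports: com.scalar.db.io.TextColumn, com.scalar.db.io.TimestampTZColumn,
    // java.time.Instant, java.util.Map,
    // software.amazon.awssdk.services.dynamodb.model.AttributeValue.
    static Map<String, AttributeValue> bindForUpdate() {
      ValueBinder binder = new ValueBinder(":foo");
      TextColumn.of("name", "alice").accept(binder);                    // -> :foo0 (string)
      TimestampTZColumn.of("updated_at", Instant.now()).accept(binder); // -> :foo1 (encoded number)
      // The returned map is suitable as the expression attribute values of a
      // DynamoDB update or condition expression.
      return binder.build();
    }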
diff --git a/core/src/test/java/com/scalar/db/storage/jdbc/JdbcAdminTest.java b/core/src/test/java/com/scalar/db/storage/jdbc/JdbcAdminTest.java index 51c5ca5193..925812e26f 100644 --- a/core/src/test/java/com/scalar/db/storage/jdbc/JdbcAdminTest.java +++ b/core/src/test/java/com/scalar/db/storage/jdbc/JdbcAdminTest.java @@ -12,7 +12,10 @@ import static org.assertj.core.api.Assertions.catchThrowable; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.description; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; @@ -35,6 +38,7 @@ import com.scalar.db.io.DataType; import java.sql.Connection; import java.sql.DatabaseMetaData; +import java.sql.JDBCType; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; @@ -214,7 +218,15 @@ private void getTableMetadata_forX_ShouldReturnTableMetadata( new SelectAllFromMetadataTableResultSetMocker.Row( "c6", DataType.DOUBLE.toString(), null, null, false), new SelectAllFromMetadataTableResultSetMocker.Row( - "c7", DataType.FLOAT.toString(), null, null, false)); + "c7", DataType.FLOAT.toString(), null, null, false), + new SelectAllFromMetadataTableResultSetMocker.Row( + "c8", DataType.DATE.toString(), null, null, false), + new SelectAllFromMetadataTableResultSetMocker.Row( + "c9", DataType.TIME.toString(), null, null, false), + new SelectAllFromMetadataTableResultSetMocker.Row( + "c10", DataType.TIMESTAMP.toString(), null, null, false), + new SelectAllFromMetadataTableResultSetMocker.Row( + "c11", DataType.TIMESTAMPTZ.toString(), null, null, false)); when(selectStatement.executeQuery()).thenReturn(resultSet); when(connection.prepareStatement(any())).thenReturn(selectStatement); when(dataSource.getConnection()).thenReturn(connection); @@ -237,6 +249,10 @@ private void getTableMetadata_forX_ShouldReturnTableMetadata( .addColumn("c5", DataType.INT) .addColumn("c6", DataType.DOUBLE) .addColumn("c7", DataType.FLOAT) + .addColumn("c8", DataType.DATE) + .addColumn("c9", DataType.TIME) + .addColumn("c10", DataType.TIMESTAMP) + .addColumn("c11", DataType.TIMESTAMPTZ) .addSecondaryIndex("c4") .build(); assertThat(actualMetadata).isEqualTo(expectedMetadata); @@ -469,7 +485,7 @@ public void createTableInternal_ForSqlite_withInvalidTableName_ShouldThrowExecut public void createTableInternal_ForMysql_ShouldCreateTableAndIndexes() throws SQLException { createTableInternal_ForX_CreateTableAndIndexes( RdbEngine.MYSQL, - "CREATE TABLE `my_ns`.`foo_table`(`c3` BOOLEAN,`c1` VARCHAR(128),`c4` VARBINARY(128),`c2` BIGINT,`c5` INT,`c6` DOUBLE,`c7` REAL, PRIMARY KEY (`c3` ASC,`c1` DESC,`c4` ASC))", + "CREATE TABLE `my_ns`.`foo_table`(`c3` BOOLEAN,`c1` VARCHAR(128),`c4` VARBINARY(128),`c2` BIGINT,`c5` INT,`c6` DOUBLE,`c7` REAL,`c8` DATE,`c9` TIME(6),`c10` DATETIME(3),`c11` DATETIME(3), PRIMARY KEY (`c3` ASC,`c1` DESC,`c4` ASC))", "CREATE INDEX `index_my_ns_foo_table_c4` ON `my_ns`.`foo_table` (`c4`)", "CREATE INDEX `index_my_ns_foo_table_c1` ON `my_ns`.`foo_table` (`c1`)"); } @@ -481,7 +497,7 @@ public void createTableInternal_ForMysql_ShouldCreateTableAndIndexes() throws SQ when(config.getMysqlVariableKeyColumnSize()).thenReturn(64); createTableInternal_ForX_CreateTableAndIndexes( new RdbEngineMysql(config), - "CREATE TABLE `my_ns`.`foo_table`(`c3` BOOLEAN,`c1` VARCHAR(64),`c4` VARBINARY(64),`c2` BIGINT,`c5` INT,`c6` DOUBLE,`c7` REAL, PRIMARY KEY (`c3` ASC,`c1` DESC,`c4` ASC))", + "CREATE TABLE `my_ns`.`foo_table`(`c3` BOOLEAN,`c1` VARCHAR(64),`c4` VARBINARY(64),`c2` BIGINT,`c5` INT,`c6` DOUBLE,`c7` REAL,`c8` DATE,`c9` TIME(6),`c10` DATETIME(3),`c11` DATETIME(3), PRIMARY KEY (`c3` ASC,`c1` DESC,`c4` ASC))", "CREATE INDEX `index_my_ns_foo_table_c4` ON `my_ns`.`foo_table` (`c4`)", "CREATE INDEX `index_my_ns_foo_table_c1` ON `my_ns`.`foo_table` (`c1`)"); } @@ -490,7 +506,7 @@ public void createTableInternal_ForMysql_ShouldCreateTableAndIndexes() throws SQ public void createTableInternal_ForPostgresql_ShouldCreateTableAndIndexes() throws SQLException { createTableInternal_ForX_CreateTableAndIndexes( RdbEngine.POSTGRESQL, - "CREATE TABLE \"my_ns\".\"foo_table\"(\"c3\" BOOLEAN,\"c1\" VARCHAR(10485760),\"c4\" BYTEA,\"c2\" BIGINT,\"c5\" INT,\"c6\" DOUBLE PRECISION,\"c7\" REAL, PRIMARY KEY (\"c3\",\"c1\",\"c4\"))", + "CREATE TABLE \"my_ns\".\"foo_table\"(\"c3\" BOOLEAN,\"c1\" VARCHAR(10485760),\"c4\" BYTEA,\"c2\" BIGINT,\"c5\" INT,\"c6\" DOUBLE PRECISION,\"c7\" REAL,\"c8\" DATE,\"c9\" TIME,\"c10\" TIMESTAMP,\"c11\" TIMESTAMP WITH TIME ZONE, PRIMARY KEY (\"c3\",\"c1\",\"c4\"))", "CREATE UNIQUE INDEX \"my_ns.foo_table_clustering_order_idx\" ON \"my_ns\".\"foo_table\" (\"c3\" ASC,\"c1\" DESC,\"c4\" ASC)", "CREATE INDEX \"index_my_ns_foo_table_c4\" ON \"my_ns\".\"foo_table\" (\"c4\")", "CREATE INDEX \"index_my_ns_foo_table_c1\" ON \"my_ns\".\"foo_table\" (\"c1\")"); @@ -500,8 +516,7 @@ public void
createTableInternal_ForPostgresql_ShouldCreateTableAndIndexes() thro public void createTableInternal_ForSqlServer_ShouldCreateTableAndIndexes() throws SQLException { createTableInternal_ForX_CreateTableAndIndexes( RdbEngine.SQL_SERVER, - "CREATE TABLE [my_ns].[foo_table]([c3] BIT,[c1] VARCHAR(8000)," - + "[c4] VARBINARY(8000),[c2] BIGINT,[c5] INT,[c6] FLOAT,[c7] FLOAT(24), PRIMARY KEY ([c3] ASC,[c1] DESC,[c4] ASC))", + "CREATE TABLE [my_ns].[foo_table]([c3] BIT,[c1] VARCHAR(8000),[c4] VARBINARY(8000),[c2] BIGINT,[c5] INT,[c6] FLOAT,[c7] FLOAT(24),[c8] DATE,[c9] TIME(6),[c10] DATETIME2(3),[c11] DATETIMEOFFSET(3), PRIMARY KEY ([c3] ASC,[c1] DESC,[c4] ASC))", "CREATE INDEX [index_my_ns_foo_table_c4] ON [my_ns].[foo_table] ([c4])", "CREATE INDEX [index_my_ns_foo_table_c1] ON [my_ns].[foo_table] ([c1])"); } @@ -510,7 +525,7 @@ public void createTableInternal_ForSqlServer_ShouldCreateTableAndIndexes() throw public void createTableInternal_ForOracle_ShouldCreateTableAndIndexes() throws SQLException { createTableInternal_ForX_CreateTableAndIndexes( RdbEngine.ORACLE, - "CREATE TABLE \"my_ns\".\"foo_table\"(\"c3\" NUMBER(1),\"c1\" VARCHAR2(128),\"c4\" RAW(128),\"c2\" NUMBER(19),\"c5\" NUMBER(10),\"c6\" BINARY_DOUBLE,\"c7\" BINARY_FLOAT, PRIMARY KEY (\"c3\",\"c1\",\"c4\")) ROWDEPENDENCIES", + "CREATE TABLE \"my_ns\".\"foo_table\"(\"c3\" NUMBER(1),\"c1\" VARCHAR2(128),\"c4\" RAW(128),\"c2\" NUMBER(19),\"c5\" NUMBER(10),\"c6\" BINARY_DOUBLE,\"c7\" BINARY_FLOAT,\"c8\" DATE,\"c9\" TIMESTAMP(6),\"c10\" TIMESTAMP(3),\"c11\" TIMESTAMP(3) WITH TIME ZONE, PRIMARY KEY (\"c3\",\"c1\",\"c4\")) ROWDEPENDENCIES", "ALTER TABLE \"my_ns\".\"foo_table\" INITRANS 3 MAXTRANS 255", "CREATE UNIQUE INDEX \"my_ns.foo_table_clustering_order_idx\" ON \"my_ns\".\"foo_table\" (\"c3\" ASC,\"c1\" DESC,\"c4\" ASC)", "CREATE INDEX \"index_my_ns_foo_table_c4\" ON \"my_ns\".\"foo_table\" (\"c4\")", @@ -524,7 +539,7 @@ public void createTableInternal_ForOracle_ShouldCreateTableAndIndexes() throws S when(config.getOracleVariableKeyColumnSize()).thenReturn(64); createTableInternal_ForX_CreateTableAndIndexes( new RdbEngineOracle(config), - "CREATE TABLE \"my_ns\".\"foo_table\"(\"c3\" NUMBER(1),\"c1\" VARCHAR2(64),\"c4\" RAW(64),\"c2\" NUMBER(19),\"c5\" NUMBER(10),\"c6\" BINARY_DOUBLE,\"c7\" BINARY_FLOAT, PRIMARY KEY (\"c3\",\"c1\",\"c4\")) ROWDEPENDENCIES", + "CREATE TABLE \"my_ns\".\"foo_table\"(\"c3\" NUMBER(1),\"c1\" VARCHAR2(64),\"c4\" RAW(64),\"c2\" NUMBER(19),\"c5\" NUMBER(10),\"c6\" BINARY_DOUBLE,\"c7\" BINARY_FLOAT,\"c8\" DATE,\"c9\" TIMESTAMP(6),\"c10\" TIMESTAMP(3),\"c11\" TIMESTAMP(3) WITH TIME ZONE, PRIMARY KEY (\"c3\",\"c1\",\"c4\")) ROWDEPENDENCIES", "ALTER TABLE \"my_ns\".\"foo_table\" INITRANS 3 MAXTRANS 255", "CREATE UNIQUE INDEX \"my_ns.foo_table_clustering_order_idx\" ON \"my_ns\".\"foo_table\" (\"c3\" ASC,\"c1\" DESC,\"c4\" ASC)", "CREATE INDEX \"index_my_ns_foo_table_c4\" ON \"my_ns\".\"foo_table\" (\"c4\")", @@ -535,7 +550,7 @@ public void createTableInternal_ForOracle_ShouldCreateTableAndIndexes() throws S public void createTableInternal_ForSqlite_ShouldCreateTableAndIndexes() throws SQLException { createTableInternal_ForX_CreateTableAndIndexes( RdbEngine.SQLITE, - "CREATE TABLE \"my_ns$foo_table\"(\"c3\" BOOLEAN,\"c1\" TEXT,\"c4\" BLOB,\"c2\" BIGINT,\"c5\" INT,\"c6\" DOUBLE,\"c7\" FLOAT, PRIMARY KEY (\"c3\",\"c1\",\"c4\"))", + "CREATE TABLE \"my_ns$foo_table\"(\"c3\" BOOLEAN,\"c1\" TEXT,\"c4\" BLOB,\"c2\" BIGINT,\"c5\" INT,\"c6\" DOUBLE,\"c7\" FLOAT,\"c8\" BIGINT,\"c9\" BIGINT,\"c10\" BIGINT,\"c11\" BIGINT, 
PRIMARY KEY (\"c3\",\"c1\",\"c4\"))", "CREATE INDEX \"index_my_ns_foo_table_c4\" ON \"my_ns$foo_table\" (\"c4\")", "CREATE INDEX \"index_my_ns_foo_table_c1\" ON \"my_ns$foo_table\" (\"c1\")"); } @@ -563,6 +578,10 @@ private void createTableInternal_ForX_CreateTableAndIndexes( .addColumn("c5", DataType.INT) .addColumn("c6", DataType.DOUBLE) .addColumn("c7", DataType.FLOAT) + .addColumn("c8", DataType.DATE) + .addColumn("c9", DataType.TIME) + .addColumn("c10", DataType.TIMESTAMP) + .addColumn("c11", DataType.TIMESTAMPTZ) .addSecondaryIndex("c1") .addSecondaryIndex("c4") .build(); @@ -593,7 +612,7 @@ public void createTableInternal_IfNotExistsForMysql_ShouldCreateTableAndIndexesI throws SQLException { createTableInternal_IfNotExistsForX_createTableAndIndexesIfNotExists( RdbEngine.MYSQL, - "CREATE TABLE IF NOT EXISTS `my_ns`.`foo_table`(`c3` BOOLEAN,`c1` VARCHAR(128),`c4` VARBINARY(128),`c2` BIGINT,`c5` INT,`c6` DOUBLE,`c7` REAL, PRIMARY KEY (`c3` ASC,`c1` DESC,`c4` ASC))", + "CREATE TABLE IF NOT EXISTS `my_ns`.`foo_table`(`c3` BOOLEAN,`c1` VARCHAR(128),`c4` VARBINARY(128),`c2` BIGINT,`c5` INT,`c6` DOUBLE,`c7` REAL,`c8` DATE,`c9` TIME(6),`c10` DATETIME(3),`c11` DATETIME(3), PRIMARY KEY (`c3` ASC,`c1` DESC,`c4` ASC))", "CREATE INDEX `index_my_ns_foo_table_c4` ON `my_ns`.`foo_table` (`c4`)", "CREATE INDEX `index_my_ns_foo_table_c1` ON `my_ns`.`foo_table` (`c1`)"); } @@ -603,7 +622,7 @@ public void createTableInternal_IfNotExistsForPostgresql_ShouldCreateTableAndInd throws SQLException { createTableInternal_IfNotExistsForX_createTableAndIndexesIfNotExists( RdbEngine.POSTGRESQL, - "CREATE TABLE IF NOT EXISTS \"my_ns\".\"foo_table\"(\"c3\" BOOLEAN,\"c1\" VARCHAR(10485760),\"c4\" BYTEA,\"c2\" BIGINT,\"c5\" INT,\"c6\" DOUBLE PRECISION,\"c7\" REAL, PRIMARY KEY (\"c3\",\"c1\",\"c4\"))", + "CREATE TABLE IF NOT EXISTS \"my_ns\".\"foo_table\"(\"c3\" BOOLEAN,\"c1\" VARCHAR(10485760),\"c4\" BYTEA,\"c2\" BIGINT,\"c5\" INT,\"c6\" DOUBLE PRECISION,\"c7\" REAL,\"c8\" DATE,\"c9\" TIME,\"c10\" TIMESTAMP,\"c11\" TIMESTAMP WITH TIME ZONE, PRIMARY KEY (\"c3\",\"c1\",\"c4\"))", "CREATE UNIQUE INDEX IF NOT EXISTS \"my_ns.foo_table_clustering_order_idx\" ON \"my_ns\".\"foo_table\" (\"c3\" ASC,\"c1\" DESC,\"c4\" ASC)", "CREATE INDEX IF NOT EXISTS \"index_my_ns_foo_table_c4\" ON \"my_ns\".\"foo_table\" (\"c4\")", "CREATE INDEX IF NOT EXISTS \"index_my_ns_foo_table_c1\" ON \"my_ns\".\"foo_table\" (\"c1\")"); @@ -614,8 +633,7 @@ public void createTableInternal_IfNotExistsForSqlServer_ShouldCreateTableAndInde throws SQLException { createTableInternal_IfNotExistsForX_createTableAndIndexesIfNotExists( RdbEngine.SQL_SERVER, - "CREATE TABLE [my_ns].[foo_table]([c3] BIT,[c1] VARCHAR(8000)," - + "[c4] VARBINARY(8000),[c2] BIGINT,[c5] INT,[c6] FLOAT,[c7] FLOAT(24), PRIMARY KEY ([c3] ASC,[c1] DESC,[c4] ASC))", + "CREATE TABLE [my_ns].[foo_table]([c3] BIT,[c1] VARCHAR(8000),[c4] VARBINARY(8000),[c2] BIGINT,[c5] INT,[c6] FLOAT,[c7] FLOAT(24),[c8] DATE,[c9] TIME(6),[c10] DATETIME2(3),[c11] DATETIMEOFFSET(3), PRIMARY KEY ([c3] ASC,[c1] DESC,[c4] ASC))", "CREATE INDEX [index_my_ns_foo_table_c4] ON [my_ns].[foo_table] ([c4])", "CREATE INDEX [index_my_ns_foo_table_c1] ON [my_ns].[foo_table] ([c1])"); } @@ -625,7 +643,7 @@ public void createTableInternal_IfNotExistsForOracle_ShouldCreateTableAndIndexes throws SQLException { createTableInternal_IfNotExistsForX_createTableAndIndexesIfNotExists( RdbEngine.ORACLE, - "CREATE TABLE \"my_ns\".\"foo_table\"(\"c3\" NUMBER(1),\"c1\" VARCHAR2(128),\"c4\" RAW(128),\"c2\" NUMBER(19),\"c5\" 
NUMBER(10),\"c6\" BINARY_DOUBLE,\"c7\" BINARY_FLOAT, PRIMARY KEY (\"c3\",\"c1\",\"c4\")) ROWDEPENDENCIES", + "CREATE TABLE \"my_ns\".\"foo_table\"(\"c3\" NUMBER(1),\"c1\" VARCHAR2(128),\"c4\" RAW(128),\"c2\" NUMBER(19),\"c5\" NUMBER(10),\"c6\" BINARY_DOUBLE,\"c7\" BINARY_FLOAT,\"c8\" DATE,\"c9\" TIMESTAMP(6),\"c10\" TIMESTAMP(3),\"c11\" TIMESTAMP(3) WITH TIME ZONE, PRIMARY KEY (\"c3\",\"c1\",\"c4\")) ROWDEPENDENCIES", "ALTER TABLE \"my_ns\".\"foo_table\" INITRANS 3 MAXTRANS 255", "CREATE UNIQUE INDEX \"my_ns.foo_table_clustering_order_idx\" ON \"my_ns\".\"foo_table\" (\"c3\" ASC,\"c1\" DESC,\"c4\" ASC)", "CREATE INDEX \"index_my_ns_foo_table_c4\" ON \"my_ns\".\"foo_table\" (\"c4\")", @@ -637,7 +655,7 @@ public void createTableInternal_IfNotExistsForSqlite_ShouldCreateTableAndIndexes throws SQLException { createTableInternal_IfNotExistsForX_createTableAndIndexesIfNotExists( RdbEngine.SQLITE, - "CREATE TABLE IF NOT EXISTS \"my_ns$foo_table\"(\"c3\" BOOLEAN,\"c1\" TEXT,\"c4\" BLOB,\"c2\" BIGINT,\"c5\" INT,\"c6\" DOUBLE,\"c7\" FLOAT, PRIMARY KEY (\"c3\",\"c1\",\"c4\"))", + "CREATE TABLE IF NOT EXISTS \"my_ns$foo_table\"(\"c3\" BOOLEAN,\"c1\" TEXT,\"c4\" BLOB,\"c2\" BIGINT,\"c5\" INT,\"c6\" DOUBLE,\"c7\" FLOAT,\"c8\" BIGINT,\"c9\" BIGINT,\"c10\" BIGINT,\"c11\" BIGINT, PRIMARY KEY (\"c3\",\"c1\",\"c4\"))", "CREATE INDEX IF NOT EXISTS \"index_my_ns_foo_table_c4\" ON \"my_ns$foo_table\" (\"c4\")", "CREATE INDEX IF NOT EXISTS \"index_my_ns_foo_table_c1\" ON \"my_ns$foo_table\" (\"c1\")"); } @@ -666,6 +684,10 @@ private void createTableInternal_IfNotExistsForX_createTableAndIndexesIfNotExist .addColumn("c5", DataType.INT) .addColumn("c6", DataType.DOUBLE) .addColumn("c7", DataType.FLOAT) + .addColumn("c8", DataType.DATE) + .addColumn("c9", DataType.TIME) + .addColumn("c10", DataType.TIMESTAMP) + .addColumn("c11", DataType.TIMESTAMPTZ) .addSecondaryIndex("c1") .addSecondaryIndex("c4") .build(); @@ -1003,7 +1025,11 @@ public void addTableMetadata_ifNotExistsAndDoNotOverwriteMetadataForMysql_Should + "`.`metadata` VALUES ('my_ns.foo_table','c6','DOUBLE',NULL,NULL,false,6)", "INSERT INTO `" + METADATA_SCHEMA - + "`.`metadata` VALUES ('my_ns.foo_table','c7','FLOAT',NULL,NULL,false,7)"); + + "`.`metadata` VALUES ('my_ns.foo_table','c7','FLOAT',NULL,NULL,false,7)", + "INSERT INTO `scalardb`.`metadata` VALUES ('my_ns.foo_table','c8','DATE',NULL,NULL,false,8)", + "INSERT INTO `scalardb`.`metadata` VALUES ('my_ns.foo_table','c9','TIME',NULL,NULL,false,9)", + "INSERT INTO `scalardb`.`metadata` VALUES ('my_ns.foo_table','c10','TIMESTAMP',NULL,NULL,false,10)", + "INSERT INTO `scalardb`.`metadata` VALUES ('my_ns.foo_table','c11','TIMESTAMPTZ',NULL,NULL,false,11)"); } @Test @@ -1044,7 +1070,11 @@ public void addTableMetadata_ifNotExistsAndDoNotOverwriteMetadataForMysql_Should + "\".\"metadata\" VALUES ('my_ns.foo_table','c6','DOUBLE',NULL,NULL,false,6)", "INSERT INTO \"" + METADATA_SCHEMA - + "\".\"metadata\" VALUES ('my_ns.foo_table','c7','FLOAT',NULL,NULL,false,7)"); + + "\".\"metadata\" VALUES ('my_ns.foo_table','c7','FLOAT',NULL,NULL,false,7)", + "INSERT INTO \"scalardb\".\"metadata\" VALUES ('my_ns.foo_table','c8','DATE',NULL,NULL,false,8)", + "INSERT INTO \"scalardb\".\"metadata\" VALUES ('my_ns.foo_table','c9','TIME',NULL,NULL,false,9)", + "INSERT INTO \"scalardb\".\"metadata\" VALUES ('my_ns.foo_table','c10','TIMESTAMP',NULL,NULL,false,10)", + "INSERT INTO \"scalardb\".\"metadata\" VALUES ('my_ns.foo_table','c11','TIMESTAMPTZ',NULL,NULL,false,11)"); } @Test @@ -1084,7 +1114,11 @@ public void 
addTableMetadata_ifNotExistsAndDoNotOverwriteMetadataForSqlServer_Sh + "].[metadata] VALUES ('my_ns.foo_table','c6','DOUBLE',NULL,NULL,0,6)", "INSERT INTO [" + METADATA_SCHEMA - + "].[metadata] VALUES ('my_ns.foo_table','c7','FLOAT',NULL,NULL,0,7)"); + + "].[metadata] VALUES ('my_ns.foo_table','c7','FLOAT',NULL,NULL,0,7)", + "INSERT INTO [scalardb].[metadata] VALUES ('my_ns.foo_table','c8','DATE',NULL,NULL,0,8)", + "INSERT INTO [scalardb].[metadata] VALUES ('my_ns.foo_table','c9','TIME',NULL,NULL,0,9)", + "INSERT INTO [scalardb].[metadata] VALUES ('my_ns.foo_table','c10','TIMESTAMP',NULL,NULL,0,10)", + "INSERT INTO [scalardb].[metadata] VALUES ('my_ns.foo_table','c11','TIMESTAMPTZ',NULL,NULL,0,11)"); } @Test @@ -1117,7 +1151,11 @@ public void addTableMetadata_ifNotExistsAndDoNotOverwriteMetadataForOracle_Shoul + "\".\"metadata\" VALUES ('my_ns.foo_table','c6','DOUBLE',NULL,NULL,0,6)", "INSERT INTO \"" + METADATA_SCHEMA - + "\".\"metadata\" VALUES ('my_ns.foo_table','c7','FLOAT',NULL,NULL,0,7)"); + + "\".\"metadata\" VALUES ('my_ns.foo_table','c7','FLOAT',NULL,NULL,0,7)", + "INSERT INTO \"scalardb\".\"metadata\" VALUES ('my_ns.foo_table','c8','DATE',NULL,NULL,0,8)", + "INSERT INTO \"scalardb\".\"metadata\" VALUES ('my_ns.foo_table','c9','TIME',NULL,NULL,0,9)", + "INSERT INTO \"scalardb\".\"metadata\" VALUES ('my_ns.foo_table','c10','TIMESTAMP',NULL,NULL,0,10)", + "INSERT INTO \"scalardb\".\"metadata\" VALUES ('my_ns.foo_table','c11','TIMESTAMPTZ',NULL,NULL,0,11)"); } @Test @@ -1156,7 +1194,11 @@ public void addTableMetadata_ifNotExistsAndDoNotOverwriteMetadataForSqlite_Shoul + "$metadata\" VALUES ('my_ns.foo_table','c6','DOUBLE',NULL,NULL,FALSE,6)", "INSERT INTO \"" + METADATA_SCHEMA - + "$metadata\" VALUES ('my_ns.foo_table','c7','FLOAT',NULL,NULL,FALSE,7)"); + + "$metadata\" VALUES ('my_ns.foo_table','c7','FLOAT',NULL,NULL,FALSE,7)", + "INSERT INTO \"scalardb$metadata\" VALUES ('my_ns.foo_table','c8','DATE',NULL,NULL,FALSE,8)", + "INSERT INTO \"scalardb$metadata\" VALUES ('my_ns.foo_table','c9','TIME',NULL,NULL,FALSE,9)", + "INSERT INTO \"scalardb$metadata\" VALUES ('my_ns.foo_table','c10','TIMESTAMP',NULL,NULL,FALSE,10)", + "INSERT INTO \"scalardb$metadata\" VALUES ('my_ns.foo_table','c11','TIMESTAMPTZ',NULL,NULL,FALSE,11)"); } private void addTableMetadata_createMetadataTableIfNotExistsForX_ShouldWorkProperly( @@ -1176,6 +1218,10 @@ private void addTableMetadata_createMetadataTableIfNotExistsForX_ShouldWorkPrope .addColumn("c5", DataType.INT) .addColumn("c6", DataType.DOUBLE) .addColumn("c7", DataType.FLOAT) + .addColumn("c8", DataType.DATE) + .addColumn("c9", DataType.TIME) + .addColumn("c10", DataType.TIMESTAMP) + .addColumn("c11", DataType.TIMESTAMPTZ) .addSecondaryIndex("c1") .addSecondaryIndex("c4") .build(); @@ -1220,6 +1266,10 @@ public void createTable_ShouldCallCreateTableAndAddTableMetadataCorrectly(RdbEng .addColumn("c5", DataType.INT) .addColumn("c6", DataType.DOUBLE) .addColumn("c7", DataType.FLOAT) + .addColumn("c8", DataType.DATE) + .addColumn("c9", DataType.TIME) + .addColumn("c10", DataType.TIMESTAMP) + .addColumn("c11", DataType.TIMESTAMPTZ) .addSecondaryIndex("c1") .addSecondaryIndex("c4") .build(); @@ -1255,6 +1305,10 @@ public void repairTable_ShouldCallCreateTableAndAddTableMetadataCorrectly(RdbEng .addColumn("c5", DataType.INT) .addColumn("c6", DataType.DOUBLE) .addColumn("c7", DataType.FLOAT) + .addColumn("c8", DataType.DATE) + .addColumn("c9", DataType.TIME) + .addColumn("c10", DataType.TIMESTAMP) + .addColumn("c11", DataType.TIMESTAMPTZ) 
.addSecondaryIndex("c1") .addSecondaryIndex("c4") .build(); @@ -2640,7 +2694,7 @@ public Boolean answer(InvocationOnMock invocation) { .thenReturn(""); when(columnResults.getInt(JDBC_COL_COLUMN_SIZE)).thenReturn(0).thenReturn(0).thenReturn(0); when(columnResults.getInt(JDBC_COL_DECIMAL_DIGITS)).thenReturn(0).thenReturn(0).thenReturn(0); - RdbEngineStrategy rdbEngineStrategy = getRdbEngineStrategy(rdbEngine); + RdbEngineStrategy rdbEngineStrategy = spy(getRdbEngineStrategy(rdbEngine)); if (rdbEngineStrategy instanceof RdbEngineMysql) { when(metadata.getPrimaryKeys(NAMESPACE, NAMESPACE, TABLE)).thenReturn(primaryKeyResults); when(metadata.getColumns(NAMESPACE, NAMESPACE, TABLE, "%")).thenReturn(columnResults); @@ -2653,18 +2707,42 @@ public Boolean answer(InvocationOnMock invocation) { expectedColumns.put("pk1", DataType.TEXT); expectedColumns.put("pk2", DataType.TEXT); expectedColumns.put("col", DataType.FLOAT); - - JdbcAdmin admin = createJdbcAdminFor(rdbEngine); + JdbcAdmin admin = createJdbcAdminFor(rdbEngineStrategy); String description = "database engine specific test failed: " + rdbEngine; + Map overrideColumnsType = ImmutableMap.of("col", DataType.FLOAT); // Act - TableMetadata actual = admin.getImportTableMetadata(NAMESPACE, TABLE); + TableMetadata actual = admin.getImportTableMetadata(NAMESPACE, TABLE, overrideColumnsType); // Assert verify(checkTableExistStatement, description(description)) .execute(expectedCheckTableExistStatement); assertThat(actual.getPartitionKeyNames()).hasSameElementsAs(ImmutableSet.of("pk1", "pk2")); assertThat(actual.getColumnDataTypes()).containsExactlyEntriesOf(expectedColumns); + verify(rdbEngineStrategy) + .getDataTypeForScalarDb( + any(JDBCType.class), + anyString(), + anyInt(), + anyInt(), + eq(getFullTableName(NAMESPACE, TABLE) + " pk1"), + eq(null)); + verify(rdbEngineStrategy) + .getDataTypeForScalarDb( + any(JDBCType.class), + anyString(), + anyInt(), + anyInt(), + eq(getFullTableName(NAMESPACE, TABLE) + " pk2"), + eq(null)); + verify(rdbEngineStrategy) + .getDataTypeForScalarDb( + any(JDBCType.class), + anyString(), + anyInt(), + anyInt(), + eq(getFullTableName(NAMESPACE, TABLE) + " col"), + eq(DataType.FLOAT)); } private void getImportTableMetadata_ForSQLite_ShouldThrowUnsupportedOperationException( @@ -2673,7 +2751,7 @@ private void getImportTableMetadata_ForSQLite_ShouldThrowUnsupportedOperationExc JdbcAdmin admin = createJdbcAdminFor(rdbEngine); // Act Assert - assertThatThrownBy(() -> admin.getImportTableMetadata(NAMESPACE, TABLE)) + assertThatThrownBy(() -> admin.getImportTableMetadata(NAMESPACE, TABLE, Collections.emptyMap())) .isInstanceOf(UnsupportedOperationException.class); } @@ -2712,7 +2790,7 @@ private void getImportTableMetadata_WithNonExistingTableForX_ShouldThrowIllegalA when(checkTableExistStatement.execute(any())).thenThrow(sqlException); // Act Assert - assertThatThrownBy(() -> admin.getImportTableMetadata(NAMESPACE, TABLE)) + assertThatThrownBy(() -> admin.getImportTableMetadata(NAMESPACE, TABLE, Collections.emptyMap())) .isInstanceOf(IllegalArgumentException.class); verify( checkTableExistStatement, @@ -2741,7 +2819,9 @@ private void getImportTableMetadata_PrimaryKeyNotExistsForX_ShouldThrowIllegalSt String description = "database engine specific test failed: " + rdbEngine; // Act - Throwable thrown = catchThrowable(() -> admin.getImportTableMetadata(NAMESPACE, TABLE)); + Throwable thrown = + catchThrowable( + () -> admin.getImportTableMetadata(NAMESPACE, TABLE, Collections.emptyMap())); // Assert 
verify(checkTableExistStatement, description(description)) @@ -2774,8 +2854,8 @@ private void getImportTableMetadata_UnsupportedDataTypeGivenForX_ShouldThrowExec when(primaryKeyResults.getString(JDBC_COL_COLUMN_NAME)).thenReturn("pk1"); when(columnResults.next()).thenReturn(true).thenReturn(false); when(columnResults.getString(JDBC_COL_COLUMN_NAME)).thenReturn("pk1"); - when(columnResults.getInt(JDBC_COL_DATA_TYPE)).thenReturn(Types.TIMESTAMP); - when(columnResults.getString(JDBC_COL_TYPE_NAME)).thenReturn("timestamp"); + when(columnResults.getInt(JDBC_COL_DATA_TYPE)).thenReturn(Types.OTHER); + when(columnResults.getString(JDBC_COL_TYPE_NAME)).thenReturn("any_unsupported_type"); when(columnResults.getInt(JDBC_COL_COLUMN_SIZE)).thenReturn(0); when(columnResults.getInt(JDBC_COL_DECIMAL_DIGITS)).thenReturn(0); @@ -2792,7 +2872,9 @@ private void getImportTableMetadata_UnsupportedDataTypeGivenForX_ShouldThrowExec String description = "database engine specific test failed: " + rdbEngine; // Act - Throwable thrown = catchThrowable(() -> admin.getImportTableMetadata(NAMESPACE, TABLE)); + Throwable thrown = + catchThrowable( + () -> admin.getImportTableMetadata(NAMESPACE, TABLE, Collections.emptyMap())); // Assert verify(checkTableExistStatement, description(description)) @@ -2884,7 +2966,9 @@ public void importTable_ForXBesidesSqlite_ShouldWorkProperly(RdbEngine rdbEngine when(dataSource.getConnection()).thenReturn(connection); TableMetadata importedTableMetadata = mock(TableMetadata.class); - doReturn(importedTableMetadata).when(adminSpy).getImportTableMetadata(anyString(), anyString()); + doReturn(importedTableMetadata) + .when(adminSpy) + .getImportTableMetadata(anyString(), anyString(), anyMap()); doNothing().when(adminSpy).createNamespacesTableIfNotExists(connection); doNothing().when(adminSpy).upsertIntoNamespacesTable(any(), anyString()); doNothing() @@ -2892,10 +2976,10 @@ public void importTable_ForXBesidesSqlite_ShouldWorkProperly(RdbEngine rdbEngine .addTableMetadata(any(), anyString(), anyString(), any(), anyBoolean(), anyBoolean()); // Act - adminSpy.importTable(NAMESPACE, TABLE, Collections.emptyMap()); + adminSpy.importTable(NAMESPACE, TABLE, Collections.emptyMap(), Collections.emptyMap()); // Assert - verify(adminSpy).getImportTableMetadata(NAMESPACE, TABLE); + verify(adminSpy).getImportTableMetadata(NAMESPACE, TABLE, Collections.emptyMap()); verify(adminSpy).createNamespacesTableIfNotExists(connection); verify(adminSpy).upsertIntoNamespacesTable(connection, NAMESPACE); verify(adminSpy) diff --git a/core/src/test/java/com/scalar/db/storage/jdbc/JdbcConfigTest.java b/core/src/test/java/com/scalar/db/storage/jdbc/JdbcConfigTest.java index f08ce0c740..45648b7e8a 100644 --- a/core/src/test/java/com/scalar/db/storage/jdbc/JdbcConfigTest.java +++ b/core/src/test/java/com/scalar/db/storage/jdbc/JdbcConfigTest.java @@ -4,6 +4,8 @@ import static org.assertj.core.api.Assertions.assertThatThrownBy; import com.scalar.db.config.DatabaseConfig; +import java.time.LocalDate; +import java.time.format.DateTimeFormatter; import java.util.Properties; import org.junit.jupiter.api.Test; @@ -38,6 +40,7 @@ public void constructor_AllPropertiesGiven_ShouldLoadProperly() { props.setProperty(JdbcConfig.ADMIN_CONNECTION_POOL_MAX_TOTAL, "200"); props.setProperty(JdbcConfig.MYSQL_VARIABLE_KEY_COLUMN_SIZE, "64"); props.setProperty(JdbcConfig.ORACLE_VARIABLE_KEY_COLUMN_SIZE, "64"); + props.setProperty(JdbcConfig.ORACLE_TIME_COLUMN_DEFAULT_DATE_COMPONENT, "2020-01-01"); // Act JdbcConfig config = new 
JdbcConfig(new DatabaseConfig(props)); @@ -64,6 +67,8 @@ public void constructor_AllPropertiesGiven_ShouldLoadProperly() { assertThat(config.getAdminConnectionPoolMaxTotal()).isEqualTo(200); assertThat(config.getMysqlVariableKeyColumnSize()).isEqualTo(64); assertThat(config.getOracleVariableKeyColumnSize()).isEqualTo(64); + assertThat(config.getOracleTimeColumnDefaultDateComponent()) + .isEqualTo(LocalDate.parse("2020-01-01", DateTimeFormatter.ISO_LOCAL_DATE)); } @Test @@ -107,6 +112,8 @@ public void constructor_AllPropertiesGiven_ShouldLoadProperly() { .isEqualTo(JdbcConfig.DEFAULT_VARIABLE_KEY_COLUMN_SIZE); assertThat(config.getOracleVariableKeyColumnSize()) .isEqualTo(JdbcConfig.DEFAULT_VARIABLE_KEY_COLUMN_SIZE); + assertThat(config.getOracleTimeColumnDefaultDateComponent()) + .isEqualTo(JdbcConfig.DEFAULT_ORACLE_TIME_COLUMN_DEFAULT_DATE_COMPONENT); } @Test diff --git a/core/src/test/java/com/scalar/db/storage/jdbc/JdbcUtilsTest.java b/core/src/test/java/com/scalar/db/storage/jdbc/JdbcUtilsTest.java index 767e721b62..6b3b3edada 100644 --- a/core/src/test/java/com/scalar/db/storage/jdbc/JdbcUtilsTest.java +++ b/core/src/test/java/com/scalar/db/storage/jdbc/JdbcUtilsTest.java @@ -1,17 +1,27 @@ package com.scalar.db.storage.jdbc; import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.withSettings; +import com.google.common.collect.ImmutableMap; import com.scalar.db.config.DatabaseConfig; import java.sql.Connection; import java.sql.Driver; import java.sql.SQLException; +import java.util.Collections; import java.util.Properties; import org.apache.commons.dbcp2.BasicDataSource; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.mockito.Answers; import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; import org.mockito.MockitoAnnotations; public class JdbcUtilsTest { @@ -41,6 +51,7 @@ public void initDataSource_NonTransactional_ShouldReturnProperDataSource() throw JdbcConfig config = new JdbcConfig(new DatabaseConfig(properties)); Driver driver = new com.mysql.cj.jdbc.Driver(); when(rdbEngine.getDriver()).thenReturn(driver); + when(rdbEngine.getConnectionProperties()).thenReturn(Collections.emptyMap()); // Act BasicDataSource dataSource = JdbcUtils.initDataSource(config, rdbEngine); @@ -83,6 +94,7 @@ public void initDataSource_Transactional_ShouldReturnProperDataSource() throws S JdbcConfig config = new JdbcConfig(new DatabaseConfig(properties)); Driver driver = new org.postgresql.Driver(); when(rdbEngine.getDriver()).thenReturn(driver); + when(rdbEngine.getConnectionProperties()).thenReturn(Collections.emptyMap()); // Act BasicDataSource dataSource = JdbcUtils.initDataSource(config, rdbEngine, true); @@ -107,6 +119,39 @@ public void initDataSource_Transactional_ShouldReturnProperDataSource() throws S dataSource.close(); } + @Test + public void initDataSource_WithRdbEngineConnectionProperties_ShouldAddProperties() { + // Arrange + Properties properties = new Properties(); + properties.setProperty( + DatabaseConfig.CONTACT_POINTS, + "jdbc:sqlserver://localhost:5432;prop1=prop1Value;prop3=prop3Value"); + properties.setProperty(DatabaseConfig.USERNAME, "foo"); + properties.setProperty(DatabaseConfig.PASSWORD, "pass"); + properties.setProperty(DatabaseConfig.STORAGE, "jdbc"); 
+ + JdbcConfig config = new JdbcConfig(new DatabaseConfig(properties)); + Driver driver = new com.microsoft.sqlserver.jdbc.SQLServerDriver(); + when(rdbEngine.getDriver()).thenReturn(driver); + when(rdbEngine.getConnectionProperties()) + .thenReturn(ImmutableMap.of("prop1", "prop1Value", "prop2", "prop2Value")); + + try (MockedStatic<JdbcUtils> jdbcUtils = + Mockito.mockStatic( + JdbcUtils.class, withSettings().defaultAnswer(Answers.CALLS_REAL_METHODS))) { + BasicDataSource dataSource = spy(BasicDataSource.class); + jdbcUtils.when(JdbcUtils::createDataSource).thenReturn(dataSource); + + // Act + jdbcUtils.when(() -> JdbcUtils.initDataSource(config, rdbEngine)).thenCallRealMethod(); + + // Assert + verify(dataSource).addConnectionProperty("prop1", "prop1Value"); + verify(dataSource).addConnectionProperty("prop2", "prop2Value"); + verify(dataSource, never()).setConnectionProperties(anyString()); + } + } + + @Test public void initDataSourceForTableMetadata_ShouldReturnProperDataSource() throws SQLException { // Arrange
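The data-source test above pins down a small but deliberate behavior: engine-specific connection properties are registered one at a time with addConnectionProperty(), never as a single setConnectionProperties() string, which in commons-dbcp2 would replace the whole property set at once. A minimal sketch of the expected wiring, assuming commons-dbcp2's BasicDataSource and using the hypothetical property names from the test:

    // Sketch only; illustrates the behavior verified in the test above.
    // Assumed imports: java.util.Map, org.apache.commons.dbcp2.BasicDataSource.
    static BasicDataSource newDataSource(String jdbcUrl, Map<String, String> engineProps) {
      BasicDataSource dataSource = new BasicDataSource();
      dataSource.setUrl(jdbcUrl); // the URL may already carry prop1=prop1Value etc.
      // Each engine-provided default is added individually, preserving any
      // properties registered earlier.
      engineProps.forEach(dataSource::addConnectionProperty);
      return dataSource;
    }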
diff --git a/core/src/test/java/com/scalar/db/storage/jdbc/RdbEngineTest.java b/core/src/test/java/com/scalar/db/storage/jdbc/RdbEngineTest.java index fabbeff060..202dd5e212 100644 --- a/core/src/test/java/com/scalar/db/storage/jdbc/RdbEngineTest.java +++ b/core/src/test/java/com/scalar/db/storage/jdbc/RdbEngineTest.java @@ -12,6 +12,7 @@ import java.util.HashMap; import java.util.Map; import java.util.Objects; +import javax.annotation.Nullable; import javax.annotation.concurrent.Immutable; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.params.ParameterizedTest; @@ -40,19 +41,47 @@ public void getDataTypeForScalarDbTest(RdbEngine rdbEngineType) { String description = String.format( "database engine specific test failed: " - + "%s, JDBCType = %s, type name = %s, column size = %d, digits = %dp", - rdbEngineType, given.type, given.typeName, given.columnSize, given.digits); + + "%s, JDBCType = %s, type name = %s, column size = %d, digits = %dp, overrideDataType = %s", + rdbEngineType, + given.type, + given.typeName, + given.columnSize, + given.digits, + given.overrideDataType); if (expected != null) { - DataType actual = - rdbEngine.getDataTypeForScalarDb( - given.type, given.typeName, given.columnSize, given.digits, ""); - assertThat(actual).as(description).isEqualTo(expected); + if (given.overrideDataType != null) { + DataType actualWithAllowedOverride = + rdbEngine.getDataTypeForScalarDb( + given.type, + given.typeName, + given.columnSize, + given.digits, + "", + given.overrideDataType); + assertThat(actualWithAllowedOverride).as(description).isEqualTo(expected); + } else { + DataType actualWithoutOverride = + rdbEngine.getDataTypeForScalarDb( + given.type, given.typeName, given.columnSize, given.digits, "", null); + assertThat(actualWithoutOverride).as(description).isEqualTo(expected); + + // Overriding with the default type mapping should work as well + DataType actualWithOverrideSameAsDefault = + rdbEngine.getDataTypeForScalarDb( + given.type, given.typeName, given.columnSize, given.digits, "", expected); + assertThat(actualWithOverrideSameAsDefault).as(description).isEqualTo(expected); + } } else { Throwable thrown = catchThrowable( () -> rdbEngine.getDataTypeForScalarDb( - given.type, given.typeName, given.columnSize, given.digits, "")); + given.type, + given.typeName, + given.columnSize, + given.digits, + "", + given.overrideDataType)); assertThat(thrown).as(description).isInstanceOf(IllegalArgumentException.class); } }); @@ -155,18 +184,64 @@ private static void prepareDataTypeMap() { DATA_TYPE_MAP.get(SQL_SERVER).put(new Column(JDBCType.DECIMAL, "decimal"), null); // DATE/TIME/TIMESTAMP - DATA_TYPE_MAP.get(MYSQL).put(new Column(JDBCType.DATE, "DATE"), null); - DATA_TYPE_MAP.get(MYSQL).put(new Column(JDBCType.TIME, "TIME"), null); - DATA_TYPE_MAP.get(MYSQL).put(new Column(JDBCType.TIMESTAMP, "TIMESTAMP"), null); - DATA_TYPE_MAP.get(POSTGRESQL).put(new Column(JDBCType.DATE, "date"), null); - DATA_TYPE_MAP.get(POSTGRESQL).put(new Column(JDBCType.TIME, "time"), null); - DATA_TYPE_MAP.get(POSTGRESQL).put(new Column(JDBCType.TIMESTAMP, "timestamp"), null); - DATA_TYPE_MAP.get(ORACLE).put(new Column(JDBCType.DATE, "DATE"), null); - DATA_TYPE_MAP.get(ORACLE).put(new Column(JDBCType.TIME, "TIME"), null); - DATA_TYPE_MAP.get(ORACLE).put(new Column(JDBCType.TIMESTAMP, "TIMESTAMP"), null); - DATA_TYPE_MAP.get(SQL_SERVER).put(new Column(JDBCType.DATE, "date"), null); - DATA_TYPE_MAP.get(SQL_SERVER).put(new Column(JDBCType.TIME, "time"), null); - DATA_TYPE_MAP.get(SQL_SERVER).put(new Column(JDBCType.TIMESTAMP, "datetime"), null); + DATA_TYPE_MAP.get(MYSQL).put(new Column(JDBCType.DATE, "DATE"), DataType.DATE); + DATA_TYPE_MAP.get(MYSQL).put(new Column(JDBCType.DATE, "YEAR"), null); + DATA_TYPE_MAP.get(MYSQL).put(new Column(JDBCType.TIME, "TIME"), DataType.TIME); + DATA_TYPE_MAP.get(MYSQL).put(new Column(JDBCType.TIMESTAMP, "DATETIME"), DataType.TIMESTAMP); + DATA_TYPE_MAP + .get(MYSQL) + .put( + new Column(JDBCType.TIMESTAMP, "DATETIME", DataType.TIMESTAMPTZ), DataType.TIMESTAMPTZ); + DATA_TYPE_MAP.get(MYSQL).put(new Column(JDBCType.TIMESTAMP, "TIMESTAMP"), DataType.TIMESTAMPTZ); + DATA_TYPE_MAP + .get(MYSQL) + .put(new Column(JDBCType.TIMESTAMP, "TIMESTAMP", DataType.TIMESTAMP), null); + DATA_TYPE_MAP.get(POSTGRESQL).put(new Column(JDBCType.DATE, "date"), DataType.DATE); + DATA_TYPE_MAP.get(POSTGRESQL).put(new Column(JDBCType.TIME, "time"), DataType.TIME); + DATA_TYPE_MAP.get(POSTGRESQL).put(new Column(JDBCType.TIME, "timetz"), null); + DATA_TYPE_MAP + .get(POSTGRESQL) + .put(new Column(JDBCType.TIMESTAMP, "timestamp"), DataType.TIMESTAMP); + DATA_TYPE_MAP + .get(POSTGRESQL) + .put(new Column(JDBCType.TIMESTAMP, "timestamptz"), DataType.TIMESTAMPTZ); + DATA_TYPE_MAP.get(ORACLE).put(new Column(JDBCType.TIMESTAMP, "DATE"), DataType.DATE); + DATA_TYPE_MAP + .get(ORACLE) + .put(new Column(JDBCType.TIMESTAMP, "DATE", DataType.TIME), DataType.TIME); + DATA_TYPE_MAP + .get(ORACLE) + .put(new Column(JDBCType.TIMESTAMP, "DATE", DataType.TIMESTAMP), DataType.TIMESTAMP); + DATA_TYPE_MAP.get(ORACLE).put(new Column(JDBCType.TIMESTAMP, "TIMESTAMP"), DataType.TIMESTAMP); + DATA_TYPE_MAP + .get(ORACLE) + .put(new Column(JDBCType.TIMESTAMP, "TIMESTAMP", DataType.TIME), DataType.TIME); + DATA_TYPE_MAP + .get(ORACLE) + .put(new Column(JDBCType.OTHER, "TIMESTAMP WITH TIME ZONE"), DataType.TIMESTAMPTZ); + DATA_TYPE_MAP + .get(ORACLE) + .put(new Column(JDBCType.OTHER, "TIMESTAMP(3) WITH TIME ZONE"), DataType.TIMESTAMPTZ); + DATA_TYPE_MAP + .get(ORACLE) + .put(new Column(JDBCType.OTHER, "TIMESTAMP WITH LOCAL TIME ZONE"), DataType.TIMESTAMPTZ); + DATA_TYPE_MAP + .get(ORACLE) + .put(new Column(JDBCType.OTHER, "TIMESTAMP(1) WITH LOCAL TIME ZONE"), DataType.TIMESTAMPTZ); + DATA_TYPE_MAP.get(SQL_SERVER).put(new Column(JDBCType.DATE, "date"), DataType.DATE); + DATA_TYPE_MAP.get(SQL_SERVER).put(new Column(JDBCType.TIME, "time"), DataType.TIME); + DATA_TYPE_MAP + .get(SQL_SERVER) + .put(new Column(JDBCType.TIMESTAMP, "datetime"), DataType.TIMESTAMP); + DATA_TYPE_MAP + .get(SQL_SERVER) +
.put(new Column(JDBCType.TIMESTAMP, "datetime2"), DataType.TIMESTAMP); + DATA_TYPE_MAP + .get(SQL_SERVER) + .put(new Column(JDBCType.TIMESTAMP, "smalldatetime"), DataType.TIMESTAMP); + DATA_TYPE_MAP + .get(SQL_SERVER) + .put(new Column(JDBCType.OTHER, "datetimeoffset"), DataType.TIMESTAMPTZ); // Other unsupported data types DATA_TYPE_MAP.get(MYSQL).put(new Column(JDBCType.BIT, "BIT", 8, 0), null); @@ -186,16 +261,31 @@ private static class Column { final String typeName; final int columnSize; final int digits; + @Nullable final DataType overrideDataType; Column(JDBCType type, String typeName) { - this(type, typeName, 0, 0); + this(type, typeName, 0, 0, null); + } + + Column(JDBCType type, String typeName, DataType overrideDataType) { + this(type, typeName, 0, 0, overrideDataType); } Column(JDBCType type, String typeName, int columnSize, int digits) { + this(type, typeName, columnSize, digits, null); + } + + Column( + JDBCType type, + String typeName, + int columnSize, + int digits, + @Nullable DataType overrideDataType) { this.type = type; this.typeName = typeName; this.columnSize = columnSize; this.digits = digits; + this.overrideDataType = overrideDataType; } @Override @@ -215,7 +305,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(type, typeName, columnSize, digits); + return Objects.hash(type, typeName, columnSize, digits, overrideDataType); } @Override diff --git a/core/src/test/java/com/scalar/db/storage/jdbc/ResultInterpreterTest.java b/core/src/test/java/com/scalar/db/storage/jdbc/ResultInterpreterTest.java index d4490a6df0..f9268b6adf 100644 --- a/core/src/test/java/com/scalar/db/storage/jdbc/ResultInterpreterTest.java +++ b/core/src/test/java/com/scalar/db/storage/jdbc/ResultInterpreterTest.java @@ -6,12 +6,22 @@ import com.scalar.db.api.Result; import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.BigIntColumn; import com.scalar.db.io.BigIntValue; -import com.scalar.db.io.Value; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.sql.ResultSet; import java.sql.SQLException; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.Collections; import java.util.List; import java.util.Map; @@ -33,23 +43,37 @@ public class ResultInterpreterTest { private static final String ANY_COLUMN_NAME_5 = "col5"; private static final String ANY_COLUMN_NAME_6 = "col6"; private static final String ANY_COLUMN_NAME_7 = "col7"; + private static final String ANY_COLUMN_NAME_8 = "col8"; + private static final String ANY_COLUMN_NAME_9 = "col9"; + private static final String ANY_COLUMN_NAME_10 = "col10"; + private static final String ANY_COLUMN_NAME_11 = "col11"; private static final TableMetadata TABLE_METADATA = TableMetadata.newBuilder() - .addColumn(ANY_NAME_1, com.scalar.db.io.DataType.TEXT) - .addColumn(ANY_NAME_2, com.scalar.db.io.DataType.TEXT) - .addColumn(ANY_COLUMN_NAME_1, com.scalar.db.io.DataType.BOOLEAN) - .addColumn(ANY_COLUMN_NAME_2, com.scalar.db.io.DataType.INT) - .addColumn(ANY_COLUMN_NAME_3, com.scalar.db.io.DataType.BIGINT) - .addColumn(ANY_COLUMN_NAME_4, com.scalar.db.io.DataType.FLOAT) - .addColumn(ANY_COLUMN_NAME_5, com.scalar.db.io.DataType.DOUBLE) - .addColumn(ANY_COLUMN_NAME_6, 
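The DATA_TYPE_MAP entries above read as triples: (JDBC type, native type name, optional requested override) mapped to the expected ScalarDB type, with null meaning the combination must be rejected with an IllegalArgumentException. For instance, MySQL DATETIME defaults to TIMESTAMP but may be overridden to TIMESTAMPTZ, whereas overriding MySQL's native TIMESTAMP to ScalarDB TIMESTAMP is rejected. A caller-side sketch, assuming only the getDataTypeForScalarDb signature already exercised in this test (the table/column label is hypothetical):

    // Sketch only; same signature as the calls in getDataTypeForScalarDbTest above.
    // Assumed imports: com.scalar.db.io.DataType, java.sql.JDBCType.
    static DataType importMysqlDatetimeAsTimestampTz(RdbEngineStrategy engine) {
      // With a null override, MySQL DATETIME would map to DataType.TIMESTAMP by
      // default; requesting TIMESTAMPTZ reinterprets the same column instead.
      return engine.getDataTypeForScalarDb(
          JDBCType.TIMESTAMP, "DATETIME", 0, 0, "my_ns.foo_table created_at", DataType.TIMESTAMPTZ);
    }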
com.scalar.db.io.DataType.TEXT) - .addColumn(ANY_COLUMN_NAME_7, com.scalar.db.io.DataType.BLOB) + .addColumn(ANY_NAME_1, DataType.TEXT) + .addColumn(ANY_NAME_2, DataType.TEXT) + .addColumn(ANY_COLUMN_NAME_1, DataType.BOOLEAN) + .addColumn(ANY_COLUMN_NAME_2, DataType.INT) + .addColumn(ANY_COLUMN_NAME_3, DataType.BIGINT) + .addColumn(ANY_COLUMN_NAME_4, DataType.FLOAT) + .addColumn(ANY_COLUMN_NAME_5, DataType.DOUBLE) + .addColumn(ANY_COLUMN_NAME_6, DataType.TEXT) + .addColumn(ANY_COLUMN_NAME_7, DataType.BLOB) + .addColumn(ANY_COLUMN_NAME_8, DataType.DATE) + .addColumn(ANY_COLUMN_NAME_9, DataType.TIME) + .addColumn(ANY_COLUMN_NAME_10, DataType.TIMESTAMP) + .addColumn(ANY_COLUMN_NAME_11, DataType.TIMESTAMPTZ) .addPartitionKey(ANY_NAME_1) .addClusteringKey(ANY_NAME_2) .build(); + private static final LocalDate ANY_DATE = DateColumn.MAX_VALUE; + private static final LocalTime ANY_TIME = TimeColumn.MAX_VALUE; + private static final LocalDateTime ANY_TIMESTAMP = TimestampColumn.MAX_VALUE; + private static final Instant ANY_TIMESTAMPTZ = TimestampTZColumn.MAX_VALUE; + @Mock private ResultSet resultSet; + @Mock private RdbEngineStrategy rdbEngine; @BeforeEach public void setUp() throws Exception { @@ -69,86 +93,110 @@ public void interpret_ShouldReturnWhatsSet() throws SQLException { when(resultSet.getString(ANY_COLUMN_NAME_6)).thenReturn("string"); when(resultSet.getBytes(ANY_COLUMN_NAME_7)) .thenReturn("bytes".getBytes(StandardCharsets.UTF_8)); + when(rdbEngine.parseDateColumn(resultSet, ANY_COLUMN_NAME_8)) + .thenReturn(DateColumn.of(ANY_COLUMN_NAME_8, ANY_DATE)); + when(rdbEngine.parseTimeColumn(resultSet, ANY_COLUMN_NAME_9)) + .thenReturn(TimeColumn.of(ANY_COLUMN_NAME_9, ANY_TIME)); + when(rdbEngine.parseTimestampColumn(resultSet, ANY_COLUMN_NAME_10)) + .thenReturn(TimestampColumn.of(ANY_COLUMN_NAME_10, ANY_TIMESTAMP)); + when(rdbEngine.parseTimestampTZColumn(resultSet, ANY_COLUMN_NAME_11)) + .thenReturn(TimestampTZColumn.of(ANY_COLUMN_NAME_11, ANY_TIMESTAMPTZ)); + when(resultSet.wasNull()).thenReturn(false); List projections = Collections.emptyList(); - ResultInterpreter spy = spy(new ResultInterpreter(projections, TABLE_METADATA)); + ResultInterpreter interpreter = new ResultInterpreter(projections, TABLE_METADATA, rdbEngine); // Act - Result result = spy.interpret(resultSet); + Result result = interpreter.interpret(resultSet); // Assert - assertThat(result.getValue(ANY_NAME_1).isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_1).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_1).get().getAsString().get()).isEqualTo(ANY_TEXT_1); - assertThat(result.getValue(ANY_NAME_2).isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_2).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_2).get().getAsString().get()).isEqualTo(ANY_TEXT_2); - assertThat(result.getValue(ANY_COLUMN_NAME_1).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_1).get().getAsBoolean()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_2).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_2).get().getAsInt()).isEqualTo(Integer.MAX_VALUE); - assertThat(result.getValue(ANY_COLUMN_NAME_3).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_3).get().getAsLong()) - .isEqualTo(BigIntValue.MAX_VALUE); - assertThat(result.getValue(ANY_COLUMN_NAME_4).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_4).get().getAsFloat()).isEqualTo(Float.MAX_VALUE); - 
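
A note on the test setup above: ResultInterpreter now takes an RdbEngineStrategy, and parsing of DATE, TIME, TIMESTAMP, and TIMESTAMPTZ columns is delegated to engine-specific parseDateColumn/parseTimeColumn/parseTimestampColumn/parseTimestampTZColumn methods, which this test mocks. A minimal sketch of what one such parser could look like, assuming an engine where java.sql.Timestamp round-trips safely; this is not the actual implementation of any concrete RdbEngineStrategy:

  // Hedged sketch: convert the JDBC TIMESTAMP value for columnName into a ScalarDB column.
  TimestampColumn parseTimestampColumn(ResultSet resultSet, String columnName)
      throws SQLException {
    java.sql.Timestamp value = resultSet.getTimestamp(columnName);
    return value == null
        ? TimestampColumn.ofNull(columnName) // SQL NULL becomes a null column
        : TimestampColumn.of(columnName, value.toLocalDateTime());
  }
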
assertThat(result.getValue(ANY_COLUMN_NAME_5).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_5).get().getAsDouble()).isEqualTo(Double.MAX_VALUE); - assertThat(result.getValue(ANY_COLUMN_NAME_6).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_6).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_6).get().getAsString().get()).isEqualTo("string"); - assertThat(result.getValue(ANY_COLUMN_NAME_7).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_7).get().getAsBytes().isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_7).get().getAsBytes().get()) - .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); - - Map> values = result.getValues(); - assertThat(values.containsKey(ANY_NAME_1)).isTrue(); - assertThat(values.get(ANY_NAME_1).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_NAME_1).getAsString().get()).isEqualTo(ANY_TEXT_1); - assertThat(values.containsKey(ANY_NAME_2)).isTrue(); - assertThat(values.get(ANY_NAME_2).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_NAME_2).getAsString().get()).isEqualTo(ANY_TEXT_2); - assertThat(values.containsKey(ANY_COLUMN_NAME_1)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_1).getAsBoolean()).isTrue(); - assertThat(values.containsKey(ANY_COLUMN_NAME_2)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_2).getAsInt()).isEqualTo(Integer.MAX_VALUE); - assertThat(values.containsKey(ANY_COLUMN_NAME_3)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_3).getAsLong()).isEqualTo(BigIntValue.MAX_VALUE); - assertThat(values.containsKey(ANY_COLUMN_NAME_4)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_4).getAsFloat()).isEqualTo(Float.MAX_VALUE); - assertThat(values.containsKey(ANY_COLUMN_NAME_5)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_5).getAsDouble()).isEqualTo(Double.MAX_VALUE); - assertThat(values.containsKey(ANY_COLUMN_NAME_6)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_6).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_6).getAsString().get()).isEqualTo("string"); - assertThat(values.containsKey(ANY_COLUMN_NAME_7)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_7).getAsBytes().isPresent()).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_7).getAsBytes().get()) - .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); - + assertThat(result.contains(ANY_NAME_1)).isTrue(); assertThat(result.isNull(ANY_NAME_1)).isFalse(); assertThat(result.getText(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); + assertThat(result.contains(ANY_NAME_2)).isTrue(); assertThat(result.isNull(ANY_NAME_2)).isFalse(); assertThat(result.getText(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); + assertThat(result.contains(ANY_COLUMN_NAME_1)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_1)).isFalse(); assertThat(result.getBoolean(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(result.contains(ANY_COLUMN_NAME_2)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_2)).isFalse(); assertThat(result.getInt(ANY_COLUMN_NAME_2)).isEqualTo(Integer.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_3)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_3)).isFalse(); - assertThat(result.getBigInt(ANY_COLUMN_NAME_3)).isEqualTo(BigIntValue.MAX_VALUE); + assertThat(result.getBigInt(ANY_COLUMN_NAME_3)).isEqualTo(BigIntColumn.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_4)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_4)).isFalse(); assertThat(result.getFloat(ANY_COLUMN_NAME_4)).isEqualTo(Float.MAX_VALUE); + 
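
The rewritten assertions above drop the older Value-based accessors in favor of the typed column accessors on Result. A before/after sketch; the column name is a placeholder:

  // Before (Value API, as in the removed assertions):
  //   long v = result.getValue("col3").get().getAsLong();
  // After (typed accessors, as in the added assertions):
  if (result.contains("col3") && !result.isNull("col3")) {
    long v = result.getBigInt("col3");
  }
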
assertThat(result.contains(ANY_COLUMN_NAME_5)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_5)).isFalse(); assertThat(result.getDouble(ANY_COLUMN_NAME_5)).isEqualTo(Double.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_6)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_6)).isFalse(); assertThat(result.getText(ANY_COLUMN_NAME_6)).isEqualTo("string"); + assertThat(result.contains(ANY_COLUMN_NAME_7)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_7)).isFalse(); assertThat(result.getBlob(ANY_COLUMN_NAME_7)) .isEqualTo(ByteBuffer.wrap("bytes".getBytes(StandardCharsets.UTF_8))); assertThat(result.getBlobAsBytes(ANY_COLUMN_NAME_7)) .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); + assertThat(result.contains(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_8)).isFalse(); + assertThat(result.getDate(ANY_COLUMN_NAME_8)).isEqualTo(ANY_DATE); + assertThat(result.contains(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_9)).isFalse(); + assertThat(result.getTime(ANY_COLUMN_NAME_9)).isEqualTo(ANY_TIME); + assertThat(result.contains(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_10)).isFalse(); + assertThat(result.getTimestamp(ANY_COLUMN_NAME_10)).isEqualTo(ANY_TIMESTAMP); + assertThat(result.contains(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_11)).isFalse(); + assertThat(result.getTimestampTZ(ANY_COLUMN_NAME_11)).isEqualTo(ANY_TIMESTAMPTZ); + + Map> columns = result.getColumns(); + assertThat(columns.containsKey(ANY_NAME_1)).isTrue(); + assertThat(columns.get(ANY_NAME_1).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_1).getTextValue()).isEqualTo(ANY_TEXT_1); + assertThat(columns.containsKey(ANY_NAME_2)).isTrue(); + assertThat(columns.get(ANY_NAME_2).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_2).getTextValue()).isEqualTo(ANY_TEXT_2); + assertThat(columns.containsKey(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_1).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_1).getBooleanValue()).isTrue(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_2)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_2).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_2).getIntValue()).isEqualTo(Integer.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_3)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_3).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_3).getBigIntValue()).isEqualTo(BigIntColumn.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_4)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_4).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_4).getFloatValue()).isEqualTo(Float.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_5)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_5).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_5).getDoubleValue()).isEqualTo(Double.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_6)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_6).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_6).getTextValue()).isEqualTo("string"); + assertThat(columns.containsKey(ANY_COLUMN_NAME_7)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_7).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsBytes()) + .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); + 
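
The temporal accessors asserted above map each new ScalarDB type to a java.time type. A usage sketch; the column names are placeholders:

  LocalDate date = result.getDate("date_col");             // DATE        -> java.time.LocalDate
  LocalTime time = result.getTime("time_col");             // TIME        -> java.time.LocalTime
  LocalDateTime timestamp = result.getTimestamp("ts_col"); // TIMESTAMP   -> java.time.LocalDateTime
  Instant timestampTZ = result.getTimestampTZ("tstz_col"); // TIMESTAMPTZ -> java.time.Instant
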
assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsByteBuffer()) + .isEqualTo(ByteBuffer.wrap("bytes".getBytes(StandardCharsets.UTF_8))); + assertThat(columns.containsKey(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_8).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_8).getDateValue()).isEqualTo(ANY_DATE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_9).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_9).getTimeValue()).isEqualTo(ANY_TIME); + assertThat(columns.containsKey(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_10).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_10).getTimestampValue()).isEqualTo(ANY_TIMESTAMP); + assertThat(columns.containsKey(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_11).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_11).getTimestampTZValue()).isEqualTo(ANY_TIMESTAMPTZ); } @Test @@ -163,79 +211,104 @@ public void interpret_ShouldReturnWhatsSetWithNullValues() throws SQLException { when(resultSet.getDouble(ANY_COLUMN_NAME_5)).thenReturn(0.0D); when(resultSet.getString(ANY_COLUMN_NAME_6)).thenReturn(null); when(resultSet.getBytes(ANY_COLUMN_NAME_7)).thenReturn(null); + when(rdbEngine.parseDateColumn(resultSet, ANY_COLUMN_NAME_8)) + .thenReturn(DateColumn.ofNull(ANY_COLUMN_NAME_8)); + when(rdbEngine.parseTimeColumn(resultSet, ANY_COLUMN_NAME_9)) + .thenReturn(TimeColumn.ofNull(ANY_COLUMN_NAME_9)); + when(rdbEngine.parseTimestampColumn(resultSet, ANY_COLUMN_NAME_10)) + .thenReturn(TimestampColumn.ofNull(ANY_COLUMN_NAME_10)); + when(rdbEngine.parseTimestampTZColumn(resultSet, ANY_COLUMN_NAME_11)) + .thenReturn(TimestampTZColumn.ofNull(ANY_COLUMN_NAME_11)); when(resultSet.wasNull()).thenReturn(false).thenReturn(false).thenReturn(true); List projections = Collections.emptyList(); - ResultInterpreter spy = spy(new ResultInterpreter(projections, TABLE_METADATA)); + ResultInterpreter interpreter = + spy(new ResultInterpreter(projections, TABLE_METADATA, rdbEngine)); // Act - Result result = spy.interpret(resultSet); + Result result = interpreter.interpret(resultSet); // Assert - assertThat(result.getValue(ANY_NAME_1).isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_1).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_1).get().getAsString().get()).isEqualTo(ANY_TEXT_1); - assertThat(result.getValue(ANY_NAME_2).isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_2).get().getAsString().isPresent()).isTrue(); - assertThat(result.getValue(ANY_NAME_2).get().getAsString().get()).isEqualTo(ANY_TEXT_2); - - assertThat(result.getValue(ANY_COLUMN_NAME_1).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_1).get().getAsBoolean()).isFalse(); - assertThat(result.getValue(ANY_COLUMN_NAME_2).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_2).get().getAsInt()).isEqualTo(0); - assertThat(result.getValue(ANY_COLUMN_NAME_3).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_3).get().getAsLong()).isEqualTo(0L); - assertThat(result.getValue(ANY_COLUMN_NAME_4).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_4).get().getAsFloat()).isEqualTo(0.0F); - assertThat(result.getValue(ANY_COLUMN_NAME_5).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_5).get().getAsDouble()).isEqualTo(0.0D); - 
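
For the null-value path arranged above, the engine parsers return ofNull columns; as the assertions that follow rely on, such a column reports hasNullValue() as true and its typed getter yields null. A small sketch with a placeholder column name:

  DateColumn nullDate = DateColumn.ofNull("d"); // "d" is a placeholder
  boolean isNull = nullDate.hasNullValue();     // true
  LocalDate value = nullDate.getDateValue();    // null for a null DATE column
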
assertThat(result.getValue(ANY_COLUMN_NAME_6).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_6).get().getAsString()).isNotPresent(); - assertThat(result.getValue(ANY_COLUMN_NAME_7).isPresent()).isTrue(); - assertThat(result.getValue(ANY_COLUMN_NAME_7).get().getAsBytes()).isNotPresent(); - - Map> values = result.getValues(); - assertThat(values.containsKey(ANY_NAME_1)).isTrue(); - assertThat(values.get(ANY_NAME_1).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_NAME_1).getAsString().get()).isEqualTo(ANY_TEXT_1); - assertThat(values.containsKey(ANY_NAME_2)).isTrue(); - assertThat(values.get(ANY_NAME_2).getAsString().isPresent()).isTrue(); - assertThat(values.get(ANY_NAME_2).getAsString().get()).isEqualTo(ANY_TEXT_2); - - assertThat(values.containsKey(ANY_COLUMN_NAME_1)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_1).getAsBoolean()).isFalse(); - assertThat(values.containsKey(ANY_COLUMN_NAME_2)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_2).getAsInt()).isEqualTo(0); - assertThat(values.containsKey(ANY_COLUMN_NAME_3)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_3).getAsLong()).isEqualTo(0L); - assertThat(values.containsKey(ANY_COLUMN_NAME_4)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_4).getAsFloat()).isEqualTo(0.0F); - assertThat(values.containsKey(ANY_COLUMN_NAME_5)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_5).getAsDouble()).isEqualTo(0.0D); - assertThat(values.containsKey(ANY_COLUMN_NAME_6)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_6).getAsString()).isNotPresent(); - assertThat(values.containsKey(ANY_COLUMN_NAME_7)).isTrue(); - assertThat(values.get(ANY_COLUMN_NAME_7).getAsBytes()).isNotPresent(); - + assertThat(result.contains(ANY_NAME_1)).isTrue(); assertThat(result.isNull(ANY_NAME_1)).isFalse(); assertThat(result.getText(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); + assertThat(result.contains(ANY_NAME_2)).isTrue(); assertThat(result.isNull(ANY_NAME_2)).isFalse(); assertThat(result.getText(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); + assertThat(result.contains(ANY_COLUMN_NAME_1)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_1)).isTrue(); assertThat(result.getBoolean(ANY_COLUMN_NAME_1)).isFalse(); + assertThat(result.contains(ANY_COLUMN_NAME_2)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_2)).isTrue(); assertThat(result.getInt(ANY_COLUMN_NAME_2)).isEqualTo(0); + assertThat(result.contains(ANY_COLUMN_NAME_3)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_3)).isTrue(); assertThat(result.getBigInt(ANY_COLUMN_NAME_3)).isEqualTo(0L); + assertThat(result.contains(ANY_COLUMN_NAME_4)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_4)).isTrue(); assertThat(result.getFloat(ANY_COLUMN_NAME_4)).isEqualTo(0.0F); + assertThat(result.contains(ANY_COLUMN_NAME_5)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_5)).isTrue(); - assertThat(result.getDouble(ANY_COLUMN_NAME_5)).isEqualTo(0.0D); + assertThat(result.getDouble(ANY_COLUMN_NAME_5)).isEqualTo(0D); + assertThat(result.contains(ANY_COLUMN_NAME_6)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_6)).isTrue(); assertThat(result.getText(ANY_COLUMN_NAME_6)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_7)).isTrue(); assertThat(result.isNull(ANY_COLUMN_NAME_7)).isTrue(); assertThat(result.getBlob(ANY_COLUMN_NAME_7)).isNull(); - assertThat(result.getBlobAsBytes(ANY_COLUMN_NAME_7)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_8)).isTrue(); + 
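
The admin tests below exercise the new four-argument importTable overload, which adds a map of per-column data type overrides; per the DATA_TYPE_MAP changes earlier in this patch, this is what lets, for example, a MySQL DATETIME be imported as TIMESTAMPTZ instead of the default TIMESTAMP. A hedged usage sketch; the namespace, table, and column names are placeholders:

  // Import "tbl" from namespace "ns", forcing the type of column "created_at";
  // all other columns keep the default JDBC-type mapping.
  Map<String, DataType> overrideColumnsType = ImmutableMap.of("created_at", DataType.TIMESTAMPTZ);
  admin.importTable("ns", "tbl", Collections.emptyMap(), overrideColumnsType);
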
assertThat(result.getDate(ANY_COLUMN_NAME_8)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(result.getTime(ANY_COLUMN_NAME_9)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(result.getTimestamp(ANY_COLUMN_NAME_10)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(result.getTimestampTZ(ANY_COLUMN_NAME_11)).isNull(); + + Map> columns = result.getColumns(); + assertThat(columns.containsKey(ANY_NAME_1)).isTrue(); + assertThat(columns.get(ANY_NAME_1).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_1).getTextValue()).isEqualTo(ANY_TEXT_1); + assertThat(columns.containsKey(ANY_NAME_2)).isTrue(); + assertThat(columns.get(ANY_NAME_2).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_2).getTextValue()).isEqualTo(ANY_TEXT_2); + assertThat(columns.containsKey(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_1).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_1).getBooleanValue()).isFalse(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_2)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_2).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_2).getIntValue()).isEqualTo(0); + assertThat(columns.containsKey(ANY_COLUMN_NAME_3)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_3).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_3).getBigIntValue()).isEqualTo(0L); + assertThat(columns.containsKey(ANY_COLUMN_NAME_4)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_4).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_4).getFloatValue()).isEqualTo(0.0F); + assertThat(columns.containsKey(ANY_COLUMN_NAME_5)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_5).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_5).getDoubleValue()).isEqualTo(0D); + assertThat(columns.containsKey(ANY_COLUMN_NAME_6)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_6).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_6).getTextValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_7)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_7).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsBytes()).isNull(); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsByteBuffer()).isNull(); + assertThat(columns.get(ANY_COLUMN_NAME_8).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_8).getDateValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_9).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_9).getTimeValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_10).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_10).getTimestampValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_11).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_11).getTimestampTZValue()).isNull(); } } diff --git a/core/src/test/java/com/scalar/db/storage/multistorage/MultiStorageAdminTest.java b/core/src/test/java/com/scalar/db/storage/multistorage/MultiStorageAdminTest.java index 6c2b1ffab9..bceea4defc 100644 --- 
a/core/src/test/java/com/scalar/db/storage/multistorage/MultiStorageAdminTest.java +++ b/core/src/test/java/com/scalar/db/storage/multistorage/MultiStorageAdminTest.java @@ -617,4 +617,34 @@ public void upgrade_ShouldCallNamespaceAndDefaultAdmins() throws ExecutionExcept verify(admin1).upgrade(options); verify(admin2).upgrade(options); } + + @Test + public void importTable_ForTable1InNamespace1_ShouldCallAdmin1() throws ExecutionException { + // Arrange + String namespace = NAMESPACE1; + String table = TABLE1; + Map options = ImmutableMap.of("a", "b"); + Map overrideColumnsType = ImmutableMap.of("c", DataType.TIMESTAMPTZ); + + // Act + multiStorageAdmin.importTable(namespace, table, options, overrideColumnsType); + + // Assert + verify(admin1).importTable(namespace, table, options, overrideColumnsType); + } + + @Test + public void getImportTableMetadata_ForTable1InNamespace1_ShouldCallAdmin1() + throws ExecutionException { + // Arrange + String namespace = NAMESPACE1; + String table = TABLE1; + Map overrideColumnsType = ImmutableMap.of("c", DataType.TIMESTAMPTZ); + + // Act + multiStorageAdmin.getImportTableMetadata(namespace, table, overrideColumnsType); + + // Assert + verify(admin1).getImportTableMetadata(namespace, table, overrideColumnsType); + } } diff --git a/core/src/test/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitAdminTestBase.java b/core/src/test/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitAdminTestBase.java index 7b6c29c6ad..696ea3c5d8 100644 --- a/core/src/test/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitAdminTestBase.java +++ b/core/src/test/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitAdminTestBase.java @@ -606,6 +606,7 @@ public void addNewColumnToTable_WithEncrypted_ShouldThrowUnsupportedOperationExc public void importTable_ShouldCallStorageAdminProperly() throws ExecutionException { // Arrange Map options = ImmutableMap.of("foo", "bar"); + Map overrideColumnsType = ImmutableMap.of("col", DataType.TEXT); String primaryKeyColumn = "pk"; String column = "col"; TableMetadata metadata = @@ -615,17 +616,18 @@ public void importTable_ShouldCallStorageAdminProperly() throws ExecutionExcepti .addPartitionKey(primaryKeyColumn) .build(); when(distributedStorageAdmin.getTableMetadata(NAMESPACE, TABLE)).thenReturn(null); - when(distributedStorageAdmin.getImportTableMetadata(NAMESPACE, TABLE)).thenReturn(metadata); + when(distributedStorageAdmin.getImportTableMetadata(NAMESPACE, TABLE, overrideColumnsType)) + .thenReturn(metadata); doNothing() .when(distributedStorageAdmin) .addRawColumnToTable(anyString(), anyString(), anyString(), any(DataType.class)); // Act - admin.importTable(NAMESPACE, TABLE, options); + admin.importTable(NAMESPACE, TABLE, options, overrideColumnsType); // Assert verify(distributedStorageAdmin).getTableMetadata(NAMESPACE, TABLE); - verify(distributedStorageAdmin).getImportTableMetadata(NAMESPACE, TABLE); + verify(distributedStorageAdmin).getImportTableMetadata(NAMESPACE, TABLE, overrideColumnsType); for (Entry entry : ConsensusCommitUtils.getTransactionMetaColumns().entrySet()) { verify(distributedStorageAdmin) diff --git a/core/src/test/java/com/scalar/db/transaction/jdbc/JdbcTransactionAdminTest.java b/core/src/test/java/com/scalar/db/transaction/jdbc/JdbcTransactionAdminTest.java index 02bbca7226..8e0f9f077c 100644 --- a/core/src/test/java/com/scalar/db/transaction/jdbc/JdbcTransactionAdminTest.java +++ b/core/src/test/java/com/scalar/db/transaction/jdbc/JdbcTransactionAdminTest.java @@ 
-235,10 +235,10 @@ public void importTable_ShouldCallJdbcAdminProperly() throws ExecutionException String table = "tbl"; // Act - admin.importTable(namespace, table, Collections.emptyMap()); + admin.importTable(namespace, table, Collections.emptyMap(), Collections.emptyMap()); // Assert - verify(jdbcAdmin).importTable(namespace, table, Collections.emptyMap()); + verify(jdbcAdmin).importTable(namespace, table, Collections.emptyMap(), Collections.emptyMap()); } @Test diff --git a/core/src/test/java/com/scalar/db/transaction/singlecrudoperation/SingleCrudOperationTransactionAdminTest.java b/core/src/test/java/com/scalar/db/transaction/singlecrudoperation/SingleCrudOperationTransactionAdminTest.java index 1ed54f438f..b108f5d801 100644 --- a/core/src/test/java/com/scalar/db/transaction/singlecrudoperation/SingleCrudOperationTransactionAdminTest.java +++ b/core/src/test/java/com/scalar/db/transaction/singlecrudoperation/SingleCrudOperationTransactionAdminTest.java @@ -238,12 +238,14 @@ public void importTable_ShouldCallDistributedStorageAdminProperly() throws Execu // Arrange String namespace = "ns"; String table = "tbl"; + Map options = ImmutableMap.of("a", "b"); + Map overrideColumnsType = ImmutableMap.of("c", DataType.TIMESTAMPTZ); // Act - admin.importTable(namespace, table, Collections.emptyMap()); + admin.importTable(namespace, table, options, overrideColumnsType); // Assert - verify(distributedStorageAdmin).importTable(namespace, table, Collections.emptyMap()); + verify(distributedStorageAdmin).importTable(namespace, table, options, overrideColumnsType); } @Test diff --git a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageAdminImportTableIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageAdminImportTableIntegrationTestBase.java index dae2a4b559..6036112dde 100644 --- a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageAdminImportTableIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageAdminImportTableIntegrationTestBase.java @@ -4,12 +4,19 @@ import static org.assertj.core.api.Assertions.assertThatThrownBy; import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; import com.scalar.db.service.StorageFactory; +import java.sql.SQLException; +import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; +import java.util.HashSet; +import java.util.List; import java.util.Map; -import java.util.Map.Entry; +import java.util.Optional; import java.util.Properties; +import java.util.Set; +import java.util.stream.Collectors; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; @@ -26,9 +33,9 @@ public abstract class DistributedStorageAdminImportTableIntegrationTestBase { private static final String TEST_NAME = "storage_admin_import_table"; private static final String NAMESPACE = "int_test_" + TEST_NAME; - private final Map tables = new HashMap<>(); - + private final List testDataList = new ArrayList<>(); protected DistributedStorageAdmin admin; + protected DistributedStorage storage; @BeforeAll public void beforeAll() throws Exception { @@ -48,13 +55,11 @@ protected Map getCreationOptions() { } private void dropTable() throws Exception { - for (Entry entry : tables.entrySet()) { - String table = entry.getKey(); - TableMetadata metadata = entry.getValue(); - if (metadata == null) { - dropNonImportableTable(table); + for 
(TestData testData : testDataList) { + if (!testData.isImportableTable()) { + dropNonImportableTable(testData.getTableName()); } else { - admin.dropTable(getNamespace(), table); + admin.dropTable(getNamespace(), testData.getTableName()); } } if (!admin.namespaceExists(getNamespace())) { @@ -68,6 +73,7 @@ private void dropTable() throws Exception { protected void setUp() throws Exception { StorageFactory factory = StorageFactory.create(getProperties(TEST_NAME)); admin = factory.getStorageAdmin(); + storage = factory.getStorage(); } @AfterEach @@ -90,27 +96,29 @@ protected void afterEach() { @AfterAll protected void afterAll() throws Exception {} - protected abstract Map createExistingDatabaseWithAllDataTypes() - throws Exception; + protected abstract List createExistingDatabaseWithAllDataTypes() throws SQLException; protected abstract void dropNonImportableTable(String table) throws Exception; @Test public void importTable_ShouldWorkProperly() throws Exception { // Arrange - tables.putAll(createExistingDatabaseWithAllDataTypes()); + testDataList.addAll(createExistingDatabaseWithAllDataTypes()); // Act Assert - for (Entry entry : tables.entrySet()) { - String table = entry.getKey(); - TableMetadata metadata = entry.getValue(); - if (metadata == null) { - importTable_ForNonImportableTable_ShouldThrowIllegalArgumentException(table); + for (TestData testData : testDataList) { + if (!testData.isImportableTable()) { + importTable_ForNonImportableTable_ShouldThrowIllegalArgumentException( + testData.getTableName()); } else { - importTable_ForImportableTable_ShouldImportProperly(table, metadata); + importTable_ForImportableTable_ShouldImportProperly( + testData.getTableName(), + testData.getOverrideColumnsType(), + testData.getTableMetadata()); } } importTable_ForNonExistingTable_ShouldThrowIllegalArgumentException(); + importTable_ForImportedTable_ShouldPutThenGetCorrectly(); } @Test @@ -123,9 +131,10 @@ public void importTable_ForUnsupportedDatabase_ShouldThrowUnsupportedOperationEx } private void importTable_ForImportableTable_ShouldImportProperly( - String table, TableMetadata metadata) throws ExecutionException { + String table, Map overrideColumnsType, TableMetadata metadata) + throws ExecutionException { // Act - admin.importTable(getNamespace(), table, Collections.emptyMap()); + admin.importTable(getNamespace(), table, Collections.emptyMap(), overrideColumnsType); // Assert assertThat(admin.namespaceExists(getNamespace())).isTrue(); @@ -147,4 +156,84 @@ private void importTable_ForNonExistingTable_ShouldThrowIllegalArgumentException () -> admin.importTable(getNamespace(), "non-existing-table", Collections.emptyMap())) .isInstanceOf(IllegalArgumentException.class); } + + public void importTable_ForImportedTable_ShouldPutThenGetCorrectly() throws ExecutionException { + // Arrange + List puts = + testDataList.stream() + .filter(TestData::isImportableTable) + .map(td -> td.getPut(getNamespace(), td.getTableName())) + .collect(Collectors.toList()); + List gets = + testDataList.stream() + .filter(TestData::isImportableTable) + .map(td -> td.getGet(getNamespace(), td.getTableName())) + .collect(Collectors.toList()); + + // Act + for (Put put : puts) { + storage.put(put); + } + List> results = new ArrayList<>(); + for (Get get : gets) { + results.add(storage.get(get)); + } + + // Assert + for (int i = 0; i < results.size(); i++) { + Put put = puts.get(i); + Optional optResult = results.get(i); + + assertThat(optResult).isPresent(); + Result result = optResult.get(); + Set 
actualColumnNamesWithoutKeys = new HashSet<>(result.getContainedColumnNames()); + actualColumnNamesWithoutKeys.removeAll( + put.getPartitionKey().getColumns().stream() + .map(Column::getName) + .collect(Collectors.toSet())); + + assertThat(actualColumnNamesWithoutKeys) + .containsExactlyInAnyOrderElementsOf(put.getContainedColumnNames()); + result.getColumns().entrySet().stream() + .filter( + e -> { + // Filter partition key columns + return !put.getPartitionKey().getColumns().contains(e.getValue()); + }) + .forEach( + entry -> + // Assert each result column is equal to the column inserted with the put + assertThat(entry.getValue()).isEqualTo(put.getColumns().get(entry.getKey()))); + } + } + + /** This interface defines test data for running import table related integration tests. */ + public interface TestData { + + /** Returns true if the table is supported for import, false otherwise */ + boolean isImportableTable(); + + /** Returns the table name */ + String getTableName(); + + /** Returns the columns for which the data type should be overridden when importing the table */ + Map getOverrideColumnsType(); + + /* + * Returns the expected table metadata of the imported table + */ + TableMetadata getTableMetadata(); + + /** Returns a sample Insert operation for the table */ + Insert getInsert(String namespace, String table); + + /** Returns a sample Put operation for the table */ + Put getPut(String namespace, String table); + + /** + * Returns a Get operation to retrieve the record inserted with {@link #getPut(String, String)} + * or {@link #getInsert(String, String)} + */ + Get getGet(String namespace, String table); + } } diff --git a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageAdminIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageAdminIntegrationTestBase.java index 705c78e0ad..11060d554b 100644 --- a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageAdminIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageAdminIntegrationTestBase.java @@ -12,6 +12,10 @@ import com.scalar.db.util.AdminTestUtils; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.ZoneOffset; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -50,6 +54,10 @@ public abstract class DistributedStorageAdminIntegrationTestBase { private static final String COL_NAME9 = "c9"; private static final String COL_NAME10 = "c10"; private static final String COL_NAME11 = "c11"; + private static final String COL_NAME12 = "c12"; + private static final String COL_NAME13 = "c13"; + private static final String COL_NAME14 = "c14"; + private static final String COL_NAME15 = "c15"; private static final TableMetadata TABLE_METADATA = TableMetadata.newBuilder() @@ -64,6 +72,9 @@ public abstract class DistributedStorageAdminIntegrationTestBase { .addColumn(COL_NAME9, DataType.DOUBLE) .addColumn(COL_NAME10, DataType.BOOLEAN) .addColumn(COL_NAME11, DataType.BLOB) + .addColumn(COL_NAME12, DataType.DATE) + .addColumn(COL_NAME13, DataType.TIME) + .addColumn(COL_NAME14, DataType.TIMESTAMPTZ) .addPartitionKey(COL_NAME2) .addPartitionKey(COL_NAME1) .addClusteringKey(COL_NAME4, Scan.Ordering.Order.ASC) @@ -153,6 +164,9 @@ private void dropTables() throws ExecutionException { public void getTableMetadata_CorrectTableGiven_ShouldReturnCorrectMetadata() throws 
ExecutionException { // Arrange + if (isTimestampTypeSupported()) { + admin.addNewColumnToTable(namespace1, TABLE1, COL_NAME15, DataType.TIMESTAMP); + } // Act TableMetadata tableMetadata = admin.getTableMetadata(namespace1, TABLE1); @@ -170,7 +184,12 @@ public void getTableMetadata_CorrectTableGiven_ShouldReturnCorrectMetadata() assertThat(iterator.next()).isEqualTo(COL_NAME4); assertThat(iterator.next()).isEqualTo(COL_NAME3); - assertThat(tableMetadata.getColumnNames().size()).isEqualTo(11); + if (isTimestampTypeSupported()) { + assertThat(tableMetadata.getColumnNames().size()).isEqualTo(15); + } else { + assertThat(tableMetadata.getColumnNames().size()).isEqualTo(14); + } + assertThat(tableMetadata.getColumnNames().contains(COL_NAME1)).isTrue(); assertThat(tableMetadata.getColumnNames().contains(COL_NAME2)).isTrue(); assertThat(tableMetadata.getColumnNames().contains(COL_NAME3)).isTrue(); @@ -182,6 +201,12 @@ public void getTableMetadata_CorrectTableGiven_ShouldReturnCorrectMetadata() assertThat(tableMetadata.getColumnNames().contains(COL_NAME9)).isTrue(); assertThat(tableMetadata.getColumnNames().contains(COL_NAME10)).isTrue(); assertThat(tableMetadata.getColumnNames().contains(COL_NAME11)).isTrue(); + assertThat(tableMetadata.getColumnNames().contains(COL_NAME12)).isTrue(); + assertThat(tableMetadata.getColumnNames().contains(COL_NAME13)).isTrue(); + assertThat(tableMetadata.getColumnNames().contains(COL_NAME14)).isTrue(); + if (isTimestampTypeSupported()) { + assertThat(tableMetadata.getColumnNames().contains(COL_NAME15)).isTrue(); + } assertThat(tableMetadata.getColumnDataType(COL_NAME1)).isEqualTo(DataType.INT); assertThat(tableMetadata.getColumnDataType(COL_NAME2)).isEqualTo(DataType.TEXT); @@ -194,6 +219,12 @@ public void getTableMetadata_CorrectTableGiven_ShouldReturnCorrectMetadata() assertThat(tableMetadata.getColumnDataType(COL_NAME9)).isEqualTo(DataType.DOUBLE); assertThat(tableMetadata.getColumnDataType(COL_NAME10)).isEqualTo(DataType.BOOLEAN); assertThat(tableMetadata.getColumnDataType(COL_NAME11)).isEqualTo(DataType.BLOB); + assertThat(tableMetadata.getColumnDataType(COL_NAME12)).isEqualTo(DataType.DATE); + assertThat(tableMetadata.getColumnDataType(COL_NAME13)).isEqualTo(DataType.TIME); + assertThat(tableMetadata.getColumnDataType(COL_NAME14)).isEqualTo(DataType.TIMESTAMPTZ); + if (isTimestampTypeSupported()) { + assertThat(tableMetadata.getColumnDataType(COL_NAME15)).isEqualTo(DataType.TIMESTAMP); + } assertThat(tableMetadata.getClusteringOrder(COL_NAME1)).isNull(); assertThat(tableMetadata.getClusteringOrder(COL_NAME2)).isNull(); @@ -206,6 +237,10 @@ public void getTableMetadata_CorrectTableGiven_ShouldReturnCorrectMetadata() assertThat(tableMetadata.getClusteringOrder(COL_NAME9)).isNull(); assertThat(tableMetadata.getClusteringOrder(COL_NAME10)).isNull(); assertThat(tableMetadata.getClusteringOrder(COL_NAME11)).isNull(); + assertThat(tableMetadata.getClusteringOrder(COL_NAME12)).isNull(); + assertThat(tableMetadata.getClusteringOrder(COL_NAME13)).isNull(); + assertThat(tableMetadata.getClusteringOrder(COL_NAME14)).isNull(); + assertThat(tableMetadata.getClusteringOrder(COL_NAME15)).isNull(); assertThat(tableMetadata.getSecondaryIndexNames().size()).isEqualTo(2); assertThat(tableMetadata.getSecondaryIndexNames().contains(COL_NAME5)).isTrue(); @@ -472,7 +507,7 @@ public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorre try { // Arrange Map options = getCreationOptions(); - TableMetadata metadata = + TableMetadata.Builder metadataBuilder = 
TableMetadata.newBuilder() .addColumn(COL_NAME1, DataType.INT) .addColumn(COL_NAME2, DataType.INT) @@ -483,12 +518,18 @@ public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorre .addColumn(COL_NAME7, DataType.BOOLEAN) .addColumn(COL_NAME8, DataType.BLOB) .addColumn(COL_NAME9, DataType.TEXT) + .addColumn(COL_NAME10, DataType.DATE) + .addColumn(COL_NAME11, DataType.TIME) + .addColumn(COL_NAME12, DataType.TIMESTAMPTZ) .addPartitionKey(COL_NAME1) - .addSecondaryIndex(COL_NAME9) - .build(); + .addSecondaryIndex(COL_NAME9); + if (isTimestampTypeSupported()) { + metadataBuilder = metadataBuilder.addColumn(COL_NAME13, DataType.TIMESTAMP); + } + TableMetadata metadata = metadataBuilder.build(); admin.createTable(namespace1, TABLE4, metadata, options); storage = storageFactory.getStorage(); - storage.put( + PutBuilder.Buildable put = Put.newBuilder() .namespace(namespace1) .table(TABLE4) @@ -501,7 +542,19 @@ public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorre .booleanValue(COL_NAME7, true) .blobValue(COL_NAME8, "8".getBytes(StandardCharsets.UTF_8)) .textValue(COL_NAME9, "9") - .build()); + .dateValue(COL_NAME10, LocalDate.of(2020, 6, 2)) + .timeValue(COL_NAME11, LocalTime.of(12, 2, 6, 123_456_000)) + .timestampTZValue( + COL_NAME12, + LocalDateTime.of(LocalDate.of(2020, 6, 2), LocalTime.of(12, 2, 6, 123_000_000)) + .toInstant(ZoneOffset.UTC)); + if (isTimestampTypeSupported()) { + put.timestampValue( + COL_NAME13, + LocalDateTime.of(LocalDate.of(2020, 6, 2), LocalTime.of(12, 2, 6, 123_000_000))); + } + storage.put(put.build()); + // Act admin.createIndex(namespace1, TABLE4, COL_NAME2, options); admin.createIndex(namespace1, TABLE4, COL_NAME3, options); @@ -512,6 +565,12 @@ public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorre admin.createIndex(namespace1, TABLE4, COL_NAME7, options); } admin.createIndex(namespace1, TABLE4, COL_NAME8, options); + admin.createIndex(namespace1, TABLE4, COL_NAME10, options); + admin.createIndex(namespace1, TABLE4, COL_NAME11, options); + admin.createIndex(namespace1, TABLE4, COL_NAME12, options); + if (isTimestampTypeSupported()) { + admin.createIndex(namespace1, TABLE4, COL_NAME13, options); + } // Assert assertThat(admin.indexExists(namespace1, TABLE4, COL_NAME2)).isTrue(); @@ -523,16 +582,39 @@ public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorre assertThat(admin.indexExists(namespace1, TABLE4, COL_NAME7)).isTrue(); } assertThat(admin.indexExists(namespace1, TABLE4, COL_NAME8)).isTrue(); + assertThat(admin.indexExists(namespace1, TABLE4, COL_NAME9)).isTrue(); + assertThat(admin.indexExists(namespace1, TABLE4, COL_NAME10)).isTrue(); + assertThat(admin.indexExists(namespace1, TABLE4, COL_NAME11)).isTrue(); + assertThat(admin.indexExists(namespace1, TABLE4, COL_NAME12)).isTrue(); + if (isTimestampTypeSupported()) { + assertThat(admin.indexExists(namespace1, TABLE4, COL_NAME13)).isTrue(); + } + + Set actualSecondaryIndexNames = + admin.getTableMetadata(namespace1, TABLE4).getSecondaryIndexNames(); + assertThat(actualSecondaryIndexNames) + .contains( + COL_NAME2, + COL_NAME3, + COL_NAME4, + COL_NAME5, + COL_NAME6, + COL_NAME8, + COL_NAME9, + COL_NAME10, + COL_NAME11, + COL_NAME12); + int indexCount = 10; if (isIndexOnBooleanColumnSupported()) { - assertThat(admin.getTableMetadata(namespace1, TABLE4).getSecondaryIndexNames()) - .containsOnly( - COL_NAME2, COL_NAME3, COL_NAME4, COL_NAME5, COL_NAME6, COL_NAME7, COL_NAME8, - COL_NAME9); - } else { - 
assertThat(admin.getTableMetadata(namespace1, TABLE4).getSecondaryIndexNames()) - .containsOnly( - COL_NAME2, COL_NAME3, COL_NAME4, COL_NAME5, COL_NAME6, COL_NAME8, COL_NAME9); + assertThat(actualSecondaryIndexNames).contains(COL_NAME7); + indexCount++; } + if (isTimestampTypeSupported()) { + assertThat(actualSecondaryIndexNames).contains(COL_NAME13); + indexCount++; + } + assertThat(actualSecondaryIndexNames).hasSize(indexCount); + } finally { admin.dropTable(namespace1, TABLE4, true); if (storage != null) { @@ -810,4 +892,8 @@ public void addNewColumnToTable_ForAlreadyExistingColumn_ShouldThrowIllegalArgum protected boolean isIndexOnBooleanColumnSupported() { return true; } + + protected boolean isTimestampTypeSupported() { + return true; + } } diff --git a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageAdminRepairIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageAdminRepairIntegrationTestBase.java index c1d051aa29..f916896910 100644 --- a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageAdminRepairIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageAdminRepairIntegrationTestBase.java @@ -37,32 +37,46 @@ public abstract class DistributedStorageAdminRepairIntegrationTestBase { private static final String COL_NAME9 = "c9"; private static final String COL_NAME10 = "c10"; private static final String COL_NAME11 = "c11"; - - protected static final TableMetadata TABLE_METADATA = - TableMetadata.newBuilder() - .addColumn(COL_NAME1, DataType.INT) - .addColumn(COL_NAME2, DataType.TEXT) - .addColumn(COL_NAME3, DataType.TEXT) - .addColumn(COL_NAME4, DataType.INT) - .addColumn(COL_NAME5, DataType.INT) - .addColumn(COL_NAME6, DataType.TEXT) - .addColumn(COL_NAME7, DataType.BIGINT) - .addColumn(COL_NAME8, DataType.FLOAT) - .addColumn(COL_NAME9, DataType.DOUBLE) - .addColumn(COL_NAME10, DataType.BOOLEAN) - .addColumn(COL_NAME11, DataType.BLOB) - .addPartitionKey(COL_NAME2) - .addPartitionKey(COL_NAME1) - .addClusteringKey(COL_NAME4, Scan.Ordering.Order.ASC) - .addClusteringKey(COL_NAME3, Scan.Ordering.Order.DESC) - .addSecondaryIndex(COL_NAME5) - .addSecondaryIndex(COL_NAME6) - .build(); + private static final String COL_NAME12 = "c12"; + private static final String COL_NAME13 = "c13"; + private static final String COL_NAME14 = "c14"; + private static final String COL_NAME15 = "c15"; protected DistributedStorageAdmin admin; protected AdminTestUtils adminTestUtils = null; + protected TableMetadata getTableMetadata() { + TableMetadata.Builder builder = + TableMetadata.newBuilder() + .addColumn(COL_NAME1, DataType.INT) + .addColumn(COL_NAME2, DataType.TEXT) + .addColumn(COL_NAME3, DataType.TEXT) + .addColumn(COL_NAME4, DataType.INT) + .addColumn(COL_NAME5, DataType.INT) + .addColumn(COL_NAME6, DataType.TEXT) + .addColumn(COL_NAME7, DataType.BIGINT) + .addColumn(COL_NAME8, DataType.FLOAT) + .addColumn(COL_NAME9, DataType.DOUBLE) + .addColumn(COL_NAME10, DataType.BOOLEAN) + .addColumn(COL_NAME11, DataType.BLOB) + .addColumn(COL_NAME12, DataType.DATE) + .addColumn(COL_NAME13, DataType.TIME) + .addColumn(COL_NAME14, DataType.TIMESTAMPTZ); + if (isTimestampTypeSupported()) { + builder.addColumn(COL_NAME15, DataType.TIMESTAMP); + } + builder + .addPartitionKey(COL_NAME2) + .addPartitionKey(COL_NAME1) + .addClusteringKey(COL_NAME4, Scan.Ordering.Order.ASC) + .addClusteringKey(COL_NAME3, Scan.Ordering.Order.DESC) + .addSecondaryIndex(COL_NAME5) + .addSecondaryIndex(COL_NAME6) + .build(); 
+ return builder.build(); + } + @BeforeAll public void beforeAll() throws Exception { initialize(TEST_NAME); @@ -105,7 +119,7 @@ protected String getTable() { private void createTable() throws ExecutionException { Map options = getCreationOptions(); admin.createNamespace(getNamespace(), options); - admin.createTable(getNamespace(), getTable(), TABLE_METADATA, options); + admin.createTable(getNamespace(), getTable(), getTableMetadata(), options); } protected Map getCreationOptions() { @@ -135,11 +149,11 @@ protected void waitForDifferentSessionDdl() { @Test public void repairTable_ForExistingTableAndMetadata_ShouldDoNothing() throws Exception { // Act - admin.repairTable(getNamespace(), getTable(), TABLE_METADATA, getCreationOptions()); + admin.repairTable(getNamespace(), getTable(), getTableMetadata(), getCreationOptions()); // Assert assertThat(adminTestUtils.tableExists(getNamespace(), getTable())).isTrue(); - assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(TABLE_METADATA); + assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(getTableMetadata()); } @Test @@ -149,12 +163,12 @@ public void repairTable_ForDeletedMetadataTable_ShouldRepairProperly() throws Ex // Act waitForDifferentSessionDdl(); - admin.repairTable(getNamespace(), getTable(), TABLE_METADATA, getCreationOptions()); + admin.repairTable(getNamespace(), getTable(), getTableMetadata(), getCreationOptions()); // Assert waitForDifferentSessionDdl(); assertThat(adminTestUtils.tableExists(getNamespace(), getTable())).isTrue(); - assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(TABLE_METADATA); + assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(getTableMetadata()); } @Test @@ -163,11 +177,11 @@ public void repairTable_ForTruncatedMetadataTable_ShouldRepairProperly() throws adminTestUtils.truncateMetadataTable(); // Act - admin.repairTable(getNamespace(), getTable(), TABLE_METADATA, getCreationOptions()); + admin.repairTable(getNamespace(), getTable(), getTableMetadata(), getCreationOptions()); // Assert assertThat(adminTestUtils.tableExists(getNamespace(), getTable())).isTrue(); - assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(TABLE_METADATA); + assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(getTableMetadata()); } @Test @@ -176,11 +190,11 @@ public void repairTable_ForCorruptedMetadataTable_ShouldRepairProperly() throws adminTestUtils.corruptMetadata(getNamespace(), getTable()); // Act - admin.repairTable(getNamespace(), getTable(), TABLE_METADATA, getCreationOptions()); + admin.repairTable(getNamespace(), getTable(), getTableMetadata(), getCreationOptions()); // Assert assertThat(adminTestUtils.tableExists(getNamespace(), getTable())).isTrue(); - assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(TABLE_METADATA); + assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(getTableMetadata()); } @Test @@ -191,12 +205,12 @@ public void repairTable_ForNonExistingTableButExistingMetadata_ShouldCreateTable // Act waitForDifferentSessionDdl(); - admin.repairTable(getNamespace(), getTable(), TABLE_METADATA, getCreationOptions()); + admin.repairTable(getNamespace(), getTable(), getTableMetadata(), getCreationOptions()); // Assert waitForDifferentSessionDdl(); assertThat(adminTestUtils.tableExists(getNamespace(), getTable())).isTrue(); - assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(TABLE_METADATA); + 
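
As the repair tests in this hunk show, the static TABLE_METADATA constant was replaced by a getTableMetadata() method so the expected schema can include a TIMESTAMP column only on backends that support it. The pattern, sketched with the column names used in this diff:

  TableMetadata.Builder builder =
      TableMetadata.newBuilder()
          .addColumn("c14", DataType.TIMESTAMPTZ); // always present
  if (isTimestampTypeSupported()) {
    builder.addColumn("c15", DataType.TIMESTAMP); // only where the backend supports it
  }
  TableMetadata expected = builder.build();
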
assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(getTableMetadata()); } @Test @@ -272,4 +286,8 @@ public void repairNamespace_ForNonExistingNamespaceAndMetadata_ShouldCreateNames assertThat(adminTestUtils.namespaceExists(getNamespace())).isTrue(); assertThat(admin.namespaceExists(getNamespace())).isTrue(); } + + protected boolean isTimestampTypeSupported() { + return true; + } } diff --git a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageColumnValueIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageColumnValueIntegrationTestBase.java index 81d686f712..8c14cbae33 100644 --- a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageColumnValueIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageColumnValueIntegrationTestBase.java @@ -8,23 +8,37 @@ import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.Column; import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.Key; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import com.scalar.db.service.StorageFactory; import com.scalar.db.util.TestUtils; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.ZoneOffset; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Properties; import java.util.Random; +import java.util.TimeZone; +import javax.annotation.Nullable; +import org.assertj.core.util.Lists; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,6 +58,10 @@ public abstract class DistributedStorageColumnValueIntegrationTestBase { private static final String COL_NAME5 = "c5"; private static final String COL_NAME6 = "c6"; private static final String COL_NAME7 = "c7"; + private static final String COL_NAME8 = "c8"; + private static final String COL_NAME9 = "c9"; + private static final String COL_NAME10 = "c10"; + private static final String COL_NAME11 = "c11"; private static final int ATTEMPT_COUNT = 50; private static final Random random = new Random(); @@ -77,9 +95,7 @@ protected String getNamespace() { private void createTable() throws ExecutionException { Map options = getCreationOptions(); admin.createNamespace(namespace, true, options); - admin.createTable( - namespace, - TABLE, + TableMetadata.Builder metadata = TableMetadata.newBuilder() .addColumn(PARTITION_KEY, DataType.INT) .addColumn(COL_NAME1, DataType.BOOLEAN) @@ -89,10 +105,14 @@ private void createTable() throws ExecutionException { .addColumn(COL_NAME5, DataType.DOUBLE) .addColumn(COL_NAME6, DataType.TEXT) .addColumn(COL_NAME7, DataType.BLOB) - .addPartitionKey(PARTITION_KEY) - .build(), - true, - options); + .addColumn(COL_NAME8, DataType.DATE) + .addColumn(COL_NAME9, DataType.TIME) + .addColumn(COL_NAME10, DataType.TIMESTAMPTZ) + .addPartitionKey(PARTITION_KEY); + if (isTimestampTypeSupported()) { + metadata.addColumn(COL_NAME11, 
DataType.TIMESTAMP); + } + admin.createTable(namespace, TABLE, metadata.build(), true, options); } protected Map getCreationOptions() { @@ -155,7 +175,18 @@ public void put_WithRandomValues_ShouldPutCorrectly() throws ExecutionException (TextColumn) getColumnWithRandomValue(random, COL_NAME6, DataType.TEXT); BlobColumn col7Value = (BlobColumn) getColumnWithRandomValue(random, COL_NAME7, DataType.BLOB); - Put put = + DateColumn col8Value = + (DateColumn) getColumnWithRandomValue(random, COL_NAME8, DataType.DATE); + TimeColumn col9Value = + (TimeColumn) getColumnWithRandomValue(random, COL_NAME9, DataType.TIME); + TimestampTZColumn column10Value = + (TimestampTZColumn) getColumnWithRandomValue(random, COL_NAME10, DataType.TIMESTAMPTZ); + TimestampColumn column11Value = null; + if (isTimestampTypeSupported()) { + column11Value = + (TimestampColumn) getColumnWithRandomValue(random, COL_NAME11, DataType.TIMESTAMP); + } + PutBuilder.Buildable put = Put.newBuilder() .namespace(namespace) .table(TABLE) @@ -167,9 +198,14 @@ public void put_WithRandomValues_ShouldPutCorrectly() throws ExecutionException .value(col5Value) .value(col6Value) .value(col7Value) - .build(); + .value(col8Value) + .value(col9Value) + .value(column10Value); + if (isTimestampTypeSupported()) { + put.value(column11Value); + } // Act - storage.put(put); + storage.put(put.build()); // Assert assertResult( @@ -180,7 +216,11 @@ public void put_WithRandomValues_ShouldPutCorrectly() throws ExecutionException col4Value, col5Value, col6Value, - col7Value); + col7Value, + col8Value, + col9Value, + column10Value, + column11Value); } } @@ -195,8 +235,16 @@ public void put_WithMaxValues_ShouldPutCorrectly() throws ExecutionException { DoubleColumn col5Value = (DoubleColumn) getColumnWithMaxValue(COL_NAME5, DataType.DOUBLE); TextColumn col6Value = (TextColumn) getColumnWithMaxValue(COL_NAME6, DataType.TEXT); BlobColumn col7Value = (BlobColumn) getColumnWithMaxValue(COL_NAME7, DataType.BLOB); + DateColumn col8Value = (DateColumn) getColumnWithMaxValue(COL_NAME8, DataType.DATE); + TimeColumn col9Value = (TimeColumn) getColumnWithMaxValue(COL_NAME9, DataType.TIME); + TimestampTZColumn col10Value = + (TimestampTZColumn) getColumnWithMaxValue(COL_NAME10, DataType.TIMESTAMPTZ); + TimestampColumn column11Value = null; + if (isTimestampTypeSupported()) { + column11Value = (TimestampColumn) getColumnWithMaxValue(COL_NAME11, DataType.TIMESTAMP); + } - Put put = + PutBuilder.Buildable put = Put.newBuilder() .namespace(namespace) .table(TABLE) @@ -208,9 +256,14 @@ public void put_WithMaxValues_ShouldPutCorrectly() throws ExecutionException { .value(col5Value) .value(col6Value) .value(col7Value) - .build(); + .value(col8Value) + .value(col9Value) + .value(col10Value); + if (isTimestampTypeSupported()) { + put.value(column11Value); + } // Act - storage.put(put); + storage.put(put.build()); // Assert assertResult( @@ -221,7 +274,11 @@ public void put_WithMaxValues_ShouldPutCorrectly() throws ExecutionException { col4Value, col5Value, col6Value, - col7Value); + col7Value, + col8Value, + col9Value, + col10Value, + column11Value); } @Test @@ -235,8 +292,16 @@ public void put_WithMinValues_ShouldPutCorrectly() throws ExecutionException { DoubleColumn col5Value = (DoubleColumn) getColumnWithMinValue(COL_NAME5, DataType.DOUBLE); TextColumn col6Value = (TextColumn) getColumnWithMinValue(COL_NAME6, DataType.TEXT); BlobColumn col7Value = (BlobColumn) getColumnWithMinValue(COL_NAME7, DataType.BLOB); + DateColumn col8Value = (DateColumn) 
getColumnWithMinValue(COL_NAME8, DataType.DATE); + TimeColumn col9Value = (TimeColumn) getColumnWithMinValue(COL_NAME9, DataType.TIME); + TimestampTZColumn col10Value = + (TimestampTZColumn) getColumnWithMinValue(COL_NAME10, DataType.TIMESTAMPTZ); + TimestampColumn column11Value = null; + if (isTimestampTypeSupported()) { + column11Value = (TimestampColumn) getColumnWithMinValue(COL_NAME11, DataType.TIMESTAMP); + } - Put put = + PutBuilder.Buildable put = Put.newBuilder() .namespace(namespace) .table(TABLE) @@ -248,9 +313,14 @@ public void put_WithMinValues_ShouldPutCorrectly() throws ExecutionException { .value(col5Value) .value(col6Value) .value(col7Value) - .build(); + .value(col8Value) + .value(col9Value) + .value(col10Value); + if (isTimestampTypeSupported()) { + put.value(column11Value); + } // Act - storage.put(put); + storage.put(put.build()); // Assert assertResult( @@ -261,7 +331,11 @@ public void put_WithMinValues_ShouldPutCorrectly() throws ExecutionException { col4Value, col5Value, col6Value, - col7Value); + col7Value, + col8Value, + col9Value, + col10Value, + column11Value); } @Test @@ -275,8 +349,15 @@ public void put_WithNullValues_ShouldPutCorrectly() throws ExecutionException { DoubleColumn col5Value = DoubleColumn.ofNull(COL_NAME5); TextColumn col6Value = TextColumn.ofNull(COL_NAME6); BlobColumn col7Value = BlobColumn.ofNull(COL_NAME7); + DateColumn col8Value = DateColumn.ofNull(COL_NAME8); + TimeColumn col9Value = TimeColumn.ofNull(COL_NAME9); + TimestampTZColumn col10Value = TimestampTZColumn.ofNull(COL_NAME10); + TimestampColumn col11Value = null; + if (isTimestampTypeSupported()) { + col11Value = TimestampColumn.ofNull(COL_NAME11); + } - Put put = + PutBuilder.Buildable put = Put.newBuilder() .namespace(namespace) .table(TABLE) @@ -288,9 +369,14 @@ public void put_WithNullValues_ShouldPutCorrectly() throws ExecutionException { .value(col5Value) .value(col6Value) .value(col7Value) - .build(); + .value(col8Value) + .value(col9Value) + .value(col10Value); + if (isTimestampTypeSupported()) { + put.value(col11Value); + } // Act - storage.put(put); + storage.put(put.build()); // Assert assertResult( @@ -301,7 +387,11 @@ public void put_WithNullValues_ShouldPutCorrectly() throws ExecutionException { col4Value, col5Value, col6Value, - col7Value); + col7Value, + col8Value, + col9Value, + col10Value, + col11Value); } @Test @@ -316,8 +406,15 @@ public void put_WithNullValues_AfterPuttingRandomValues_ShouldPutCorrectly() DoubleColumn col5Value = DoubleColumn.ofNull(COL_NAME5); TextColumn col6Value = TextColumn.ofNull(COL_NAME6); BlobColumn col7Value = BlobColumn.ofNull(COL_NAME7); + DateColumn col8Value = DateColumn.ofNull(COL_NAME8); + TimeColumn col9Value = TimeColumn.ofNull(COL_NAME9); + TimestampTZColumn col10Value = TimestampTZColumn.ofNull(COL_NAME10); + TimestampColumn col11Value = null; + if (isTimestampTypeSupported()) { + col11Value = TimestampColumn.ofNull(COL_NAME11); + } - Put putForRandomValues = + PutBuilder.Buildable putForRandomValues = Put.newBuilder() .namespace(namespace) .table(TABLE) @@ -329,8 +426,13 @@ public void put_WithNullValues_AfterPuttingRandomValues_ShouldPutCorrectly() .value(getColumnWithRandomValue(random, COL_NAME5, DataType.DOUBLE)) .value(getColumnWithRandomValue(random, COL_NAME6, DataType.TEXT)) .value(getColumnWithRandomValue(random, COL_NAME7, DataType.BLOB)) - .build(); - Put putForNullValues = + .value(getColumnWithRandomValue(random, COL_NAME8, DataType.DATE)) + .value(getColumnWithRandomValue(random, COL_NAME9, DataType.TIME)) + 
.value(getColumnWithRandomValue(random, COL_NAME10, DataType.TIMESTAMPTZ)); + if (isTimestampTypeSupported()) { + putForRandomValues.value(getColumnWithRandomValue(random, COL_NAME11, DataType.TIMESTAMP)); + } + PutBuilder.Buildable putForNullValues = Put.newBuilder() .namespace(namespace) .table(TABLE) @@ -342,11 +444,16 @@ public void put_WithNullValues_AfterPuttingRandomValues_ShouldPutCorrectly() .value(col5Value) .value(col6Value) .value(col7Value) - .build(); + .value(col8Value) + .value(col9Value) + .value(col10Value); + if (isTimestampTypeSupported()) { + putForNullValues.value(col11Value); + } // Act - storage.put(putForRandomValues); - storage.put(putForNullValues); + storage.put(putForRandomValues.build()); + storage.put(putForNullValues.build()); // Assert assertResult( @@ -357,7 +464,11 @@ public void put_WithNullValues_AfterPuttingRandomValues_ShouldPutCorrectly() col4Value, col5Value, col6Value, - col7Value); + col7Value, + col8Value, + col9Value, + col10Value, + col11Value); } @Test @@ -371,6 +482,13 @@ public void put_WithoutValues_ShouldPutCorrectly() throws ExecutionException { DoubleColumn col5Value = DoubleColumn.ofNull(COL_NAME5); TextColumn col6Value = TextColumn.ofNull(COL_NAME6); BlobColumn col7Value = BlobColumn.ofNull(COL_NAME7); + DateColumn col8Value = DateColumn.ofNull(COL_NAME8); + TimeColumn col9Value = TimeColumn.ofNull(COL_NAME9); + TimestampTZColumn col10Value = TimestampTZColumn.ofNull(COL_NAME10); + TimestampColumn col11Value = null; + if (isTimestampTypeSupported()) { + col11Value = TimestampColumn.ofNull(COL_NAME11); + } Put put = Put.newBuilder() @@ -391,7 +509,180 @@ public void put_WithoutValues_ShouldPutCorrectly() throws ExecutionException { col4Value, col5Value, col6Value, - col7Value); + col7Value, + col8Value, + col9Value, + col10Value, + col11Value); + } + + @Test + public void + put_WithProblematicDateBecauseOfJulianToGregorianCalendarTransition_ShouldPutCorrectly() + throws ExecutionException { + // This test only targets the DATE, TIMESTAMP and TIMESTAMPTZ types. + // + // The interval of dates below can be problematic because it marks the transition from the + // Julian to the Gregorian calendar. + // For dates before the introduction of the Gregorian calendar on October 15th, 1582, the JDBC + // driver's and Java's internal time representations differ, which can cause time adjustment + // issues depending on the way a DATE, TIMESTAMP or TIMESTAMPTZ is inserted into the database.
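+ //
+ // A minimal sketch of the underlying mismatch (illustrative only; the snippet below is ours
+ // and is not part of the test): java.time uses the proleptic Gregorian calendar, so 1582-10-10
+ // is a valid LocalDate, while java.util.GregorianCalendar applies the Julian-to-Gregorian
+ // cutover and skips October 5th through 14th, 1582 entirely:
+ //
+ //   LocalDate proleptic = LocalDate.of(1582, 10, 10); // valid in java.time
+ //   GregorianCalendar hybrid = new GregorianCalendar();
+ //   hybrid.clear();
+ //   hybrid.set(1582, Calendar.OCTOBER, 10); // leniently normalized to October 20th, 1582
+ //
+ // A JDBC driver that converts through Calendar or java.sql.Date can therefore shift such
+ // dates by up to 10 days unless it passes java.time objects through unchanged.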
+ + // Arrange + LocalDate start = LocalDate.of(1582, 10, 4); + LocalDate end = LocalDate.of(1582, 10, 16); + + for (LocalDate problematicDate = start; + problematicDate.isBefore(end); + problematicDate = problematicDate.plusDays(1)) { + IntColumn partitionKeyValue = IntColumn.of(PARTITION_KEY, problematicDate.getDayOfMonth()); + BooleanColumn col1Value = BooleanColumn.ofNull(COL_NAME1); + IntColumn col2Value = IntColumn.ofNull(COL_NAME2); + BigIntColumn col3Value = BigIntColumn.ofNull(COL_NAME3); + FloatColumn col4Value = FloatColumn.ofNull(COL_NAME4); + DoubleColumn col5Value = DoubleColumn.ofNull(COL_NAME5); + TextColumn col6Value = TextColumn.ofNull(COL_NAME6); + BlobColumn col7Value = BlobColumn.ofNull(COL_NAME7); + DateColumn col8Value = DateColumn.of(COL_NAME8, problematicDate); + TimeColumn col9Value = TimeColumn.ofNull(COL_NAME9); + TimestampTZColumn col10Value = + TimestampTZColumn.of( + COL_NAME10, + LocalDateTime.of(problematicDate, LocalTime.MAX) + .withNano(123_000_000) + .toInstant(ZoneOffset.UTC)); + TimestampColumn column11Value = null; + if (isTimestampTypeSupported()) { + column11Value = + TimestampColumn.of( + COL_NAME11, LocalDateTime.of(problematicDate, LocalTime.MAX).withNano(123_000_000)); + } + + PutBuilder.Buildable put = + Put.newBuilder() + .namespace(namespace) + .table(TABLE) + .partitionKey(Key.newBuilder().add(partitionKeyValue).build()) + .value(col1Value) + .value(col2Value) + .value(col3Value) + .value(col4Value) + .value(col5Value) + .value(col6Value) + .value(col7Value) + .value(col8Value) + .value(col9Value) + .value(col10Value); + if (isTimestampTypeSupported()) { + put.value(column11Value); + } + // Act + storage.put(put.build()); + + // Assert + assertResult( + partitionKeyValue, + col1Value, + col2Value, + col3Value, + col4Value, + col5Value, + col6Value, + col7Value, + col8Value, + col9Value, + col10Value, + column11Value); + } + } + + @ParameterizedTest + @CsvSource({"-2,11", "11,11"}) + public void put_forTimeRelatedTypesWithVariousJvmTimezone_ShouldPutCorrectly( + int insertTimeZone, int readTimeZone) throws ExecutionException { + TimeZone originalDefaultTimezone = TimeZone.getDefault(); + try { + // Different time zones between the insert client, read client, and server can + // cause issues where the time is offset. Such issues were observed for MySQL and MariaDB. + // + // Ideally, we would run this test by setting the server time zone in addition to the insert + // and read client time zones. But setting the server time zone is complicated, and the server + // is likely running on the Japan timezone (UTC+9), UTC, or a US mainland timezone (UTC-8 to + // UTC-5). + // So we use UTC-2 or UTC+11 timezones for the client, which correspond respectively to + // timezones in the middle of the Atlantic Ocean and the Pacific Ocean and should never align + // with the server timezone.
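+ //
+ // A minimal sketch of the failure mode (illustrative only; not executed by this test, and the
+ // variable names are ours): a driver that materializes a TIMESTAMP through java.sql types
+ // interprets the wall-clock value in the JVM default time zone, so the resulting instant
+ // depends on TimeZone.getDefault() at conversion time:
+ //
+ //   TimeZone.setDefault(TimeZone.getTimeZone(ZoneOffset.ofHours(-2)));
+ //   long epochAtMinus2 = Timestamp.valueOf(LocalDateTime.of(2000, 5, 6, 12, 13, 14)).getTime();
+ //   TimeZone.setDefault(TimeZone.getTimeZone(ZoneOffset.ofHours(11)));
+ //   long epochAtPlus11 = Timestamp.valueOf(LocalDateTime.of(2000, 5, 6, 12, 13, 14)).getTime();
+ //
+ // epochAtMinus2 - epochAtPlus11 is 13 hours in milliseconds: the same wall-clock value maps to
+ // two different instants, which is exactly the kind of offset this test guards against.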
+ + // Arrange + // Set JVM default time zone for inserting + TimeZone.setDefault(TimeZone.getTimeZone(ZoneOffset.ofHours(insertTimeZone))); + + LocalDate anyDate = LocalDate.of(2000, 5, 6); + LocalTime anyTime = LocalTime.of(12, 13, 14, 123_456_000); + IntColumn partitionKeyValue = IntColumn.of(PARTITION_KEY, 1); + BooleanColumn col1Value = BooleanColumn.ofNull(COL_NAME1); + IntColumn col2Value = IntColumn.ofNull(COL_NAME2); + BigIntColumn col3Value = BigIntColumn.ofNull(COL_NAME3); + FloatColumn col4Value = FloatColumn.ofNull(COL_NAME4); + DoubleColumn col5Value = DoubleColumn.ofNull(COL_NAME5); + TextColumn col6Value = TextColumn.ofNull(COL_NAME6); + BlobColumn col7Value = BlobColumn.ofNull(COL_NAME7); + DateColumn col8Value = DateColumn.of(COL_NAME8, anyDate); + TimeColumn col9Value = TimeColumn.of(COL_NAME9, anyTime); + TimestampTZColumn col10Value = + TimestampTZColumn.of( + COL_NAME10, + LocalDateTime.of(anyDate, anyTime).withNano(123_000_000).toInstant(ZoneOffset.UTC)); + TimestampColumn column11Value = null; + if (isTimestampTypeSupported()) { + column11Value = + TimestampColumn.of( + COL_NAME11, LocalDateTime.of(anyDate, anyTime).withNano(123_000_000)); + } + + PutBuilder.Buildable put = + Put.newBuilder() + .namespace(namespace) + .table(TABLE) + .partitionKey(Key.newBuilder().add(partitionKeyValue).build()) + .value(col1Value) + .value(col2Value) + .value(col3Value) + .value(col4Value) + .value(col5Value) + .value(col6Value) + .value(col7Value) + .value(col8Value) + .value(col9Value) + .value(col10Value); + if (isTimestampTypeSupported()) { + put.value(column11Value); + } + // Act + storage.put(put.build()); + + // Assert + // Set JVM default time zone for reading + TimeZone.setDefault(TimeZone.getTimeZone(ZoneOffset.ofHours(readTimeZone))); + + assertResult( + partitionKeyValue, + col1Value, + col2Value, + col3Value, + col4Value, + col5Value, + col6Value, + col7Value, + col8Value, + col9Value, + col10Value, + column11Value); + } finally { + // Reset JVM default time zone to the original value + TimeZone.setDefault(originalDefaultTimezone); + } } private void assertResult( @@ -402,8 +693,14 @@ private void assertResult( FloatColumn col4Value, DoubleColumn col5Value, TextColumn col6Value, - BlobColumn col7Value) + BlobColumn col7Value, + DateColumn col8Value, + TimeColumn col9Value, + TimestampTZColumn col10Value, + @Nullable TimestampColumn col11Value) throws ExecutionException { + assert (isTimestampTypeSupported() == (col11Value != null)); + Optional actualOpt = storage.get( Get.newBuilder() @@ -432,9 +729,18 @@ private void assertResult( assertThat(actual.getColumns().get(COL_NAME6)).isEqualTo(col6Value); assertThat(actual.contains(COL_NAME7)).isTrue(); assertThat(actual.getColumns().get(COL_NAME7)).isEqualTo(col7Value); - - assertThat(actual.getContainedColumnNames()) - .containsExactlyInAnyOrder( + assertThat(actual.contains(COL_NAME8)).isTrue(); + assertThat(actual.getColumns().get(COL_NAME8)).isEqualTo(col8Value); + assertThat(actual.contains(COL_NAME9)).isTrue(); + assertThat(actual.getColumns().get(COL_NAME9)).isEqualTo(col9Value); + assertThat(actual.contains(COL_NAME10)).isTrue(); + assertThat(actual.getColumns().get(COL_NAME10)).isEqualTo(col10Value); + if (isTimestampTypeSupported()) { + assertThat(actual.contains(COL_NAME11)).isTrue(); + assertThat(actual.getColumns().get(COL_NAME11)).isEqualTo(col11Value); + } + List expectedColumnNames = + Lists.newArrayList( PARTITION_KEY, COL_NAME1, COL_NAME2, @@ -442,7 +748,15 @@ private void assertResult( COL_NAME4, 
COL_NAME5, COL_NAME6, - COL_NAME7); + COL_NAME7, + COL_NAME8, + COL_NAME9, + COL_NAME10); + if (isTimestampTypeSupported()) { + expectedColumnNames.add(COL_NAME11); + } + assertThat(actual.getContainedColumnNames()) + .containsExactlyInAnyOrder(expectedColumnNames.toArray(new String[0])); assertThat(actual.contains(PARTITION_KEY)).isTrue(); assertThat(actual.isNull(PARTITION_KEY)).isFalse(); @@ -490,6 +804,28 @@ private void assertResult( assertThat(actual.getBlobAsByteBuffer(COL_NAME7)).isEqualTo(col7Value.getBlobValue()); assertThat(actual.getBlobAsBytes(COL_NAME7)).isEqualTo(col7Value.getBlobValueAsBytes()); assertThat(actual.getAsObject(COL_NAME7)).isEqualTo(col7Value.getBlobValueAsByteBuffer()); + + assertThat(actual.contains(COL_NAME8)).isTrue(); + assertThat(actual.isNull(COL_NAME8)).isEqualTo(col8Value.hasNullValue()); + assertThat(actual.getDate(COL_NAME8)).isEqualTo(col8Value.getDateValue()); + assertThat(actual.getAsObject(COL_NAME8)).isEqualTo(col8Value.getDateValue()); + + assertThat(actual.contains(COL_NAME9)).isTrue(); + assertThat(actual.isNull(COL_NAME9)).isEqualTo(col9Value.hasNullValue()); + assertThat(actual.getTime(COL_NAME9)).isEqualTo(col9Value.getTimeValue()); + assertThat(actual.getAsObject(COL_NAME9)).isEqualTo(col9Value.getTimeValue()); + + assertThat(actual.contains(COL_NAME10)).isTrue(); + assertThat(actual.isNull(COL_NAME10)).isEqualTo(col10Value.hasNullValue()); + assertThat(actual.getTimestampTZ(COL_NAME10)).isEqualTo(col10Value.getTimestampTZValue()); + assertThat(actual.getAsObject(COL_NAME10)).isEqualTo(col10Value.getTimestampTZValue()); + + if (isTimestampTypeSupported()) { + assertThat(actual.contains(COL_NAME11)).isTrue(); + assertThat(actual.isNull(COL_NAME11)).isEqualTo(col11Value.hasNullValue()); + assertThat(actual.getTimestamp(COL_NAME11)).isEqualTo(col11Value.getTimestampValue()); + assertThat(actual.getAsObject(COL_NAME11)).isEqualTo(col11Value.getTimestampValue()); + } } protected Column getColumnWithRandomValue( @@ -504,4 +840,8 @@ protected Column getColumnWithMinValue(String columnName, DataType dataType) protected Column getColumnWithMaxValue(String columnName, DataType dataType) { return TestUtils.getColumnWithMaxValue(columnName, dataType); } + + protected boolean isTimestampTypeSupported() { + return true; + } } diff --git a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageConditionalMutationIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageConditionalMutationIntegrationTestBase.java index 21b87ba2d5..c486c9172e 100644 --- a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageConditionalMutationIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageConditionalMutationIntegrationTestBase.java @@ -5,8 +5,8 @@ import static org.assertj.core.api.Assertions.catchThrowable; import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; import com.google.common.collect.Ordering; +import com.google.common.collect.Sets; import com.scalar.db.api.ConditionalExpression.Operator; import com.scalar.db.exception.storage.ExecutionException; import com.scalar.db.exception.storage.NoMutationException; @@ -15,11 +15,15 @@ import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.Column; import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.Key; import 
com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import com.scalar.db.service.StorageFactory; import com.scalar.db.util.TestUtils; import edu.umd.cs.findbugs.annotations.Nullable; @@ -31,10 +35,12 @@ import java.util.Optional; import java.util.Properties; import java.util.Random; +import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import org.assertj.core.util.Lists; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; @@ -59,19 +65,10 @@ public abstract class DistributedStorageConditionalMutationIntegrationTestBase { private static final String COL_NAME5 = "c5"; private static final String COL_NAME6 = "c6"; private static final String COL_NAME7 = "c7"; - - private static final TableMetadata TABLE_METADATA = - TableMetadata.newBuilder() - .addColumn(PARTITION_KEY, DataType.TEXT) - .addColumn(COL_NAME1, DataType.BOOLEAN) - .addColumn(COL_NAME2, DataType.INT) - .addColumn(COL_NAME3, DataType.BIGINT) - .addColumn(COL_NAME4, DataType.FLOAT) - .addColumn(COL_NAME5, DataType.DOUBLE) - .addColumn(COL_NAME6, DataType.TEXT) - .addColumn(COL_NAME7, DataType.BLOB) - .addPartitionKey(PARTITION_KEY) - .build(); + private static final String COL_NAME8 = "c8"; + private static final String COL_NAME9 = "c9"; + private static final String COL_NAME10 = "c10"; + private static final String COL_NAME11 = "c11"; private static final int ATTEMPT_COUNT = 5; private static final int THREAD_NUM = 10; @@ -115,9 +112,27 @@ protected int getThreadNum() { } private void createTable() throws ExecutionException { + TableMetadata.Builder tableMetadata = + TableMetadata.newBuilder() + .addColumn(PARTITION_KEY, DataType.TEXT) + .addColumn(COL_NAME1, DataType.BOOLEAN) + .addColumn(COL_NAME2, DataType.INT) + .addColumn(COL_NAME3, DataType.BIGINT) + .addColumn(COL_NAME4, DataType.FLOAT) + .addColumn(COL_NAME5, DataType.DOUBLE) + .addColumn(COL_NAME6, DataType.TEXT) + .addColumn(COL_NAME7, DataType.BLOB) + .addColumn(COL_NAME8, DataType.DATE) + .addColumn(COL_NAME9, DataType.TIME) + .addColumn(COL_NAME10, DataType.TIMESTAMPTZ) + .addPartitionKey(PARTITION_KEY); + if (isTimestampTypeSupported()) { + tableMetadata.addColumn(COL_NAME11, DataType.TIMESTAMP); + } + Map options = getCreationOptions(); admin.createNamespace(namespace, true, options); - admin.createTable(namespace, TABLE, TABLE_METADATA, true, options); + admin.createTable(namespace, TABLE, tableMetadata.build(), true, options); } protected Map getCreationOptions() { @@ -160,10 +175,15 @@ private void dropTable() throws ExecutionException { } protected List getOperatorAndDataTypeListForTest() { + List dataTypes = Lists.newArrayList(DataType.values()); + if (!isTimestampTypeSupported()) { + dataTypes.remove(DataType.TIMESTAMP); + } + List ret = new ArrayList<>(); for (Operator operator : Operator.values()) { if (operator != Operator.LIKE && operator != Operator.NOT_LIKE) { - for (DataType dataType : DataType.values()) { + for (DataType dataType : dataTypes) { ret.add(new OperatorAndDataType(operator, dataType)); } } @@ -497,18 +517,25 @@ private void put_withPutIf_shouldPutProperly( Optional result = storage.get(get); assertThat(result).describedAs(description).isPresent(); + Set columnNames = + Sets.newHashSet( + PARTITION_KEY, + COL_NAME1, + COL_NAME2, + COL_NAME3, + 
COL_NAME4, + COL_NAME5, + COL_NAME6, + COL_NAME7, + COL_NAME8, + COL_NAME9, + COL_NAME10); + if (isTimestampTypeSupported()) { + columnNames.add(COL_NAME11); + } assertThat(result.get().getContainedColumnNames()) .describedAs(description) - .isEqualTo( - ImmutableSet.of( - PARTITION_KEY, - COL_NAME1, - COL_NAME2, - COL_NAME3, - COL_NAME4, - COL_NAME5, - COL_NAME6, - COL_NAME7)); + .isEqualTo(columnNames); Map> expected = shouldMutate ? put.getColumns() : initialData; assertThat(result.get().isNull(COL_NAME1)) @@ -547,6 +574,20 @@ private void put_withPutIf_shouldPutProperly( assertThat(result.get().getBlob(COL_NAME7)) .describedAs(description) .isEqualTo(expected.get(COL_NAME7).getBlobValue()); + assertThat(result.get().getDate(COL_NAME8)) + .describedAs(description) + .isEqualTo(expected.get(COL_NAME8).getDateValue()); + assertThat(result.get().getTime(COL_NAME9)) + .describedAs(description) + .isEqualTo(expected.get(COL_NAME9).getTimeValue()); + assertThat(result.get().getTimestampTZ(COL_NAME10)) + .describedAs(description) + .isEqualTo(expected.get(COL_NAME10).getTimestampTZValue()); + if (isTimestampTypeSupported()) { + assertThat(result.get().getTimestamp(COL_NAME11)) + .describedAs(description) + .isEqualTo(expected.get(COL_NAME11).getTimestampValue()); + } } @Test @@ -564,17 +605,23 @@ public void put_withPutIfExistsWhenRecordExists_shouldPutProperly() throws Execu // Assert Optional result = storage.get(prepareGet()); assertThat(result).isPresent(); - assertThat(result.get().getContainedColumnNames()) - .isEqualTo( - ImmutableSet.of( - PARTITION_KEY, - COL_NAME1, - COL_NAME2, - COL_NAME3, - COL_NAME4, - COL_NAME5, - COL_NAME6, - COL_NAME7)); + Set columnNames = + Sets.newHashSet( + PARTITION_KEY, + COL_NAME1, + COL_NAME2, + COL_NAME3, + COL_NAME4, + COL_NAME5, + COL_NAME6, + COL_NAME7, + COL_NAME8, + COL_NAME9, + COL_NAME10); + if (isTimestampTypeSupported()) { + columnNames.add(COL_NAME11); + } + assertThat(result.get().getContainedColumnNames()).isEqualTo(columnNames); assertThat(result.get().getBoolean(COL_NAME1)).isEqualTo(put.getBooleanValue(COL_NAME1)); assertThat(result.get().getInt(COL_NAME2)).isEqualTo(put.getIntValue(COL_NAME2)); assertThat(result.get().getBigInt(COL_NAME3)).isEqualTo(put.getBigIntValue(COL_NAME3)); @@ -582,6 +629,14 @@ public void put_withPutIfExistsWhenRecordExists_shouldPutProperly() throws Execu assertThat(result.get().getDouble(COL_NAME5)).isEqualTo(put.getDoubleValue(COL_NAME5)); assertThat(result.get().getText(COL_NAME6)).isEqualTo(put.getTextValue(COL_NAME6)); assertThat(result.get().getBlob(COL_NAME7)).isEqualTo(put.getBlobValue(COL_NAME7)); + assertThat(result.get().getDate(COL_NAME8)).isEqualTo(put.getDateValue(COL_NAME8)); + assertThat(result.get().getTime(COL_NAME9)).isEqualTo(put.getTimeValue(COL_NAME9)); + assertThat(result.get().getTimestampTZ(COL_NAME10)) + .isEqualTo(put.getTimestampTZValue(COL_NAME10)); + if (isTimestampTypeSupported()) { + assertThat(result.get().getTimestamp(COL_NAME11)) + .isEqualTo(put.getTimestampValue(COL_NAME11)); + } } @Test @@ -613,17 +668,23 @@ public void put_withPutIfNotExistsWhenRecordDoesNotExist_shouldPutProperly() // Assert Optional result = storage.get(prepareGet()); assertThat(result).isPresent(); - assertThat(result.get().getContainedColumnNames()) - .isEqualTo( - ImmutableSet.of( - PARTITION_KEY, - COL_NAME1, - COL_NAME2, - COL_NAME3, - COL_NAME4, - COL_NAME5, - COL_NAME6, - COL_NAME7)); + Set columnNames = + Sets.newHashSet( + PARTITION_KEY, + COL_NAME1, + COL_NAME2, + COL_NAME3, + COL_NAME4, + 
COL_NAME5, + COL_NAME6, + COL_NAME7, + COL_NAME8, + COL_NAME9, + COL_NAME10); + if (isTimestampTypeSupported()) { + columnNames.add(COL_NAME11); + } + assertThat(result.get().getContainedColumnNames()).isEqualTo(columnNames); assertThat(result.get().getBoolean(COL_NAME1)).isEqualTo(put.getBooleanValue(COL_NAME1)); assertThat(result.get().getInt(COL_NAME2)).isEqualTo(put.getIntValue(COL_NAME2)); assertThat(result.get().getBigInt(COL_NAME3)).isEqualTo(put.getBigIntValue(COL_NAME3)); @@ -631,6 +692,14 @@ public void put_withPutIfNotExistsWhenRecordDoesNotExist_shouldPutProperly() assertThat(result.get().getDouble(COL_NAME5)).isEqualTo(put.getDoubleValue(COL_NAME5)); assertThat(result.get().getText(COL_NAME6)).isEqualTo(put.getTextValue(COL_NAME6)); assertThat(result.get().getBlob(COL_NAME7)).isEqualTo(put.getBlobValue(COL_NAME7)); + assertThat(result.get().getDate(COL_NAME8)).isEqualTo(put.getDateValue(COL_NAME8)); + assertThat(result.get().getTime(COL_NAME9)).isEqualTo(put.getTimeValue(COL_NAME9)); + assertThat(result.get().getTimestampTZ(COL_NAME10)) + .isEqualTo(put.getTimestampTZValue(COL_NAME10)); + if (isTimestampTypeSupported()) { + assertThat(result.get().getTimestamp(COL_NAME11)) + .isEqualTo(put.getTimestampValue(COL_NAME11)); + } } @Test @@ -648,17 +717,23 @@ public void put_withPutIfNotExistsWhenRecordExists_shouldThrowNoMutationExceptio Optional result = storage.get(prepareGet()); assertThat(result).isPresent(); - assertThat(result.get().getContainedColumnNames()) - .isEqualTo( - ImmutableSet.of( - PARTITION_KEY, - COL_NAME1, - COL_NAME2, - COL_NAME3, - COL_NAME4, - COL_NAME5, - COL_NAME6, - COL_NAME7)); + Set columnNames = + Sets.newHashSet( + PARTITION_KEY, + COL_NAME1, + COL_NAME2, + COL_NAME3, + COL_NAME4, + COL_NAME5, + COL_NAME6, + COL_NAME7, + COL_NAME8, + COL_NAME9, + COL_NAME10); + if (isTimestampTypeSupported()) { + columnNames.add(COL_NAME11); + } + assertThat(result.get().getContainedColumnNames()).isEqualTo(columnNames); assertThat(result.get().getBoolean(COL_NAME1)) .isEqualTo(initialData.get(COL_NAME1).getBooleanValue()); assertThat(result.get().getInt(COL_NAME2)).isEqualTo(initialData.get(COL_NAME2).getIntValue()); @@ -672,6 +747,16 @@ public void put_withPutIfNotExistsWhenRecordExists_shouldThrowNoMutationExceptio .isEqualTo(initialData.get(COL_NAME6).getTextValue()); assertThat(result.get().getBlob(COL_NAME7)) .isEqualTo(initialData.get(COL_NAME7).getBlobValue()); + assertThat(result.get().getDate(COL_NAME8)) + .isEqualTo(initialData.get(COL_NAME8).getDateValue()); + assertThat(result.get().getTime(COL_NAME9)) + .isEqualTo(initialData.get(COL_NAME9).getTimeValue()); + assertThat(result.get().getTimestampTZ(COL_NAME10)) + .isEqualTo(initialData.get(COL_NAME10).getTimestampTZValue()); + if (isTimestampTypeSupported()) { + assertThat(result.get().getTimestamp(COL_NAME11)) + .isEqualTo(initialData.get(COL_NAME11).getTimestampValue()); + } } @Test @@ -1009,18 +1094,25 @@ private void delete_withDeleteIf_shouldPutProperly( Optional result = storage.get(get); assertThat(result).describedAs(description).isPresent(); + Set columnNames = + Sets.newHashSet( + PARTITION_KEY, + COL_NAME1, + COL_NAME2, + COL_NAME3, + COL_NAME4, + COL_NAME5, + COL_NAME6, + COL_NAME7, + COL_NAME8, + COL_NAME9, + COL_NAME10); + if (isTimestampTypeSupported()) { + columnNames.add(COL_NAME11); + } assertThat(result.get().getContainedColumnNames()) .describedAs(description) - .isEqualTo( - ImmutableSet.of( - PARTITION_KEY, - COL_NAME1, - COL_NAME2, - COL_NAME3, - COL_NAME4, - COL_NAME5, - 
COL_NAME6, - COL_NAME7)); + .isEqualTo(columnNames); assertThat(result.get().isNull(COL_NAME1)) .describedAs(description) @@ -1058,6 +1150,20 @@ private void delete_withDeleteIf_shouldPutProperly( assertThat(result.get().getBlob(COL_NAME7)) .describedAs(description) .isEqualTo(initialData.get(COL_NAME7).getBlobValue()); + assertThat(result.get().getDate(COL_NAME8)) + .describedAs(description) + .isEqualTo(initialData.get(COL_NAME8).getDateValue()); + assertThat(result.get().getTime(COL_NAME9)) + .describedAs(description) + .isEqualTo(initialData.get(COL_NAME9).getTimeValue()); + assertThat(result.get().getTimestampTZ(COL_NAME10)) + .describedAs(description) + .isEqualTo(initialData.get(COL_NAME10).getTimestampTZValue()); + if (isTimestampTypeSupported()) { + assertThat(result.get().getTimestamp(COL_NAME11)) + .describedAs(description) + .isEqualTo(initialData.get(COL_NAME11).getTimestampValue()); + } } } @@ -1129,22 +1235,31 @@ private Put preparePutWithRandomValues( DataType firstDataType, @Nullable Operator secondOperator, @Nullable DataType secondDataType) { - return Put.newBuilder() - .namespace(namespace) - .table(TABLE) - .partitionKey( - Key.ofText( - PARTITION_KEY, - getPartitionKeyValue(firstOperator, firstDataType, secondOperator, secondDataType))) - .value(getColumnWithRandomValue(random.get(), COL_NAME1, DataType.BOOLEAN)) - .value(getColumnWithRandomValue(random.get(), COL_NAME2, DataType.INT)) - .value(getColumnWithRandomValue(random.get(), COL_NAME3, DataType.BIGINT)) - .value(getColumnWithRandomValue(random.get(), COL_NAME4, DataType.FLOAT)) - .value(getColumnWithRandomValue(random.get(), COL_NAME5, DataType.DOUBLE)) - .value(getColumnWithRandomValue(random.get(), COL_NAME6, DataType.TEXT)) - .value(getColumnWithRandomValue(random.get(), COL_NAME7, DataType.BLOB)) - .consistency(Consistency.LINEARIZABLE) - .build(); + PutBuilder.Buildable put = + Put.newBuilder() + .namespace(namespace) + .table(TABLE) + .partitionKey( + Key.ofText( + PARTITION_KEY, + getPartitionKeyValue( + firstOperator, firstDataType, secondOperator, secondDataType))) + .value(getColumnWithRandomValue(random.get(), COL_NAME1, DataType.BOOLEAN)) + .value(getColumnWithRandomValue(random.get(), COL_NAME2, DataType.INT)) + .value(getColumnWithRandomValue(random.get(), COL_NAME3, DataType.BIGINT)) + .value(getColumnWithRandomValue(random.get(), COL_NAME4, DataType.FLOAT)) + .value(getColumnWithRandomValue(random.get(), COL_NAME5, DataType.DOUBLE)) + .value(getColumnWithRandomValue(random.get(), COL_NAME6, DataType.TEXT)) + .value(getColumnWithRandomValue(random.get(), COL_NAME7, DataType.BLOB)) + .value(getColumnWithRandomValue(random.get(), COL_NAME8, DataType.DATE)) + .value(getColumnWithRandomValue(random.get(), COL_NAME9, DataType.TIME)) + .value(getColumnWithRandomValue(random.get(), COL_NAME10, DataType.TIMESTAMPTZ)) + .consistency(Consistency.LINEARIZABLE); + if (isTimestampTypeSupported()) { + put.value(getColumnWithRandomValue(random.get(), COL_NAME11, DataType.TIMESTAMP)); + } + + return put.build(); } private Delete prepareDelete() { @@ -1230,20 +1345,32 @@ private Put preparePutWithNullValues( DataType firstDataType, @Nullable Operator secondOperator, @Nullable DataType secondDataType) { - return new Put( - Key.ofText( - PARTITION_KEY, - getPartitionKeyValue(firstOperator, firstDataType, secondOperator, secondDataType))) - .withBooleanValue(COL_NAME1, null) - .withIntValue(COL_NAME2, null) - .withBigIntValue(COL_NAME3, null) - .withFloatValue(COL_NAME4, null) - .withDoubleValue(COL_NAME5, null) - 
.withTextValue(COL_NAME6, null) - .withBlobValue(COL_NAME7, (ByteBuffer) null) - .withConsistency(Consistency.LINEARIZABLE) - .forNamespace(namespace) - .forTable(TABLE); + PutBuilder.Buildable put = + Put.newBuilder() + .namespace(namespace) + .table(TABLE) + .partitionKey( + Key.ofText( + PARTITION_KEY, + getPartitionKeyValue( + firstOperator, firstDataType, secondOperator, secondDataType))) + .booleanValue(COL_NAME1, null) + .intValue(COL_NAME2, null) + .bigIntValue(COL_NAME3, null) + .floatValue(COL_NAME4, null) + .doubleValue(COL_NAME5, null) + .textValue(COL_NAME6, null) + .blobValue(COL_NAME7, (ByteBuffer) null) + .dateValue(COL_NAME8, null) + .timeValue(COL_NAME9, null) + .timestampTZValue(COL_NAME10, null) + .consistency(Consistency.LINEARIZABLE); + + if (isTimestampTypeSupported()) { + put.timestampValue(COL_NAME11, null); + } + + return put.build(); } private Map> putInitialDataWithoutValues(Operator operator, DataType dataType) @@ -1269,20 +1396,29 @@ private Map> putInitialDataWithoutValues( preparePutWithoutValues(firstOperator, firstDataType, secondOperator, secondDataType) .withCondition(ConditionBuilder.putIfNotExists()); storage.put(initialPut); - return ImmutableMap.>builder() - .put( - PARTITION_KEY, - TextColumn.of( + ImmutableMap.Builder> columns = + ImmutableMap.>builder() + .put( PARTITION_KEY, - getPartitionKeyValue(firstOperator, firstDataType, secondOperator, secondDataType))) - .put(COL_NAME1, BooleanColumn.ofNull(COL_NAME1)) - .put(COL_NAME2, IntColumn.ofNull(COL_NAME2)) - .put(COL_NAME3, BigIntColumn.ofNull(COL_NAME3)) - .put(COL_NAME4, FloatColumn.ofNull(COL_NAME4)) - .put(COL_NAME5, DoubleColumn.ofNull(COL_NAME5)) - .put(COL_NAME6, TextColumn.ofNull(COL_NAME6)) - .put(COL_NAME7, BlobColumn.ofNull(COL_NAME7)) - .build(); + TextColumn.of( + PARTITION_KEY, + getPartitionKeyValue( + firstOperator, firstDataType, secondOperator, secondDataType))) + .put(COL_NAME1, BooleanColumn.ofNull(COL_NAME1)) + .put(COL_NAME2, IntColumn.ofNull(COL_NAME2)) + .put(COL_NAME3, BigIntColumn.ofNull(COL_NAME3)) + .put(COL_NAME4, FloatColumn.ofNull(COL_NAME4)) + .put(COL_NAME5, DoubleColumn.ofNull(COL_NAME5)) + .put(COL_NAME6, TextColumn.ofNull(COL_NAME6)) + .put(COL_NAME7, BlobColumn.ofNull(COL_NAME7)) + .put(COL_NAME8, DateColumn.ofNull(COL_NAME8)) + .put(COL_NAME9, TimeColumn.ofNull(COL_NAME9)) + .put(COL_NAME10, TimestampTZColumn.ofNull(COL_NAME10)); + if (isTimestampTypeSupported()) { + columns.put(COL_NAME11, TimestampColumn.ofNull(COL_NAME11)); + } + + return columns.build(); } private Put preparePutWithoutValues( @@ -1327,6 +1463,14 @@ private String getColumnName(DataType dataType) { return COL_NAME6; case BLOB: return COL_NAME7; + case DATE: + return COL_NAME8; + case TIME: + return COL_NAME9; + case TIMESTAMPTZ: + return COL_NAME10; + case TIMESTAMP: + return COL_NAME11; default: throw new AssertionError(); } @@ -1357,6 +1501,18 @@ private ConditionalExpression buildConditionalExpression( case BLOB: return ConditionBuilder.buildConditionalExpression( BlobColumn.ofNull(columnToCompare.getName()), operator); + case DATE: + return ConditionBuilder.buildConditionalExpression( + DateColumn.ofNull(columnToCompare.getName()), operator); + case TIME: + return ConditionBuilder.buildConditionalExpression( + TimeColumn.ofNull(columnToCompare.getName()), operator); + case TIMESTAMPTZ: + return ConditionBuilder.buildConditionalExpression( + TimestampTZColumn.ofNull(columnToCompare.getName()), operator); + case TIMESTAMP: + return ConditionBuilder.buildConditionalExpression( + 
TimestampColumn.ofNull(columnToCompare.getName()), operator); default: throw new AssertionError(); } @@ -1503,4 +1659,8 @@ public DataType getDataType() { return dataType; } } + + protected boolean isTimestampTypeSupported() { + return true; + } } diff --git a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageCrossPartitionScanIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageCrossPartitionScanIntegrationTestBase.java index 6d9499726c..af0746e41b 100644 --- a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageCrossPartitionScanIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageCrossPartitionScanIntegrationTestBase.java @@ -7,6 +7,7 @@ import com.google.common.collect.ComparisonChain; import com.google.common.collect.ImmutableList; import com.google.common.collect.ListMultimap; +import com.google.common.collect.Lists; import com.scalar.db.api.ConditionalExpression.Operator; import com.scalar.db.api.Scan.Ordering; import com.scalar.db.api.Scan.Ordering.Order; @@ -18,11 +19,15 @@ import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.Column; import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.Key; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import com.scalar.db.service.StorageFactory; import com.scalar.db.util.TestUtils; import java.io.IOException; @@ -44,12 +49,16 @@ import java.util.concurrent.Future; import java.util.stream.Collectors; import java.util.stream.IntStream; +import java.util.stream.Stream; import javax.annotation.Nonnull; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -58,7 +67,7 @@ public abstract class DistributedStorageCrossPartitionScanIntegrationTestBase { private static final Logger logger = LoggerFactory.getLogger(DistributedStorageCrossPartitionScanIntegrationTestBase.class); - private static final String TEST_NAME = "storage_cross_partition_scan"; + private static final String TEST_NAME = "storage_cross_part_scan"; private static final String NAMESPACE_BASE_NAME = "int_test_" + TEST_NAME + "_"; private static final String CONDITION_TEST_TABLE = "condition_test_table"; private static final String PARTITION_KEY_NAME = "pk"; @@ -69,6 +78,10 @@ public abstract class DistributedStorageCrossPartitionScanIntegrationTestBase { private static final String COL_NAME5 = "c5"; private static final String COL_NAME6 = "c6"; private static final String COL_NAME7 = "c7"; + private static final String COL_NAME8 = "c8"; + private static final String COL_NAME9 = "c9"; + private static final String COL_NAME10 = "c10"; + private static final String COL_NAME11 = "c11"; private static final int CONDITION_TEST_TABLE_NUM_ROWS = 3; private static final int CONDITION_TEST_PREDICATE_VALUE = 2; private static final int FIRST_COLUMN_CARDINALITY = 5; @@ -78,7 +91,7 @@ public abstract class 
DistributedStorageCrossPartitionScanIntegrationTestBase { private ExecutorService executorService; private long seed; - private ThreadLocal random; + protected ThreadLocal random; private DistributedStorage storage; private DistributedStorageAdmin admin; @@ -133,11 +146,7 @@ protected boolean isParallelDdlSupported() { } private void createTableForConditionTests() throws ExecutionException { - Map options = getCreationOptions(); - admin.createNamespace(getNamespaceName(), true, options); - admin.createTable( - getNamespaceName(), - CONDITION_TEST_TABLE, + TableMetadata.Builder tableMetadata = TableMetadata.newBuilder() .addColumn(PARTITION_KEY_NAME, DataType.INT) .addColumn(COL_NAME1, DataType.INT) @@ -147,10 +156,18 @@ private void createTableForConditionTests() throws ExecutionException { .addColumn(COL_NAME5, DataType.TEXT) .addColumn(COL_NAME6, DataType.BOOLEAN) .addColumn(COL_NAME7, DataType.BLOB) - .addPartitionKey(PARTITION_KEY_NAME) - .build(), - true, - options); + .addColumn(COL_NAME8, DataType.DATE) + .addColumn(COL_NAME9, DataType.TIME) + .addColumn(COL_NAME10, DataType.TIMESTAMPTZ) + .addPartitionKey(PARTITION_KEY_NAME); + if (isTimestampTypeSupported()) { + tableMetadata.addColumn(COL_NAME11, DataType.TIMESTAMP); + } + + Map options = getCreationOptions(); + admin.createNamespace(getNamespaceName(), true, options); + admin.createTable( + getNamespaceName(), CONDITION_TEST_TABLE, tableMetadata.build(), true, options); } private void createTablesForOrderingTests() @@ -194,9 +211,13 @@ private String getTableName(DataType firstColumnType, DataType secondColumnType) } private ListMultimap getColumnTypes() { + List dataTypes = Lists.newArrayList(DataType.values()); + if (!isTimestampTypeSupported()) { + dataTypes.remove(DataType.TIMESTAMP); + } ListMultimap columnTypes = ArrayListMultimap.create(); - for (DataType firstColumnType : DataType.values()) { - for (DataType secondColumnType : DataType.values()) { + for (DataType firstColumnType : dataTypes) { + for (DataType secondColumnType : dataTypes) { columnTypes.put(firstColumnType, secondColumnType); } } @@ -346,7 +367,7 @@ private Put preparePut(int key, String text) { .build(); } - private List> prepareNonKeyColumns(int i) { + protected List> prepareNonKeyColumns(int i) { List> columns = new ArrayList<>(); columns.add(IntColumn.of(COL_NAME1, i)); columns.add(BigIntColumn.of(COL_NAME2, i)); @@ -355,6 +376,12 @@ private List> prepareNonKeyColumns(int i) { columns.add(TextColumn.of(COL_NAME5, String.valueOf(i))); columns.add(BooleanColumn.of(COL_NAME6, i % 2 == 0)); columns.add(BlobColumn.of(COL_NAME7, String.valueOf(i).getBytes(StandardCharsets.UTF_8))); + columns.add(DateColumn.of(COL_NAME8, DateColumn.MIN_VALUE.plusDays(i))); + columns.add(TimeColumn.of(COL_NAME9, TimeColumn.MIN_VALUE.plusSeconds(i))); + columns.add(TimestampTZColumn.of(COL_NAME10, TimestampTZColumn.MIN_VALUE.plusSeconds(i))); + if (isTimestampTypeSupported()) { + columns.add(TimestampColumn.of(COL_NAME11, TimestampColumn.MIN_VALUE.plusSeconds(i))); + } return columns; } @@ -367,6 +394,12 @@ private List> prepareNullColumns() { columns.add(TextColumn.ofNull(COL_NAME5)); columns.add(BooleanColumn.ofNull(COL_NAME6)); columns.add(BlobColumn.ofNull(COL_NAME7)); + columns.add(DateColumn.ofNull(COL_NAME8)); + columns.add(TimeColumn.ofNull(COL_NAME9)); + columns.add(TimestampTZColumn.ofNull(COL_NAME10)); + if (isTimestampTypeSupported()) { + columns.add(TimestampColumn.ofNull(COL_NAME11)); + } return columns; } @@ -815,9 +848,10 @@ private void 
scan_WithNullCondition_ShouldReturnProperResult(Column column, O assertScanResult(actual, getExpectedNullResults(operator), description(column, operator)); } - @Test - public void scan_WithConjunctiveNormalFormConditionsShouldReturnProperResult() - throws IOException, ExecutionException { + @ParameterizedTest(name = "column with conditions: {0}") + @MethodSource("provideColumnsForCNFConditionsTest") + public void scan_WithConjunctiveNormalFormConditionsShouldReturnProperResult( + List columnNamesToTest) throws IOException, ExecutionException { // Arrange prepareRecords(); BuildableScanAllWithOngoingWhereAnd builder = @@ -829,12 +863,17 @@ public void scan_WithConjunctiveNormalFormConditionsShouldReturnProperResult() prepareOrConditionSet( ImmutableList.of( IntColumn.of(PARTITION_KEY_NAME, 1), IntColumn.of(PARTITION_KEY_NAME, 2)))); - List> columns1 = prepareNonKeyColumns(1); - List> columns2 = prepareNonKeyColumns(2); + Map> columns1ByName = + prepareNonKeyColumns(1).stream().collect(Collectors.toMap(Column::getName, c -> c)); + Map> columns2ByName = + prepareNonKeyColumns(2).stream().collect(Collectors.toMap(Column::getName, c -> c)); List orConditionSets = - IntStream.range(0, columns1.size()) - .boxed() - .map(i -> prepareOrConditionSet(ImmutableList.of(columns1.get(i), columns2.get(i)))) + columnNamesToTest.stream() + .map( + columnName -> + prepareOrConditionSet( + ImmutableList.of( + columns1ByName.get(columnName), columns2ByName.get(columnName)))) .collect(Collectors.toList()); orConditionSets.forEach(builder::and); @@ -848,6 +887,13 @@ public void scan_WithConjunctiveNormalFormConditionsShouldReturnProperResult() "failed with CNF conditions"); } + protected Stream provideColumnsForCNFConditionsTest() { + List allColumns = + prepareNonKeyColumns(0).stream().map(Column::getName).collect(Collectors.toList()); + + return Stream.of(Arguments.of(allColumns)); + } + @Test public void scan_WithDisjunctiveNormalFormConditionsShouldReturnProperResult() throws IOException, ExecutionException { @@ -1202,4 +1248,8 @@ public String toString() { return "Tuple{" + "first=" + first + ", second=" + second + '}'; } } + + protected boolean isTimestampTypeSupported() { + return true; + } } diff --git a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageMultipleClusteringKeyScanIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageMultipleClusteringKeyScanIntegrationTestBase.java index 66676a5002..afda1a3cdd 100644 --- a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageMultipleClusteringKeyScanIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageMultipleClusteringKeyScanIntegrationTestBase.java @@ -52,7 +52,7 @@ private enum OrderingType { NOTHING } - private static final String TEST_NAME = "storage_mul_ckey"; + private static final String TEST_NAME = "st_mul_ck"; private static final String NAMESPACE_BASE_NAME = "int_test_" + TEST_NAME + "_"; private static final String PARTITION_KEY = "pkey"; private static final String FIRST_CLUSTERING_KEY = "ckey1"; @@ -102,8 +102,8 @@ protected String getNamespaceBaseName() { protected ListMultimap getClusteringKeyTypes() { ListMultimap clusteringKeyTypes = ArrayListMultimap.create(); - for (DataType firstClusteringKeyType : DataType.values()) { - for (DataType secondClusteringKeyType : DataType.values()) { + for (DataType firstClusteringKeyType : getDataTypes()) { + for (DataType secondClusteringKeyType : getDataTypes()) { 
clusteringKeyTypes.put(firstClusteringKeyType, secondClusteringKeyType); } } @@ -123,12 +123,13 @@ private void createTables() throws java.util.concurrent.ExecutionException, Inte Map options = getCreationOptions(); for (DataType firstClusteringKeyType : clusteringKeyTypes.keySet()) { - Callable testCallable = - () -> { - admin.createNamespace(getNamespaceName(firstClusteringKeyType), true, options); - for (DataType secondClusteringKeyType : - clusteringKeyTypes.get(firstClusteringKeyType)) { - for (Order firstClusteringOrder : Order.values()) { + for (Order firstClusteringOrder : Order.values()) { + Callable testCallable = + () -> { + admin.createNamespace( + getNamespaceName(firstClusteringKeyType, firstClusteringOrder), true, options); + for (DataType secondClusteringKeyType : + clusteringKeyTypes.get(firstClusteringKeyType)) { for (Order secondClusteringOrder : Order.values()) { createTable( firstClusteringKeyType, @@ -138,10 +139,10 @@ private void createTables() throws java.util.concurrent.ExecutionException, Inte options); } } - } - return null; - }; - testCallables.add(testCallable); + return null; + }; + testCallables.add(testCallable); + } } // We firstly execute the first one and then the rest. This is because the first table creation @@ -163,7 +164,7 @@ private void createTable( Map options) throws ExecutionException { admin.createTable( - getNamespaceName(firstClusteringKeyType), + getNamespaceName(firstClusteringKeyType, firstClusteringOrder), getTableName( firstClusteringKeyType, firstClusteringOrder, @@ -218,14 +219,14 @@ public void afterAll() throws Exception { private void dropTables() throws java.util.concurrent.ExecutionException, InterruptedException { List> testCallables = new ArrayList<>(); for (DataType firstClusteringKeyType : clusteringKeyTypes.keySet()) { - Callable testCallable = - () -> { - for (DataType secondClusteringKeyType : - clusteringKeyTypes.get(firstClusteringKeyType)) { - for (Order firstClusteringOrder : Order.values()) { + for (Order firstClusteringOrder : Order.values()) { + Callable testCallable = + () -> { + for (DataType secondClusteringKeyType : + clusteringKeyTypes.get(firstClusteringKeyType)) { for (Order secondClusteringOrder : Order.values()) { admin.dropTable( - getNamespaceName(firstClusteringKeyType), + getNamespaceName(firstClusteringKeyType, firstClusteringOrder), getTableName( firstClusteringKeyType, firstClusteringOrder, @@ -233,11 +234,11 @@ private void dropTables() throws java.util.concurrent.ExecutionException, Interr secondClusteringOrder)); } } - } - admin.dropNamespace(getNamespaceName(firstClusteringKeyType)); - return null; - }; - testCallables.add(testCallable); + admin.dropNamespace(getNamespaceName(firstClusteringKeyType, firstClusteringOrder)); + return null; + }; + testCallables.add(testCallable); + } } // We firstly execute the callables without the last one. And then we execute the last one. 
This @@ -254,7 +255,7 @@ private void truncateTable( Order secondClusteringOrder) throws ExecutionException { admin.truncateTable( - getNamespaceName(firstClusteringKeyType), + getNamespaceName(firstClusteringKeyType, firstClusteringOrder), getTableName( firstClusteringKeyType, firstClusteringOrder, @@ -275,8 +276,8 @@ private String getTableName( secondClusteringOrder.toString()); } - private String getNamespaceName(DataType firstClusteringKeyType) { - return namespaceBaseName + firstClusteringKeyType; + private String getNamespaceName(DataType firstClusteringKeyType, Order firstClusteringOrder) { + return namespaceBaseName + firstClusteringKeyType + "_" + firstClusteringOrder; } @Test @@ -1785,7 +1786,7 @@ private Put preparePut( Order secondClusteringOrder, Column secondClusteringKeyValue) { return Put.newBuilder() - .namespace(getNamespaceName(firstClusteringKeyType)) + .namespace(getNamespaceName(firstClusteringKeyType, firstClusteringOrder)) .table( getTableName( firstClusteringKeyType, @@ -1915,7 +1916,7 @@ private Scan getScan( int limit) { BuildableScanWithPartitionKey scan = Scan.newBuilder() - .namespace(getNamespaceName(firstClusteringKeyType)) + .namespace(getNamespaceName(firstClusteringKeyType, firstClusteringOrder)) .table( getTableName( firstClusteringKeyType, @@ -2162,4 +2163,8 @@ public String toString() { return "ClusteringKey{" + "first=" + first + ", second=" + second + '}'; } } + + protected List getDataTypes() { + return Arrays.asList(DataType.values()); + } } diff --git a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageMultiplePartitionKeyIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageMultiplePartitionKeyIntegrationTestBase.java index f61f901fff..ac8e0b416d 100644 --- a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageMultiplePartitionKeyIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageMultiplePartitionKeyIntegrationTestBase.java @@ -84,18 +84,8 @@ protected String getNamespaceBaseName() { protected ListMultimap getPartitionKeyTypes() { ListMultimap partitionKeyTypes = ArrayListMultimap.create(); - for (DataType firstPartitionKeyType : DataType.values()) { - if (!isFloatTypeKeySupported() - && (firstPartitionKeyType == DataType.FLOAT - || firstPartitionKeyType == DataType.DOUBLE)) { - continue; - } - for (DataType secondPartitionKeyType : DataType.values()) { - if (!isFloatTypeKeySupported() - && (secondPartitionKeyType == DataType.FLOAT - || secondPartitionKeyType == DataType.DOUBLE)) { - continue; - } + for (DataType firstPartitionKeyType : getDataTypes()) { + for (DataType secondPartitionKeyType : getDataTypes()) { partitionKeyTypes.put(firstPartitionKeyType, secondPartitionKeyType); } } @@ -110,10 +100,6 @@ protected boolean isParallelDdlSupported() { return true; } - protected boolean isFloatTypeKeySupported() { - return true; - } - private void createTables() throws java.util.concurrent.ExecutionException, InterruptedException { List> testCallables = new ArrayList<>(); @@ -506,4 +492,8 @@ public PartitionKey(Column first, Column second) { this.second = second; } } + + protected List getDataTypes() { + return Arrays.asList(DataType.values()); + } } diff --git a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageSecondaryIndexIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageSecondaryIndexIntegrationTestBase.java index c90f8c3d3d..8b5c4f0cb3 100644 --- 
a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageSecondaryIndexIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageSecondaryIndexIntegrationTestBase.java @@ -2,6 +2,7 @@ import static org.assertj.core.api.Assertions.assertThat; +import com.google.common.collect.Sets; import com.scalar.db.exception.storage.ExecutionException; import com.scalar.db.io.Column; import com.scalar.db.io.DataType; @@ -9,7 +10,6 @@ import com.scalar.db.service.StorageFactory; import com.scalar.db.util.TestUtils; import java.io.IOException; -import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -69,7 +69,7 @@ protected String getNamespace() { } protected Set getSecondaryIndexTypes() { - return new HashSet<>(Arrays.asList(DataType.values())); + return Sets.newHashSet(DataType.values()); } private void createTables() throws ExecutionException { @@ -306,4 +306,8 @@ protected Column getColumnWithMinValue(String columnName, DataType dataType) protected Column getColumnWithMaxValue(String columnName, DataType dataType) { return TestUtils.getColumnWithMaxValue(columnName, dataType); } + + protected boolean isTimestampTypeSupported() { + return true; + } } diff --git a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageSingleClusteringKeyScanIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageSingleClusteringKeyScanIntegrationTestBase.java index 3cd85f8692..1029a51eab 100644 --- a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageSingleClusteringKeyScanIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageSingleClusteringKeyScanIntegrationTestBase.java @@ -53,7 +53,7 @@ private enum OrderingType { private DistributedStorageAdmin admin; private DistributedStorage storage; private String namespace; - private Set clusteringKeyTypes; + private List clusteringKeyTypes; private long seed; @@ -79,8 +79,8 @@ protected String getNamespace() { return NAMESPACE; } - protected Set getClusteringKeyTypes() { - return new HashSet<>(Arrays.asList(DataType.values())); + protected List getClusteringKeyTypes() { + return Arrays.asList(DataType.values()); } private void createTables() throws ExecutionException { diff --git a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageSinglePartitionKeyIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageSinglePartitionKeyIntegrationTestBase.java index 9d59295801..6ed7a3a68e 100644 --- a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageSinglePartitionKeyIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageSinglePartitionKeyIntegrationTestBase.java @@ -44,7 +44,7 @@ public abstract class DistributedStorageSinglePartitionKeyIntegrationTestBase { private DistributedStorageAdmin admin; private DistributedStorage storage; private String namespace; - private Set partitionKeyTypes; + private List partitionKeyTypes; private long seed; @@ -69,12 +69,8 @@ protected String getNamespace() { return NAMESPACE; } - protected Set getPartitionKeyTypes() { - return new HashSet<>(Arrays.asList(DataType.values())); - } - - protected boolean isFloatTypeKeySupported() { - return true; + protected List getPartitionKeyTypes() { + return Arrays.asList(DataType.values()); } private void createTables() throws ExecutionException { @@ -147,10 +143,6 @@ private String 
getTableName(DataType partitionKeyType) { @Test public void getAndScanAndDelete_ShouldBehaveCorrectly() throws ExecutionException, IOException { for (DataType partitionKeyType : partitionKeyTypes) { - if (!isFloatTypeKeySupported() - && (partitionKeyType == DataType.FLOAT || partitionKeyType == DataType.DOUBLE)) { - continue; - } random.setSeed(seed); truncateTable(partitionKeyType); diff --git a/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionAdminImportTableIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionAdminImportTableIntegrationTestBase.java index 49f3bb28b9..25010b5be3 100644 --- a/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionAdminImportTableIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionAdminImportTableIntegrationTestBase.java @@ -3,13 +3,21 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import com.scalar.db.api.DistributedStorageAdminImportTableIntegrationTestBase.TestData; import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.exception.transaction.TransactionException; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; import com.scalar.db.service.TransactionFactory; +import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; +import java.util.HashSet; +import java.util.List; import java.util.Map; -import java.util.Map.Entry; +import java.util.Optional; import java.util.Properties; +import java.util.Set; +import java.util.stream.Collectors; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; @@ -26,9 +34,10 @@ public abstract class DistributedTransactionAdminImportTableIntegrationTestBase private static final String TEST_NAME = "tx_admin_import_table"; private static final String NAMESPACE = "int_test_" + TEST_NAME; - private final Map tables = new HashMap<>(); + private final List testDataList = new ArrayList<>(); protected DistributedTransactionAdmin admin; + protected DistributedTransactionManager manager; @BeforeAll public void beforeAll() throws Exception { @@ -48,13 +57,11 @@ protected Map getCreationOptions() { } private void dropTable() throws Exception { - for (Entry entry : tables.entrySet()) { - String table = entry.getKey(); - TableMetadata metadata = entry.getValue(); - if (metadata == null) { - dropNonImportableTable(table); + for (TestData testData : testDataList) { + if (!testData.isImportableTable()) { + dropNonImportableTable(testData.getTableName()); } else { - admin.dropTable(getNamespace(), table); + admin.dropTable(getNamespace(), testData.getTableName()); } } if (!admin.namespaceExists(getNamespace())) { @@ -62,12 +69,15 @@ private void dropTable() throws Exception { admin.repairNamespace(getNamespace(), getCreationOptions()); } admin.dropNamespace(getNamespace()); + admin.dropCoordinatorTables(); } @BeforeEach protected void setUp() throws Exception { TransactionFactory factory = TransactionFactory.create(getProperties(TEST_NAME)); admin = factory.getTransactionAdmin(); + manager = factory.getTransactionManager(); + admin.createCoordinatorTables(true); } @AfterEach @@ -90,27 +100,29 @@ protected void afterEach() { @AfterAll protected void afterAll() throws Exception {} - protected abstract Map createExistingDatabaseWithAllDataTypes() - throws Exception; + protected abstract List 
createExistingDatabaseWithAllDataTypes() throws Exception; protected abstract void dropNonImportableTable(String table) throws Exception; @Test public void importTable_ShouldWorkProperly() throws Exception { // Arrange - tables.putAll(createExistingDatabaseWithAllDataTypes()); + testDataList.addAll(createExistingDatabaseWithAllDataTypes()); // Act Assert - for (Entry entry : tables.entrySet()) { - String table = entry.getKey(); - TableMetadata metadata = entry.getValue(); - if (metadata == null) { - importTable_ForNonImportableTable_ShouldThrowIllegalArgumentException(table); + for (TestData testData : testDataList) { + if (!testData.isImportableTable()) { + importTable_ForNonImportableTable_ShouldThrowIllegalArgumentException( + testData.getTableName()); } else { - importTable_ForImportableTable_ShouldImportProperly(table, metadata); + importTable_ForImportableTable_ShouldImportProperly( + testData.getTableName(), + testData.getOverrideColumnsType(), + testData.getTableMetadata()); } } importTable_ForNonExistingTable_ShouldThrowIllegalArgumentException(); + importTable_ForImportedTable_ShouldInsertThenGetCorrectly(); } @Test @@ -123,9 +135,10 @@ public void importTable_ForUnsupportedDatabase_ShouldThrowUnsupportedOperationEx } private void importTable_ForImportableTable_ShouldImportProperly( - String table, TableMetadata metadata) throws ExecutionException { + String table, Map overrideColumnsType, TableMetadata metadata) + throws ExecutionException { // Act - admin.importTable(getNamespace(), table, Collections.emptyMap()); + admin.importTable(getNamespace(), table, Collections.emptyMap(), overrideColumnsType); // Assert assertThat(admin.namespaceExists(getNamespace())).isTrue(); @@ -145,4 +158,60 @@ private void importTable_ForNonExistingTable_ShouldThrowIllegalArgumentException () -> admin.importTable(getNamespace(), "non-existing-table", Collections.emptyMap())) .isInstanceOf(IllegalArgumentException.class); } + + public void importTable_ForImportedTable_ShouldInsertThenGetCorrectly() + throws TransactionException { + // Arrange + List inserts = + testDataList.stream() + .filter(TestData::isImportableTable) + .map(td -> td.getInsert(getNamespace(), td.getTableName())) + .collect(Collectors.toList()); + List gets = + testDataList.stream() + .filter(TestData::isImportableTable) + .map(td -> td.getGet(getNamespace(), td.getTableName())) + .collect(Collectors.toList()); + + // Act + DistributedTransaction tx = manager.start(); + for (Insert insert : inserts) { + tx.insert(insert); + } + tx.commit(); + + List> results = new ArrayList<>(); + tx = manager.start(); + for (Get get : gets) { + results.add(tx.get(get)); + } + tx.commit(); + + // Assert + for (int i = 0; i < results.size(); i++) { + Insert insert = inserts.get(i); + Optional optResult = results.get(i); + + assertThat(optResult).isPresent(); + Result result = optResult.get(); + Set actualColumnNamesWithoutKeys = new HashSet<>(result.getContainedColumnNames()); + actualColumnNamesWithoutKeys.removeAll( + insert.getPartitionKey().getColumns().stream() + .map(Column::getName) + .collect(Collectors.toSet())); + + assertThat(actualColumnNamesWithoutKeys) + .containsExactlyInAnyOrderElementsOf(insert.getColumns().keySet()); + result.getColumns().entrySet().stream() + .filter( + e -> { + // Filter partition key columns + return !insert.getPartitionKey().getColumns().contains(e.getValue()); + }) + .forEach( + entry -> + // Assert that each result column is equal to the corresponding column of the Insert +
assertThat(entry.getValue()).isEqualTo(insert.getColumns().get(entry.getKey()))); + } + } } diff --git a/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionAdminIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionAdminIntegrationTestBase.java index 579205ceaa..8fd3e8f16a 100644 --- a/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionAdminIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionAdminIntegrationTestBase.java @@ -12,6 +12,10 @@ import com.scalar.db.service.TransactionFactory; import com.scalar.db.util.AdminTestUtils; import java.nio.charset.StandardCharsets; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.ZoneOffset; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -48,6 +52,10 @@ public abstract class DistributedTransactionAdminIntegrationTestBase { protected static final String COL_NAME9 = "c9"; protected static final String COL_NAME10 = "c10"; protected static final String COL_NAME11 = "c11"; + private static final String COL_NAME12 = "c12"; + private static final String COL_NAME13 = "c13"; + private static final String COL_NAME14 = "c14"; + private static final String COL_NAME15 = "c15"; protected static final TableMetadata TABLE_METADATA = TableMetadata.newBuilder() @@ -62,6 +70,9 @@ public abstract class DistributedTransactionAdminIntegrationTestBase { .addColumn(COL_NAME9, DataType.DOUBLE) .addColumn(COL_NAME10, DataType.BOOLEAN) .addColumn(COL_NAME11, DataType.BLOB) + .addColumn(COL_NAME12, DataType.DATE) + .addColumn(COL_NAME13, DataType.TIME) + .addColumn(COL_NAME14, DataType.TIMESTAMPTZ) .addPartitionKey(COL_NAME2) .addPartitionKey(COL_NAME1) .addClusteringKey(COL_NAME4, Scan.Ordering.Order.ASC) @@ -148,6 +159,9 @@ private void dropTables() throws ExecutionException { public void getTableMetadata_CorrectTableGiven_ShouldReturnCorrectMetadata() throws ExecutionException { // Arrange + if (isTimestampTypeSupported()) { + admin.addNewColumnToTable(namespace1, TABLE1, COL_NAME15, DataType.TIMESTAMP); + } // Act TableMetadata tableMetadata = admin.getTableMetadata(namespace1, TABLE1); @@ -165,7 +179,11 @@ public void getTableMetadata_CorrectTableGiven_ShouldReturnCorrectMetadata() assertThat(iterator.next()).isEqualTo(COL_NAME4); assertThat(iterator.next()).isEqualTo(COL_NAME3); - assertThat(tableMetadata.getColumnNames().size()).isEqualTo(11); + if (isTimestampTypeSupported()) { + assertThat(tableMetadata.getColumnNames().size()).isEqualTo(15); + } else { + assertThat(tableMetadata.getColumnNames().size()).isEqualTo(14); + } assertThat(tableMetadata.getColumnNames().contains(COL_NAME1)).isTrue(); assertThat(tableMetadata.getColumnNames().contains(COL_NAME2)).isTrue(); assertThat(tableMetadata.getColumnNames().contains(COL_NAME3)).isTrue(); @@ -177,6 +195,12 @@ public void getTableMetadata_CorrectTableGiven_ShouldReturnCorrectMetadata() assertThat(tableMetadata.getColumnNames().contains(COL_NAME9)).isTrue(); assertThat(tableMetadata.getColumnNames().contains(COL_NAME10)).isTrue(); assertThat(tableMetadata.getColumnNames().contains(COL_NAME11)).isTrue(); + assertThat(tableMetadata.getColumnNames().contains(COL_NAME12)).isTrue(); + assertThat(tableMetadata.getColumnNames().contains(COL_NAME13)).isTrue(); + assertThat(tableMetadata.getColumnNames().contains(COL_NAME14)).isTrue(); + if (isTimestampTypeSupported()) { + 
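// The expected sizes asserted above follow from the metadata definition: the original 11
// columns (c1-c11) plus the 3 unconditionally added temporal columns (c12 DATE, c13 TIME,
// c14 TIMESTAMPTZ) give 14; c15 (TIMESTAMP) is only added at runtime via
// addNewColumnToTable() when isTimestampTypeSupported() returns true, giving 15:
//
//   11 + 3 = 14;  14 + 1 = 15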
assertThat(tableMetadata.getColumnNames().contains(COL_NAME15)).isTrue(); + } assertThat(tableMetadata.getColumnDataType(COL_NAME1)).isEqualTo(DataType.INT); assertThat(tableMetadata.getColumnDataType(COL_NAME2)).isEqualTo(DataType.TEXT); @@ -189,6 +213,12 @@ public void getTableMetadata_CorrectTableGiven_ShouldReturnCorrectMetadata() assertThat(tableMetadata.getColumnDataType(COL_NAME9)).isEqualTo(DataType.DOUBLE); assertThat(tableMetadata.getColumnDataType(COL_NAME10)).isEqualTo(DataType.BOOLEAN); assertThat(tableMetadata.getColumnDataType(COL_NAME11)).isEqualTo(DataType.BLOB); + assertThat(tableMetadata.getColumnDataType(COL_NAME12)).isEqualTo(DataType.DATE); + assertThat(tableMetadata.getColumnDataType(COL_NAME13)).isEqualTo(DataType.TIME); + assertThat(tableMetadata.getColumnDataType(COL_NAME14)).isEqualTo(DataType.TIMESTAMPTZ); + if (isTimestampTypeSupported()) { + assertThat(tableMetadata.getColumnDataType(COL_NAME15)).isEqualTo(DataType.TIMESTAMP); + } assertThat(tableMetadata.getClusteringOrder(COL_NAME1)).isNull(); assertThat(tableMetadata.getClusteringOrder(COL_NAME2)).isNull(); @@ -201,6 +231,10 @@ public void getTableMetadata_CorrectTableGiven_ShouldReturnCorrectMetadata() assertThat(tableMetadata.getClusteringOrder(COL_NAME9)).isNull(); assertThat(tableMetadata.getClusteringOrder(COL_NAME10)).isNull(); assertThat(tableMetadata.getClusteringOrder(COL_NAME11)).isNull(); + assertThat(tableMetadata.getClusteringOrder(COL_NAME12)).isNull(); + assertThat(tableMetadata.getClusteringOrder(COL_NAME13)).isNull(); + assertThat(tableMetadata.getClusteringOrder(COL_NAME14)).isNull(); + assertThat(tableMetadata.getClusteringOrder(COL_NAME15)).isNull(); assertThat(tableMetadata.getSecondaryIndexNames().size()).isEqualTo(2); assertThat(tableMetadata.getSecondaryIndexNames().contains(COL_NAME5)).isTrue(); @@ -467,7 +501,7 @@ public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorre try { // Arrange Map options = getCreationOptions(); - TableMetadata metadata = + TableMetadata.Builder metadataBuilder = TableMetadata.newBuilder() .addColumn(COL_NAME1, DataType.INT) .addColumn(COL_NAME2, DataType.INT) @@ -478,13 +512,19 @@ public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorre .addColumn(COL_NAME7, DataType.BOOLEAN) .addColumn(COL_NAME8, DataType.BLOB) .addColumn(COL_NAME9, DataType.TEXT) + .addColumn(COL_NAME10, DataType.DATE) + .addColumn(COL_NAME11, DataType.TIME) + .addColumn(COL_NAME12, DataType.TIMESTAMPTZ) .addPartitionKey(COL_NAME1) - .addSecondaryIndex(COL_NAME9) - .build(); + .addSecondaryIndex(COL_NAME9); + if (isTimestampTypeSupported()) { + metadataBuilder = metadataBuilder.addColumn(COL_NAME13, DataType.TIMESTAMP); + } + TableMetadata metadata = metadataBuilder.build(); admin.createTable(namespace1, TABLE4, metadata, options); transactionManager = transactionFactory.getTransactionManager(); - transactionManager.put( - Put.newBuilder() + InsertBuilder.Buildable insert = + Insert.newBuilder() .namespace(namespace1) .table(TABLE4) .partitionKey(Key.ofInt(COL_NAME1, 1)) @@ -496,7 +536,18 @@ public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorre .booleanValue(COL_NAME7, true) .blobValue(COL_NAME8, "8".getBytes(StandardCharsets.UTF_8)) .textValue(COL_NAME9, "9") - .build()); + .dateValue(COL_NAME10, LocalDate.of(2020, 6, 2)) + .timeValue(COL_NAME11, LocalTime.of(12, 2, 6, 123_456_000)) + .timestampTZValue( + COL_NAME12, + LocalDateTime.of(LocalDate.of(2020, 6, 2), LocalTime.of(12, 2, 6, 123_000_000)) + 
.toInstant(ZoneOffset.UTC)); + if (isTimestampTypeSupported()) { + insert.timestampValue( + COL_NAME13, + LocalDateTime.of(LocalDate.of(2020, 6, 2), LocalTime.of(12, 2, 6, 123_000_000))); + } + transactionManager.insert(insert.build()); // Act admin.createIndex(namespace1, TABLE4, COL_NAME2, options); @@ -508,6 +559,12 @@ public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorre admin.createIndex(namespace1, TABLE4, COL_NAME7, options); } admin.createIndex(namespace1, TABLE4, COL_NAME8, options); + admin.createIndex(namespace1, TABLE4, COL_NAME10, options); + admin.createIndex(namespace1, TABLE4, COL_NAME11, options); + admin.createIndex(namespace1, TABLE4, COL_NAME12, options); + if (isTimestampTypeSupported()) { + admin.createIndex(namespace1, TABLE4, COL_NAME13, options); + } // Assert assertThat(admin.indexExists(namespace1, TABLE4, COL_NAME2)).isTrue(); @@ -519,16 +576,38 @@ public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorre assertThat(admin.indexExists(namespace1, TABLE4, COL_NAME7)).isTrue(); } assertThat(admin.indexExists(namespace1, TABLE4, COL_NAME8)).isTrue(); + assertThat(admin.indexExists(namespace1, TABLE4, COL_NAME9)).isTrue(); + assertThat(admin.indexExists(namespace1, TABLE4, COL_NAME10)).isTrue(); + assertThat(admin.indexExists(namespace1, TABLE4, COL_NAME11)).isTrue(); + assertThat(admin.indexExists(namespace1, TABLE4, COL_NAME12)).isTrue(); + if (isTimestampTypeSupported()) { + assertThat(admin.indexExists(namespace1, TABLE4, COL_NAME13)).isTrue(); + } + + Set actualSecondaryIndexNames = + admin.getTableMetadata(namespace1, TABLE4).getSecondaryIndexNames(); + assertThat(actualSecondaryIndexNames) + .contains( + COL_NAME2, + COL_NAME3, + COL_NAME4, + COL_NAME5, + COL_NAME6, + COL_NAME8, + COL_NAME9, + COL_NAME10, + COL_NAME11, + COL_NAME12); + int indexCount = 10; if (isIndexOnBooleanColumnSupported()) { - assertThat(admin.getTableMetadata(namespace1, TABLE4).getSecondaryIndexNames()) - .containsOnly( - COL_NAME2, COL_NAME3, COL_NAME4, COL_NAME5, COL_NAME6, COL_NAME7, COL_NAME8, - COL_NAME9); - } else { - assertThat(admin.getTableMetadata(namespace1, TABLE4).getSecondaryIndexNames()) - .containsOnly( - COL_NAME2, COL_NAME3, COL_NAME4, COL_NAME5, COL_NAME6, COL_NAME8, COL_NAME9); + assertThat(actualSecondaryIndexNames).contains(COL_NAME7); + indexCount++; } + if (isTimestampTypeSupported()) { + assertThat(actualSecondaryIndexNames).contains(COL_NAME13); + indexCount++; + } + assertThat(actualSecondaryIndexNames).hasSize(indexCount); } finally { admin.dropTable(namespace1, TABLE4, true); @@ -883,4 +962,8 @@ public void getNamespaceNames_ShouldReturnCreatedNamespaces() throws ExecutionEx protected boolean isIndexOnBooleanColumnSupported() { return true; } + + protected boolean isTimestampTypeSupported() { + return true; + } } diff --git a/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionAdminRepairIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionAdminRepairIntegrationTestBase.java index bc43a09d33..637bb4d497 100644 --- a/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionAdminRepairIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionAdminRepairIntegrationTestBase.java @@ -42,32 +42,46 @@ public abstract class DistributedTransactionAdminRepairIntegrationTestBase { protected static final String COL_NAME9 = "c9"; protected static final String COL_NAME10 = "c10"; protected static final 
 String COL_NAME11 = "c11";
-
-  protected static final TableMetadata TABLE_METADATA =
-      TableMetadata.newBuilder()
-          .addColumn(COL_NAME1, DataType.INT)
-          .addColumn(COL_NAME2, DataType.TEXT)
-          .addColumn(COL_NAME3, DataType.TEXT)
-          .addColumn(COL_NAME4, DataType.INT)
-          .addColumn(COL_NAME5, DataType.INT)
-          .addColumn(COL_NAME6, DataType.TEXT)
-          .addColumn(COL_NAME7, DataType.BIGINT)
-          .addColumn(COL_NAME8, DataType.FLOAT)
-          .addColumn(COL_NAME9, DataType.DOUBLE)
-          .addColumn(COL_NAME10, DataType.BOOLEAN)
-          .addColumn(COL_NAME11, DataType.BLOB)
-          .addPartitionKey(COL_NAME2)
-          .addPartitionKey(COL_NAME1)
-          .addClusteringKey(COL_NAME4, Scan.Ordering.Order.ASC)
-          .addClusteringKey(COL_NAME3, Scan.Ordering.Order.DESC)
-          .addSecondaryIndex(COL_NAME5)
-          .addSecondaryIndex(COL_NAME6)
-          .build();
+  private static final String COL_NAME12 = "c12";
+  private static final String COL_NAME13 = "c13";
+  private static final String COL_NAME14 = "c14";
+  private static final String COL_NAME15 = "c15";
 
   protected DistributedTransactionAdmin admin;
   protected DistributedStorageAdmin storageAdmin;
   protected AdminTestUtils adminTestUtils = null;
 
+  protected TableMetadata getTableMetadata() {
+    TableMetadata.Builder builder =
+        TableMetadata.newBuilder()
+            .addColumn(COL_NAME1, DataType.INT)
+            .addColumn(COL_NAME2, DataType.TEXT)
+            .addColumn(COL_NAME3, DataType.TEXT)
+            .addColumn(COL_NAME4, DataType.INT)
+            .addColumn(COL_NAME5, DataType.INT)
+            .addColumn(COL_NAME6, DataType.TEXT)
+            .addColumn(COL_NAME7, DataType.BIGINT)
+            .addColumn(COL_NAME8, DataType.FLOAT)
+            .addColumn(COL_NAME9, DataType.DOUBLE)
+            .addColumn(COL_NAME10, DataType.BOOLEAN)
+            .addColumn(COL_NAME11, DataType.BLOB)
+            .addColumn(COL_NAME12, DataType.DATE)
+            .addColumn(COL_NAME13, DataType.TIME)
+            .addColumn(COL_NAME14, DataType.TIMESTAMPTZ);
+    if (isTimestampTypeSupported()) {
+      builder.addColumn(COL_NAME15, DataType.TIMESTAMP);
+    }
+    builder
+        .addPartitionKey(COL_NAME2)
+        .addPartitionKey(COL_NAME1)
+        .addClusteringKey(COL_NAME4, Scan.Ordering.Order.ASC)
+        .addClusteringKey(COL_NAME3, Scan.Ordering.Order.DESC)
+        .addSecondaryIndex(COL_NAME5)
+        .addSecondaryIndex(COL_NAME6);
+    return builder.build();
+  }
+
   @BeforeAll
   public void beforeAll() throws Exception {
     initialize(TEST_NAME);
@@ -126,7 +140,7 @@ private void createTable() throws ExecutionException {
     Map<String, String> options = getCreationOptions();
     admin.createCoordinatorTables(options);
     admin.createNamespace(getNamespace(), options);
-    admin.createTable(getNamespace(), getTable(), TABLE_METADATA, options);
+    admin.createTable(getNamespace(), getTable(), getTableMetadata(), options);
   }
 
   protected Map<String, String> getCreationOptions() {
@@ -152,11 +166,11 @@ protected void afterEach() throws Exception {
   @Test
   public void repairTable_ForExistingTableAndMetadata_ShouldDoNothing() throws Exception {
     // Act
-    admin.repairTable(getNamespace(), getTable(), TABLE_METADATA, getCreationOptions());
+    admin.repairTable(getNamespace(), getTable(), getTableMetadata(), getCreationOptions());
 
     // Assert
     assertThat(adminTestUtils.tableExists(getNamespace(), getTable())).isTrue();
-    assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(TABLE_METADATA);
+    assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(getTableMetadata());
   }
 
   @Test
@@ -167,13 +181,13 @@ public void repairTableAndCoordinatorTables_ForDeletedMetadataTable_ShouldRepair
 
     // Act
     waitForDifferentSessionDdl();
-    admin.repairTable(getNamespace(), getTable(), TABLE_METADATA, getCreationOptions());
+
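// The table metadata is now built per call because the column set depends on the
// overridable isTimestampTypeSupported(), so it can no longer be a static constant.
// A subclass for a backend without TIMESTAMP support would opt out as sketched below
// (the subclass name is hypothetical):
//
//   public class MyBackendTransactionAdminRepairIntegrationTest
//       extends DistributedTransactionAdminRepairIntegrationTestBase {
//     @Override
//     protected boolean isTimestampTypeSupported() {
//       return false; // getTableMetadata() then omits COL_NAME15 (TIMESTAMP)
//     }
//   }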
admin.repairTable(getNamespace(), getTable(), getTableMetadata(), getCreationOptions()); admin.repairCoordinatorTables(getCreationOptions()); // Assert waitForDifferentSessionDdl(); assertThat(admin.tableExists(getNamespace(), TABLE)).isTrue(); - assertThat(admin.getTableMetadata(getNamespace(), TABLE)).isEqualTo(TABLE_METADATA); + assertThat(admin.getTableMetadata(getNamespace(), TABLE)).isEqualTo(getTableMetadata()); assertThat(adminTestUtils.areTableAndMetadataForCoordinatorTablesPresent()).isTrue(); } @@ -184,12 +198,12 @@ public void repairTableAndCoordinatorTables_ForTruncatedMetadataTable_ShouldRepa adminTestUtils.truncateMetadataTable(); // Act - admin.repairTable(getNamespace(), getTable(), TABLE_METADATA, getCreationOptions()); + admin.repairTable(getNamespace(), getTable(), getTableMetadata(), getCreationOptions()); admin.repairCoordinatorTables(getCreationOptions()); // Assert assertThat(admin.tableExists(getNamespace(), TABLE)).isTrue(); - assertThat(admin.getTableMetadata(getNamespace(), TABLE)).isEqualTo(TABLE_METADATA); + assertThat(admin.getTableMetadata(getNamespace(), TABLE)).isEqualTo(getTableMetadata()); assertThat(adminTestUtils.areTableAndMetadataForCoordinatorTablesPresent()).isTrue(); } @@ -199,11 +213,11 @@ public void repairTable_ForCorruptedMetadataTable_ShouldRepairProperly() throws adminTestUtils.corruptMetadata(getNamespace(), getTable()); // Act - admin.repairTable(getNamespace(), getTable(), TABLE_METADATA, getCreationOptions()); + admin.repairTable(getNamespace(), getTable(), getTableMetadata(), getCreationOptions()); // Assert assertThat(admin.tableExists(getNamespace(), getTable())).isTrue(); - assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(TABLE_METADATA); + assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(getTableMetadata()); assertThat(adminTestUtils.areTableAndMetadataForCoordinatorTablesPresent()).isTrue(); } @@ -239,12 +253,12 @@ public void repairTable_ForNonExistingTableButExistingMetadata_ShouldCreateTable // Act waitForDifferentSessionDdl(); - admin.repairTable(getNamespace(), getTable(), TABLE_METADATA, getCreationOptions()); + admin.repairTable(getNamespace(), getTable(), getTableMetadata(), getCreationOptions()); // Assert waitForDifferentSessionDdl(); assertThat(adminTestUtils.tableExists(getNamespace(), getTable())).isTrue(); - assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(TABLE_METADATA); + assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(getTableMetadata()); assertThat(adminTestUtils.areTableAndMetadataForCoordinatorTablesPresent()).isTrue(); } @@ -317,4 +331,8 @@ private boolean coordinatorNamespaceMetadataExits() throws ExecutionException { .orElse(Coordinator.NAMESPACE); return storageAdmin.namespaceExists(coordinatorNamespace); } + + protected boolean isTimestampTypeSupported() { + return true; + } } diff --git a/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionIntegrationTestBase.java index d1925dcfb6..7779090ece 100644 --- a/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionIntegrationTestBase.java @@ -16,15 +16,32 @@ import com.scalar.db.exception.transaction.TransactionException; import com.scalar.db.exception.transaction.TransactionNotFoundException; import 
com.scalar.db.exception.transaction.UnsatisfiedConditionException; +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.Column; import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.IntValue; import com.scalar.db.io.Key; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import com.scalar.db.io.Value; import com.scalar.db.service.TransactionFactory; import com.scalar.db.util.TestUtils; import com.scalar.db.util.TestUtils.ExpectedResult; import com.scalar.db.util.TestUtils.ExpectedResult.ExpectedResultBuilder; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -34,6 +51,7 @@ import java.util.Properties; import java.util.stream.IntStream; import org.assertj.core.api.Assertions; +import org.assertj.core.util.Lists; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; @@ -53,19 +71,19 @@ public abstract class DistributedTransactionIntegrationTestBase { protected static final String ACCOUNT_TYPE = "account_type"; protected static final String BALANCE = "balance"; protected static final String SOME_COLUMN = "some_column"; + protected static final String BOOLEAN_COL = "boolean_col"; + protected static final String BIGINT_COL = "bigint_col"; + protected static final String FLOAT_COL = "float_col"; + protected static final String DOUBLE_COL = "double_col"; + protected static final String TEXT_COL = "text_col"; + protected static final String BLOB_COL = "blob_col"; + protected static final String DATE_COL = "date_col"; + protected static final String TIME_COL = "time_col"; + protected static final String TIMESTAMP_COL = "timestamp_col"; + protected static final String TIMESTAMPTZ_COL = "timestamptz_col"; protected static final int INITIAL_BALANCE = 1000; protected static final int NUM_ACCOUNTS = 4; protected static final int NUM_TYPES = 4; - protected static final TableMetadata TABLE_METADATA = - TableMetadata.newBuilder() - .addColumn(ACCOUNT_ID, DataType.INT) - .addColumn(ACCOUNT_TYPE, DataType.INT) - .addColumn(BALANCE, DataType.INT) - .addColumn(SOME_COLUMN, DataType.INT) - .addPartitionKey(ACCOUNT_ID) - .addClusteringKey(ACCOUNT_TYPE) - .addSecondaryIndex(SOME_COLUMN) - .build(); protected DistributedTransactionAdmin admin; protected DistributedTransactionManager manager; protected String namespace; @@ -93,10 +111,32 @@ protected String getNamespaceBaseName() { } private void createTables() throws ExecutionException { + TableMetadata.Builder tableMetadata = + TableMetadata.newBuilder() + .addColumn(ACCOUNT_ID, DataType.INT) + .addColumn(ACCOUNT_TYPE, DataType.INT) + .addColumn(BALANCE, DataType.INT) + .addColumn(SOME_COLUMN, DataType.INT) + .addColumn(BOOLEAN_COL, DataType.BOOLEAN) + .addColumn(BIGINT_COL, DataType.BIGINT) + .addColumn(FLOAT_COL, DataType.FLOAT) + .addColumn(DOUBLE_COL, DataType.DOUBLE) + .addColumn(TEXT_COL, DataType.TEXT) + .addColumn(BLOB_COL, DataType.BLOB) + .addColumn(DATE_COL, DataType.DATE) + .addColumn(TIME_COL, DataType.TIME) + 
.addColumn(TIMESTAMPTZ_COL, DataType.TIMESTAMPTZ) + .addPartitionKey(ACCOUNT_ID) + .addClusteringKey(ACCOUNT_TYPE) + .addSecondaryIndex(SOME_COLUMN); + if (isTimestampTypeSupported()) { + tableMetadata.addColumn(TIMESTAMP_COL, DataType.TIMESTAMP); + } + Map options = getCreationOptions(); admin.createCoordinatorTables(true, options); admin.createNamespace(namespace, true, options); - admin.createTable(namespace, TABLE, TABLE_METADATA, true, options); + admin.createTable(namespace, TABLE, tableMetadata.build(), true, options); } protected Map getCreationOptions() { @@ -145,18 +185,14 @@ public void get_GetGivenForCommittedRecord_ShouldReturnRecord() throws Transacti // Arrange populateRecords(); DistributedTransaction transaction = manager.start(); - Get get = prepareGet(0, 0); + Get get = prepareGet(2, 3); // Act Optional result = transaction.get(get); transaction.commit(); // Assert - assertThat(result.isPresent()).isTrue(); - assertThat(result.get().getInt(ACCOUNT_ID)).isEqualTo(0); - assertThat(result.get().getInt(ACCOUNT_TYPE)).isEqualTo(0); - assertThat(getBalance(result.get())).isEqualTo(INITIAL_BALANCE); - assertThat(result.get().getInt(SOME_COLUMN)).isEqualTo(0); + assertResult(2, 3, result); } @Test @@ -190,22 +226,31 @@ public void get_GetWithMatchedConjunctionsGivenForCommittedRecord_ShouldReturnRe // Arrange populateRecords(); DistributedTransaction transaction = manager.start(); - Get get = - Get.newBuilder(prepareGet(1, 1)) + + GetBuilder.BuildableGetFromExistingWithOngoingWhereAnd get = + Get.newBuilder(prepareGet(1, 2)) .where(ConditionBuilder.column(BALANCE).isEqualToInt(INITIAL_BALANCE)) - .and(ConditionBuilder.column(SOME_COLUMN).isEqualToInt(1)) - .build(); + .and(ConditionBuilder.column(SOME_COLUMN).isEqualToInt(2)) + .and(ConditionBuilder.column(BOOLEAN_COL).isNotEqualToBoolean(true)) + .and(ConditionBuilder.column(BIGINT_COL).isLessThanBigInt(BigIntColumn.MAX_VALUE)) + .and(ConditionBuilder.column(FLOAT_COL).isEqualToFloat(0.12F)) + .and(ConditionBuilder.column(DOUBLE_COL).isGreaterThanDouble(-10)) + .and(ConditionBuilder.column(TEXT_COL).isNotEqualToText("foo")) + .and(ConditionBuilder.column(DATE_COL).isLessThanDate(LocalDate.of(3000, 1, 1))) + .and(ConditionBuilder.column(TIME_COL).isNotNullTime()) + .and(ConditionBuilder.column(TIMESTAMPTZ_COL).isNotEqualToTimestampTZ(Instant.EPOCH)); + if (isTimestampTypeSupported()) { + get.and( + ConditionBuilder.column(TIMESTAMP_COL) + .isGreaterThanOrEqualToTimestamp(LocalDateTime.of(1970, 1, 1, 1, 2))); + } // Act - Optional result = transaction.get(get); + Optional result = transaction.get(get.build()); transaction.commit(); // Assert - assertThat(result.isPresent()).isTrue(); - assertThat(result.get().getInt(ACCOUNT_ID)).isEqualTo(1); - assertThat(result.get().getInt(ACCOUNT_TYPE)).isEqualTo(1); - assertThat(getBalance(result.get())).isEqualTo(INITIAL_BALANCE); - assertThat(result.get().getInt(SOME_COLUMN)).isEqualTo(1); + assertResult(1, 2, result); } @Test @@ -241,20 +286,9 @@ public void scan_ScanGivenForCommittedRecord_ShouldReturnRecords() throws Transa // Assert assertThat(results.size()).isEqualTo(3); - assertThat(results.get(0).getInt(ACCOUNT_ID)).isEqualTo(1); - assertThat(results.get(0).getInt(ACCOUNT_TYPE)).isEqualTo(0); - assertThat(getBalance(results.get(0))).isEqualTo(INITIAL_BALANCE); - assertThat(results.get(0).getInt(SOME_COLUMN)).isEqualTo(0); - - assertThat(results.get(1).getInt(ACCOUNT_ID)).isEqualTo(1); - assertThat(results.get(1).getInt(ACCOUNT_TYPE)).isEqualTo(1); - 
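// The removed per-column assertions around here are replaced by
// assertResult(accountId, accountType, result), which derives every expected value from
// the two keys (see prepareNonKeyColumns() later in this file). For example, for
// (accountId = 1, accountType = 2):
//
//   SOME_COLUMN = 1 * 2 = 2
//   BIGINT_COL  = (long) Math.pow(1, 2) = 1L
//   FLOAT_COL   = Float.parseFloat("0." + 1 + 2) = 0.12F
//   TEXT_COL    = "12"
//   TIME_COL    = LocalTime.of(1, 2)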
assertThat(getBalance(results.get(1))).isEqualTo(INITIAL_BALANCE); - assertThat(results.get(1).getInt(SOME_COLUMN)).isEqualTo(1); - - assertThat(results.get(2).getInt(ACCOUNT_ID)).isEqualTo(1); - assertThat(results.get(2).getInt(ACCOUNT_TYPE)).isEqualTo(2); - assertThat(getBalance(results.get(2))).isEqualTo(INITIAL_BALANCE); - assertThat(results.get(2).getInt(SOME_COLUMN)).isEqualTo(2); + assertResult(1, 0, results.get(0)); + assertResult(1, 1, results.get(1)); + assertResult(1, 2, results.get(2)); } @Test @@ -473,15 +507,13 @@ public void scan_ScanGivenForIndexColumn_ShouldReturnRecords() throws Transactio new ExpectedResultBuilder() .column(IntColumn.of(ACCOUNT_ID, 1)) .column(IntColumn.of(ACCOUNT_TYPE, 2)) - .column(IntColumn.of(BALANCE, INITIAL_BALANCE)) - .column(IntColumn.of(SOME_COLUMN, 2)) + .columns(prepareNonKeyColumns(1, 2)) .build()); expectedResults.add( new ExpectedResultBuilder() .column(IntColumn.of(ACCOUNT_ID, 2)) .column(IntColumn.of(ACCOUNT_TYPE, 1)) - .column(IntColumn.of(BALANCE, INITIAL_BALANCE)) - .column(IntColumn.of(SOME_COLUMN, 2)) + .columns(prepareNonKeyColumns(2, 1)) .build()); // Act @@ -546,8 +578,7 @@ public void scan_ScanAllGivenForCommittedRecord_ShouldReturnRecords() new ExpectedResultBuilder() .column(IntColumn.of(ACCOUNT_ID, i)) .column(IntColumn.of(ACCOUNT_TYPE, j)) - .column(IntColumn.of(BALANCE, INITIAL_BALANCE)) - .column(IntColumn.of(SOME_COLUMN, i * j)) + .columns(prepareNonKeyColumns(i, j)) .build()))); TestUtils.assertResultsContainsExactlyInAnyOrder(results, expectedResults); } @@ -556,22 +587,7 @@ public void scan_ScanAllGivenForCommittedRecord_ShouldReturnRecords() public void scan_ScanAllGivenWithLimit_ShouldReturnLimitedAmountOfRecords() throws TransactionException { // Arrange - put( - new Put(Key.ofInt(ACCOUNT_ID, 1), Key.ofInt(ACCOUNT_TYPE, 1)) - .forNamespace(namespace) - .forTable(TABLE)); - put( - new Put(Key.ofInt(ACCOUNT_ID, 1), Key.ofInt(ACCOUNT_TYPE, 2)) - .forNamespace(namespace) - .forTable(TABLE)); - put( - new Put(Key.ofInt(ACCOUNT_ID, 2), Key.ofInt(ACCOUNT_TYPE, 1)) - .forNamespace(namespace) - .forTable(TABLE)); - put( - new Put(Key.ofInt(ACCOUNT_ID, 3), Key.ofInt(ACCOUNT_TYPE, 0)) - .forNamespace(namespace) - .forTable(TABLE)); + insert(prepareInsert(1, 1), prepareInsert(1, 2), prepareInsert(2, 1), prepareInsert(3, 0)); DistributedTransaction scanAllTransaction = manager.start(); ScanAll scanAll = prepareScanAll().withLimit(2); @@ -587,26 +603,22 @@ public void scan_ScanAllGivenWithLimit_ShouldReturnLimitedAmountOfRecords() new ExpectedResultBuilder() .column(IntColumn.of(ACCOUNT_ID, 1)) .column(IntColumn.of(ACCOUNT_TYPE, 1)) - .column(IntColumn.ofNull(BALANCE)) - .column(IntColumn.ofNull(SOME_COLUMN)) + .columns(prepareNonKeyColumns(1, 1)) .build(), new ExpectedResultBuilder() .column(IntColumn.of(ACCOUNT_ID, 1)) .column(IntColumn.of(ACCOUNT_TYPE, 2)) - .column(IntColumn.ofNull(BALANCE)) - .column(IntColumn.ofNull(SOME_COLUMN)) + .columns(prepareNonKeyColumns(1, 2)) .build(), new ExpectedResultBuilder() .column(IntColumn.of(ACCOUNT_ID, 2)) .column(IntColumn.of(ACCOUNT_TYPE, 1)) - .column(IntColumn.ofNull(BALANCE)) - .column(IntColumn.ofNull(SOME_COLUMN)) + .columns(prepareNonKeyColumns(2, 1)) .build(), new ExpectedResultBuilder() .column(IntColumn.of(ACCOUNT_ID, 3)) .column(IntColumn.of(ACCOUNT_TYPE, 0)) - .column(IntColumn.ofNull(BALANCE)) - .column(IntColumn.ofNull(SOME_COLUMN)) + .columns(prepareNonKeyColumns(3, 0)) .build())); assertThat(results).hasSize(2); } @@ -656,19 +668,17 @@ public void 
scanAll_ScanAllGivenForNonExisting_ShouldReturnEmpty() throws Transa @Test public void putAndCommit_PutGivenForNonExisting_ShouldCreateRecord() throws TransactionException { // Arrange - int expected = INITIAL_BALANCE; - Put put = + PutBuilder.Buildable put = Put.newBuilder() .namespace(namespace) .table(TABLE) .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) - .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) - .intValue(BALANCE, expected) - .build(); + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(0, 0).forEach(put::value); DistributedTransaction transaction = manager.start(); // Act - transaction.put(put); + transaction.put(put.build()); transaction.commit(); // Assert @@ -676,28 +686,33 @@ public void putAndCommit_PutGivenForNonExisting_ShouldCreateRecord() throws Tran DistributedTransaction another = manager.start(); Optional result = another.get(get); another.commit(); - assertThat(result.isPresent()).isTrue(); - assertThat(getBalance(result.get())).isEqualTo(expected); + + assertResult(0, 0, result); } @Test public void putAndCommit_PutGivenForExisting_ShouldUpdateRecord() throws TransactionException { // Arrange - populateRecords(); + InsertBuilder.Buildable insert = + Insert.newBuilder() + .namespace(namespace) + .table(TABLE) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(2, 2).forEach(insert::value); + insert(insert.build()); DistributedTransaction transaction = manager.start(); // Act - int expected = INITIAL_BALANCE + 100; - Put put = + PutBuilder.Buildable put = Put.newBuilder() .namespace(namespace) .table(TABLE) .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) - .intValue(BALANCE, expected) - .enableImplicitPreRead() - .build(); - transaction.put(put); + .enableImplicitPreRead(); + prepareNonKeyColumns(0, 0).forEach(put::value); + transaction.put(put.build()); transaction.commit(); // Assert @@ -705,8 +720,7 @@ public void putAndCommit_PutGivenForExisting_ShouldUpdateRecord() throws Transac Optional actual = another.get(prepareGet(0, 0)); another.commit(); - assertThat(actual.isPresent()).isTrue(); - assertThat(getBalance(actual.get())).isEqualTo(expected); + assertResult(0, 0, actual); } @Test @@ -912,18 +926,25 @@ public void mutateAndCommit_AfterRead_ShouldMutateRecordsProperly() throws Trans @Test public void mutateAndCommit_ShouldMutateRecordsProperly() throws TransactionException { // Arrange - populateRecords(); - Put put = - Put.newBuilder(preparePut(0, 0)) - .intValue(BALANCE, INITIAL_BALANCE - 100) - .enableImplicitPreRead() - .build(); - Delete delete = prepareDelete(1, 0); + InsertBuilder.Buildable insertRecord1 = + Insert.newBuilder() + .namespace(namespace) + .table(TABLE) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(1, 1).forEach(insertRecord1::value); + Insert insertRecord2 = prepareInsert(1, 0); + + insert(insertRecord1.build(), insertRecord2); + + PutBuilder.Buildable putRecord1 = Put.newBuilder(preparePut(0, 0)).enableImplicitPreRead(); + prepareNonKeyColumns(0, 0).forEach(putRecord1::value); + Delete deleteRecord2 = prepareDelete(1, 0); DistributedTransaction transaction = manager.begin(); // Act - transaction.mutate(Arrays.asList(put, delete)); + transaction.mutate(Arrays.asList(putRecord1.build(), deleteRecord2)); transaction.commit(); // Assert @@ -932,8 +953,7 @@ public void mutateAndCommit_ShouldMutateRecordsProperly() throws TransactionExce Optional result2 = 
 another.get(prepareGet(1, 0));
     another.commit();
 
-    assertThat(result1.isPresent()).isTrue();
-    assertThat(result1.get().getInt(BALANCE)).isEqualTo(INITIAL_BALANCE - 100);
+    assertResult(0, 0, result1);
     assertThat(result2.isPresent()).isFalse();
   }
 
@@ -1341,37 +1361,37 @@ public void mutate_DefaultNamespaceGiven_ShouldWorkProperly() throws Transaction
   @Test
   public void put_withPutIfWithVerifiedCondition_shouldPutProperly() throws TransactionException {
     // Arrange
-    int someColumnValue = 10;
-    Put initialData =
-        Put.newBuilder(preparePut(0, 0))
-            .intValue(BALANCE, INITIAL_BALANCE)
-            .intValue(SOME_COLUMN, someColumnValue)
-            .build();
-    put(initialData);
-
-    int updatedBalance = 2;
-    Put putIf =
-        Put.newBuilder(initialData)
-            .intValue(BALANCE, updatedBalance)
-            .condition(
-                ConditionBuilder.putIf(
-                        ConditionBuilder.column(BALANCE).isEqualToInt(INITIAL_BALANCE))
-                    .and(ConditionBuilder.column(SOME_COLUMN).isNotNullInt())
-                    .build())
-            .enableImplicitPreRead()
-            .build();
+    PutBuilder.Buildable initialData = Put.newBuilder(preparePut(2, 3));
+    prepareNonKeyColumns(1, 2).forEach(initialData::value);
+    put(initialData.build());
+
+    List<ConditionalExpression> conditions =
+        Lists.newArrayList(
+            ConditionBuilder.column(BALANCE).isEqualToInt(INITIAL_BALANCE),
+            ConditionBuilder.column(SOME_COLUMN).isNotNullInt(),
+            ConditionBuilder.column(BOOLEAN_COL).isNotEqualToBoolean(true),
+            ConditionBuilder.column(BIGINT_COL).isLessThanBigInt(BigIntColumn.MAX_VALUE),
+            ConditionBuilder.column(FLOAT_COL).isEqualToFloat(0.12F),
+            ConditionBuilder.column(DOUBLE_COL).isGreaterThanDouble(-10),
+            ConditionBuilder.column(TEXT_COL).isNotEqualToText("foo"),
+            ConditionBuilder.column(DATE_COL).isLessThanDate(LocalDate.of(3000, 1, 1)),
+            ConditionBuilder.column(TIME_COL).isNotNullTime(),
+            ConditionBuilder.column(TIMESTAMPTZ_COL).isNotEqualToTimestampTZ(Instant.EPOCH));
+    if (isTimestampTypeSupported()) {
+      conditions.add(
+          ConditionBuilder.column(TIMESTAMP_COL)
+              .isGreaterThanOrEqualToTimestamp(LocalDateTime.of(1970, 1, 1, 1, 2)));
+    }
+    PutBuilder.Buildable putIf = Put.newBuilder(initialData.build()).clearValues();
+    prepareNonKeyColumns(2, 3).forEach(putIf::value);
+    putIf.condition(ConditionBuilder.putIf(conditions)).enableImplicitPreRead();
 
     // Act
-    put(putIf);
+    put(putIf.build());
 
     // Assert
-    Optional<Result> optResult = get(prepareGet(0, 0));
-    assertThat(optResult.isPresent()).isTrue();
-    Result result = optResult.get();
-    assertThat(result.getInt(ACCOUNT_ID)).isEqualTo(0);
-    assertThat(result.getInt(ACCOUNT_TYPE)).isEqualTo(0);
-    assertThat(result.getInt(BALANCE)).isEqualTo(updatedBalance);
-    assertThat(result.getInt(SOME_COLUMN)).isEqualTo(someColumnValue);
+    Optional<Result> optResult = get(prepareGet(2, 3));
+    assertResult(2, 3, optResult);
   }
 
   @Test
@@ -1426,23 +1446,33 @@ public void put_withPutIfNotExistsWhenRecordDoesNotExist_shouldPutProperly()
   public void delete_withDeleteIfWithVerifiedCondition_shouldDeleteProperly()
       throws TransactionException {
     // Arrange
-    Put initialData = Put.newBuilder(preparePut(0, 0)).intValue(BALANCE, INITIAL_BALANCE).build();
-    put(initialData);
+    PutBuilder.Buildable initialData = Put.newBuilder(preparePut(1, 2));
+    prepareNonKeyColumns(1, 2).forEach(initialData::value);
+    put(initialData.build());
+
+    List<ConditionalExpression> conditions =
+        Lists.newArrayList(
+            ConditionBuilder.column(BALANCE).isEqualToInt(INITIAL_BALANCE),
+            ConditionBuilder.column(SOME_COLUMN).isNotNullInt(),
+            ConditionBuilder.column(BOOLEAN_COL).isNotEqualToBoolean(true),
+
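// These tests switch from the chained builder form
// (ConditionBuilder.putIf(c1).and(c2).build()) to the overloads that accept a prebuilt
// List<ConditionalExpression>, so conditions can be appended behind a capability check.
// A minimal sketch of the pattern (the column names here are placeholders):
//
//   List<ConditionalExpression> conditions =
//       Lists.newArrayList(ConditionBuilder.column("c1").isNotNullInt());
//   if (someCapabilitySupported) {
//     conditions.add(ConditionBuilder.column("c2").isNotNullText());
//   }
//   Delete deleteIf =
//       Delete.newBuilder(prepareDelete(1, 2))
//           .condition(ConditionBuilder.deleteIf(conditions))
//           .build();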
ConditionBuilder.column(BIGINT_COL).isLessThanBigInt(BigIntColumn.MAX_VALUE), + ConditionBuilder.column(FLOAT_COL).isEqualToFloat(0.12F), + ConditionBuilder.column(DOUBLE_COL).isGreaterThanDouble(-10), + ConditionBuilder.column(TEXT_COL).isNotEqualToText("foo"), + ConditionBuilder.column(DATE_COL).isLessThanDate(LocalDate.of(3000, 1, 1)), + ConditionBuilder.column(TIME_COL).isNotNullTime(), + ConditionBuilder.column(TIMESTAMPTZ_COL).isNotEqualToTimestampTZ(Instant.EPOCH)); Delete deleteIf = - Delete.newBuilder(prepareDelete(0, 0)) - .condition( - ConditionBuilder.deleteIf( - ConditionBuilder.column(BALANCE).isEqualToInt(INITIAL_BALANCE)) - .and(ConditionBuilder.column(SOME_COLUMN).isNullInt()) - .build()) + Delete.newBuilder(prepareDelete(1, 2)) + .condition(ConditionBuilder.deleteIf(conditions)) .build(); // Act delete(deleteIf); // Assert - Optional optResult = get(prepareGet(0, 0)); + Optional optResult = get(prepareGet(1, 2)); assertThat(optResult.isPresent()).isFalse(); } @@ -1598,19 +1628,10 @@ public void put_withPutIfWithNonVerifiedCondition_shouldThrowUnsatisfiedConditio public void insertAndCommit_InsertGivenForNonExisting_ShouldCreateRecord() throws TransactionException { // Arrange - int expected = INITIAL_BALANCE; - Insert insert = - Insert.newBuilder() - .namespace(namespace) - .table(TABLE) - .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) - .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) - .intValue(BALANCE, expected) - .build(); DistributedTransaction transaction = manager.start(); // Act - transaction.insert(insert); + transaction.insert(prepareInsert(0, 0)); transaction.commit(); // Assert @@ -1618,8 +1639,7 @@ public void insertAndCommit_InsertGivenForNonExisting_ShouldCreateRecord() DistributedTransaction another = manager.start(); Optional result = another.get(get); another.commit(); - assertThat(result.isPresent()).isTrue(); - assertThat(getBalance(result.get())).isEqualTo(expected); + assertResult(0, 0, result); } @Test @@ -1655,19 +1675,17 @@ public void insertAndCommit_InsertGivenForNonExisting_ShouldCreateRecord() public void upsertAndCommit_UpsertGivenForNonExisting_ShouldCreateRecord() throws TransactionException { // Arrange - int expected = INITIAL_BALANCE; - Upsert upsert = + UpsertBuilder.Buildable upsert = Upsert.newBuilder() .namespace(namespace) .table(TABLE) .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) - .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) - .intValue(BALANCE, expected) - .build(); + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(0, 0).forEach(upsert::value); DistributedTransaction transaction = manager.start(); // Act - transaction.upsert(upsert); + transaction.upsert(upsert.build()); transaction.commit(); // Assert @@ -1675,28 +1693,33 @@ public void upsertAndCommit_UpsertGivenForNonExisting_ShouldCreateRecord() DistributedTransaction another = manager.start(); Optional result = another.get(get); another.commit(); - assertThat(result.isPresent()).isTrue(); - assertThat(getBalance(result.get())).isEqualTo(expected); + + assertResult(0, 0, result); } @Test public void upsertAndCommit_UpsertGivenForExisting_ShouldUpdateRecord() throws TransactionException { // Arrange - populateRecords(); + InsertBuilder.Buildable insert = + Insert.newBuilder() + .namespace(namespace) + .table(TABLE) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(1, 1).forEach(insert::value); + insert(insert.build()); DistributedTransaction transaction = manager.start(); // Act - int expected = INITIAL_BALANCE + 
100; - Upsert upsert = + UpsertBuilder.Buildable upsert = Upsert.newBuilder() .namespace(namespace) .table(TABLE) .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) - .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) - .intValue(BALANCE, expected) - .build(); - transaction.upsert(upsert); + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(0, 0).forEach(upsert::value); + transaction.upsert(upsert.build()); transaction.commit(); // Assert @@ -1704,8 +1727,7 @@ public void upsertAndCommit_UpsertGivenForExisting_ShouldUpdateRecord() Optional actual = another.get(prepareGet(0, 0)); another.commit(); - assertThat(actual.isPresent()).isTrue(); - assertThat(getBalance(actual.get())).isEqualTo(expected); + assertResult(0, 0, actual); } @Test @@ -1760,20 +1782,26 @@ public void updateAndCommit_UpdateGivenForNonExisting_ShouldDoNothing() public void updateAndCommit_UpdateGivenForExisting_ShouldUpdateRecord() throws TransactionException { // Arrange - populateRecords(); + InsertBuilder.Buildable insert = + Insert.newBuilder() + .namespace(namespace) + .table(TABLE) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(1, 1).forEach(insert::value); + insert(insert.build()); + DistributedTransaction transaction = manager.start(); // Act - int expected = INITIAL_BALANCE + 100; - Update update = + UpdateBuilder.Buildable update = Update.newBuilder() .namespace(namespace) .table(TABLE) .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) - .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) - .intValue(BALANCE, expected) - .build(); - transaction.update(update); + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(0, 0).forEach(update::value); + transaction.update(update.build()); transaction.commit(); // Assert @@ -1781,29 +1809,34 @@ public void updateAndCommit_UpdateGivenForExisting_ShouldUpdateRecord() Optional actual = another.get(prepareGet(0, 0)); another.commit(); - assertThat(actual.isPresent()).isTrue(); - assertThat(getBalance(actual.get())).isEqualTo(expected); + assertResult(0, 0, actual); } @Test public void updateAndCommit_UpdateWithUpdateIfExistsGivenForExisting_ShouldUpdateRecord() throws TransactionException { // Arrange - populateRecords(); + InsertBuilder.Buildable insert = + Insert.newBuilder() + .namespace(namespace) + .table(TABLE) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(1, 1).forEach(insert::value); + insert(insert.build()); + DistributedTransaction transaction = manager.start(); // Act - int expected = INITIAL_BALANCE + 100; - Update update = + UpdateBuilder.Buildable update = Update.newBuilder() .namespace(namespace) .table(TABLE) .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) - .intValue(BALANCE, expected) - .condition(ConditionBuilder.updateIfExists()) - .build(); - transaction.update(update); + .condition(ConditionBuilder.updateIfExists()); + prepareNonKeyColumns(0, 0).forEach(update::value); + transaction.update(update.build()); transaction.commit(); // Assert @@ -1811,51 +1844,57 @@ public void updateAndCommit_UpdateWithUpdateIfExistsGivenForExisting_ShouldUpdat Optional actual = another.get(prepareGet(0, 0)); another.commit(); - assertThat(actual.isPresent()).isTrue(); - assertThat(getBalance(actual.get())).isEqualTo(expected); + assertResult(0, 0, actual); } @Test public void update_withUpdateIfWithVerifiedCondition_shouldUpdateProperly() throws TransactionException { // Arrange - int someColumnValue = 10; - Put 
initialData = - Put.newBuilder(preparePut(0, 0)) - .intValue(BALANCE, INITIAL_BALANCE) - .intValue(SOME_COLUMN, someColumnValue) - .build(); - put(initialData); - - int updatedBalance = 2; - Update updateIf = + InsertBuilder.Buildable insert = + Insert.newBuilder() + .namespace(namespace) + .table(TABLE) + .partitionKey(Key.ofInt(ACCOUNT_ID, 2)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 3)); + prepareNonKeyColumns(1, 2).forEach(insert::value); + insert(insert.build()); + + List conditions = + Lists.newArrayList( + ConditionBuilder.column(BALANCE).isEqualToInt(INITIAL_BALANCE), + ConditionBuilder.column(SOME_COLUMN).isNotNullInt(), + ConditionBuilder.column(BOOLEAN_COL).isNotEqualToBoolean(true), + ConditionBuilder.column(BIGINT_COL).isLessThanBigInt(BigIntColumn.MAX_VALUE), + ConditionBuilder.column(FLOAT_COL).isEqualToFloat(0.12F), + ConditionBuilder.column(DOUBLE_COL).isGreaterThanDouble(-10), + ConditionBuilder.column(TEXT_COL).isNotEqualToText("foo"), + ConditionBuilder.column(DATE_COL).isLessThanDate(LocalDate.of(3000, 1, 1)), + ConditionBuilder.column(TIME_COL).isNotNullTime(), + ConditionBuilder.column(TIMESTAMPTZ_COL).isNotEqualToTimestampTZ(Instant.EPOCH)); + if (isTimestampTypeSupported()) { + conditions.add( + ConditionBuilder.column(TIMESTAMP_COL) + .isGreaterThanOrEqualToTimestamp(LocalDateTime.of(1970, 1, 1, 1, 2))); + } + UpdateBuilder.Buildable updateIf = Update.newBuilder() .namespace(namespace) .table(TABLE) - .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) - .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) - .intValue(BALANCE, updatedBalance) - .condition( - ConditionBuilder.updateIf( - ConditionBuilder.column(BALANCE).isEqualToInt(INITIAL_BALANCE)) - .and(ConditionBuilder.column(SOME_COLUMN).isNotNullInt()) - .build()) - .build(); + .partitionKey(Key.ofInt(ACCOUNT_ID, 2)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 3)) + .condition(ConditionBuilder.updateIf(conditions)); + prepareNonKeyColumns(2, 3).forEach(updateIf::value); DistributedTransaction transaction = manager.start(); // Act - transaction.update(updateIf); + transaction.update(updateIf.build()); transaction.commit(); // Assert - Optional optResult = get(prepareGet(0, 0)); - assertThat(optResult.isPresent()).isTrue(); - Result result = optResult.get(); - assertThat(result.getInt(ACCOUNT_ID)).isEqualTo(0); - assertThat(result.getInt(ACCOUNT_TYPE)).isEqualTo(0); - assertThat(result.getInt(BALANCE)).isEqualTo(updatedBalance); - assertThat(result.getInt(SOME_COLUMN)).isEqualTo(someColumnValue); + Optional actual = get(prepareGet(2, 3)); + assertResult(2, 3, actual); } @Test @@ -2381,6 +2420,19 @@ protected void put(Put put) throws TransactionException { } } + protected void insert(Insert... 
insert) throws TransactionException { + DistributedTransaction tx = manager.start(); + try { + for (Insert i : insert) { + tx.insert(i); + } + tx.commit(); + } catch (TransactionException e) { + tx.rollback(); + throw e; + } + } + protected void delete(Delete delete) throws TransactionException { DistributedTransaction tx = manager.start(); try { @@ -2394,29 +2446,24 @@ protected void delete(Delete delete) throws TransactionException { protected void populateRecords() throws TransactionException { DistributedTransaction transaction = manager.start(); - IntStream.range(0, NUM_ACCOUNTS) - .forEach( - i -> - IntStream.range(0, NUM_TYPES) - .forEach( - j -> { - Key partitionKey = Key.ofInt(ACCOUNT_ID, i); - Key clusteringKey = Key.ofInt(ACCOUNT_TYPE, j); - Put put = - Put.newBuilder() - .namespace(namespace) - .table(TABLE) - .partitionKey(partitionKey) - .clusteringKey(clusteringKey) - .intValue(BALANCE, INITIAL_BALANCE) - .intValue(SOME_COLUMN, i * j) - .build(); - try { - transaction.put(put); - } catch (CrudException e) { - throw new RuntimeException(e); - } - })); + for (int i = 0; i < NUM_ACCOUNTS; i++) { + for (int j = 0; j < NUM_TYPES; j++) { + Key partitionKey = Key.ofInt(ACCOUNT_ID, i); + Key clusteringKey = Key.ofInt(ACCOUNT_TYPE, j); + InsertBuilder.Buildable insert = + Insert.newBuilder() + .namespace(namespace) + .table(TABLE) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey); + prepareNonKeyColumns(i, j).forEach(insert::value); + try { + transaction.insert(insert.build()); + } catch (CrudException e) { + throw new RuntimeException(e); + } + } + } transaction.commit(); } @@ -2473,6 +2520,20 @@ protected Put preparePut(int id, int type) { .withConsistency(Consistency.LINEARIZABLE); } + protected Insert prepareInsert(int id, int type) { + Key partitionKey = Key.ofInt(ACCOUNT_ID, id); + Key clusteringKey = Key.ofInt(ACCOUNT_TYPE, type); + InsertBuilder.Buildable insert = + Insert.newBuilder() + .namespace(namespace) + .table(TABLE) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey); + prepareNonKeyColumns(id, type).forEach(insert::value); + + return insert.build(); + } + protected List preparePuts() { List puts = new ArrayList<>(); IntStream.range(0, NUM_ACCOUNTS) @@ -2495,4 +2556,118 @@ protected int getBalance(Result result) { assertThat(balance).isPresent(); return balance.get().getAsInt(); } + + protected boolean isTimestampTypeSupported() { + return true; + } + + private void assertResult(int accountId, int accountType, Optional optResult) { + assertResult(accountId, accountType, optResult.orElse(null)); + } + + private void assertResult(int accountId, int accountType, Result result) { + String resultErrorMessage = + String.format("Result { accountId=%d, accountType=%d }", accountId, accountType); + + assertThat(result).describedAs(resultErrorMessage + " is null").isNotNull(); + + List columns = + Lists.newArrayList( + ACCOUNT_ID, + ACCOUNT_TYPE, + BALANCE, + SOME_COLUMN, + BOOLEAN_COL, + BIGINT_COL, + FLOAT_COL, + DOUBLE_COL, + TEXT_COL, + BLOB_COL, + DATE_COL, + TIME_COL, + TIMESTAMPTZ_COL); + if (isTimestampTypeSupported()) { + columns.add(TIMESTAMP_COL); + } + assertThat(result.getContainedColumnNames()) + .describedAs("Columns are missing. %s", resultErrorMessage) + .containsExactlyInAnyOrderElementsOf(columns); + for (String column : columns) { + assertThat(result.isNull(column)) + .describedAs("Column {%s} is null. %s", column, resultErrorMessage) + .isFalse(); + } + + String columnMessage = "Unexpected value for column {%s}. 
%s"; + assertThat(result.getInt(ACCOUNT_ID)) + .describedAs(columnMessage, ACCOUNT_ID, resultErrorMessage) + .isEqualTo(accountId); + assertThat(result.getInt(ACCOUNT_TYPE)) + .describedAs(columnMessage, ACCOUNT_TYPE, resultErrorMessage) + .isEqualTo(accountType); + assertThat(result.getInt(BALANCE)) + .describedAs(columnMessage, BALANCE, resultErrorMessage) + .isEqualTo(INITIAL_BALANCE); + assertThat(result.getInt(SOME_COLUMN)) + .describedAs(columnMessage, SOME_COLUMN, resultErrorMessage) + .isEqualTo(accountId * accountType); + assertThat(result.getBoolean(BOOLEAN_COL)) + .describedAs(columnMessage, BOOLEAN_COL, resultErrorMessage) + .isEqualTo(accountId % 2 == 0); + assertThat(result.getBigInt(BIGINT_COL)) + .describedAs(columnMessage, BIGINT_COL, resultErrorMessage) + .isEqualTo((long) Math.pow(accountId, accountType)); + assertThat(result.getFloat(FLOAT_COL)) + .describedAs(columnMessage, FLOAT_COL, resultErrorMessage) + .isEqualTo(Float.parseFloat("0." + accountId + accountType)); + assertThat(result.getDouble(DOUBLE_COL)) + .describedAs(columnMessage, DOUBLE_COL, resultErrorMessage) + .isEqualTo(Float.parseFloat("10." + accountId + accountType)); + assertThat(result.getText(TEXT_COL)) + .describedAs(columnMessage, TEXT_COL, resultErrorMessage) + .isEqualTo(accountId + "" + accountType); + assertThat(result.getBlobAsBytes(BLOB_COL)) + .describedAs(columnMessage, BLOB_COL, resultErrorMessage) + .isEqualTo((accountId + "" + accountType).getBytes(StandardCharsets.UTF_8)); + assertThat(result.getDate(DATE_COL)) + .describedAs(columnMessage, DATE_COL, resultErrorMessage) + .isEqualTo(LocalDate.ofEpochDay(accountId + accountType)); + assertThat(result.getTime(TIME_COL)) + .describedAs(columnMessage, TIME_COL, resultErrorMessage) + .isEqualTo(LocalTime.of(accountId, accountType)); + assertThat(result.getTimestampTZ(TIMESTAMPTZ_COL)) + .describedAs(columnMessage, TIMESTAMPTZ_COL, resultErrorMessage) + .isEqualTo(LocalDateTime.of(1970, 1, 1, accountId, accountType).toInstant(ZoneOffset.UTC)); + if (isTimestampTypeSupported()) { + assertThat(result.getTimestamp(TIMESTAMP_COL)) + .describedAs(columnMessage, TIMESTAMP_COL, resultErrorMessage) + .isEqualTo(LocalDateTime.of(1970, 1, 1, accountId, accountType)); + } + } + + protected List> prepareNonKeyColumns(int accountId, int accountType) { + ImmutableList.Builder> columns = + new ImmutableList.Builder>() + .add( + IntColumn.of(BALANCE, INITIAL_BALANCE), + IntColumn.of(SOME_COLUMN, accountId * accountType), + BooleanColumn.of(BOOLEAN_COL, accountId % 2 == 0), + BigIntColumn.of(BIGINT_COL, (long) Math.pow(accountId, accountType)), + FloatColumn.of(FLOAT_COL, Float.parseFloat("0." + accountId + accountType)), + DoubleColumn.of(DOUBLE_COL, Float.parseFloat("10." 
+ accountId + accountType)), + TextColumn.of(TEXT_COL, accountId + "" + accountType), + BlobColumn.of( + BLOB_COL, (accountId + "" + accountType).getBytes(StandardCharsets.UTF_8)), + DateColumn.of(DATE_COL, LocalDate.ofEpochDay(accountId + accountType)), + TimeColumn.of(TIME_COL, LocalTime.of(accountId, accountType)), + TimestampTZColumn.of( + TIMESTAMPTZ_COL, + LocalDateTime.of(1970, 1, 1, accountId, accountType) + .toInstant(ZoneOffset.UTC))); + if (isTimestampTypeSupported()) { + columns.add( + TimestampColumn.of(TIMESTAMP_COL, LocalDateTime.of(1970, 1, 1, accountId, accountType))); + } + return columns.build(); + } } diff --git a/integration-test/src/main/java/com/scalar/db/api/TwoPhaseCommitTransactionIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/TwoPhaseCommitTransactionIntegrationTestBase.java index 57fb66c263..6587cd79a1 100644 --- a/integration-test/src/main/java/com/scalar/db/api/TwoPhaseCommitTransactionIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/TwoPhaseCommitTransactionIntegrationTestBase.java @@ -16,15 +16,32 @@ import com.scalar.db.exception.transaction.TransactionException; import com.scalar.db.exception.transaction.TransactionNotFoundException; import com.scalar.db.exception.transaction.UnsatisfiedConditionException; +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.Column; import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.IntValue; import com.scalar.db.io.Key; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import com.scalar.db.io.Value; import com.scalar.db.service.TransactionFactory; import com.scalar.db.util.TestUtils; import com.scalar.db.util.TestUtils.ExpectedResult; import com.scalar.db.util.TestUtils.ExpectedResult.ExpectedResultBuilder; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -34,6 +51,7 @@ import java.util.Properties; import java.util.stream.IntStream; import org.assertj.core.api.Assertions; +import org.assertj.core.util.Lists; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; @@ -54,19 +72,19 @@ public abstract class TwoPhaseCommitTransactionIntegrationTestBase { protected static final String ACCOUNT_TYPE = "account_type"; protected static final String BALANCE = "balance"; protected static final String SOME_COLUMN = "some_column"; + protected static final String BOOLEAN_COL = "boolean_col"; + protected static final String BIGINT_COL = "bigint_col"; + protected static final String FLOAT_COL = "float_col"; + protected static final String DOUBLE_COL = "double_col"; + protected static final String TEXT_COL = "text_col"; + protected static final String BLOB_COL = "blob_col"; + protected static final String DATE_COL = "date_col"; + protected static final String TIME_COL = "time_col"; + protected static final String TIMESTAMP_COL = "timestamp_col"; + protected static final String TIMESTAMPTZ_COL = "timestamptz_col"; protected static final int 
INITIAL_BALANCE = 1000; protected static final int NUM_ACCOUNTS = 4; protected static final int NUM_TYPES = 4; - protected static final TableMetadata TABLE_METADATA = - TableMetadata.newBuilder() - .addColumn(ACCOUNT_ID, DataType.INT) - .addColumn(ACCOUNT_TYPE, DataType.INT) - .addColumn(BALANCE, DataType.INT) - .addColumn(SOME_COLUMN, DataType.INT) - .addPartitionKey(ACCOUNT_ID) - .addClusteringKey(ACCOUNT_TYPE) - .addSecondaryIndex(SOME_COLUMN) - .build(); protected DistributedTransactionAdmin admin1; protected DistributedTransactionAdmin admin2; protected TwoPhaseCommitTransactionManager manager1; @@ -105,12 +123,34 @@ protected String getNamespaceBaseName() { } private void createTables() throws ExecutionException { + TableMetadata.Builder tableMetadata = + TableMetadata.newBuilder() + .addColumn(ACCOUNT_ID, DataType.INT) + .addColumn(ACCOUNT_TYPE, DataType.INT) + .addColumn(BALANCE, DataType.INT) + .addColumn(SOME_COLUMN, DataType.INT) + .addColumn(BOOLEAN_COL, DataType.BOOLEAN) + .addColumn(BIGINT_COL, DataType.BIGINT) + .addColumn(FLOAT_COL, DataType.FLOAT) + .addColumn(DOUBLE_COL, DataType.DOUBLE) + .addColumn(TEXT_COL, DataType.TEXT) + .addColumn(BLOB_COL, DataType.BLOB) + .addColumn(DATE_COL, DataType.DATE) + .addColumn(TIME_COL, DataType.TIME) + .addColumn(TIMESTAMPTZ_COL, DataType.TIMESTAMPTZ) + .addPartitionKey(ACCOUNT_ID) + .addClusteringKey(ACCOUNT_TYPE) + .addSecondaryIndex(SOME_COLUMN); + if (isTimestampTypeSupported()) { + tableMetadata.addColumn(TIMESTAMP_COL, DataType.TIMESTAMP); + } + Map options = getCreationOptions(); admin1.createCoordinatorTables(true, options); admin1.createNamespace(namespace1, true, options); - admin1.createTable(namespace1, TABLE_1, TABLE_METADATA, true, options); + admin1.createTable(namespace1, TABLE_1, tableMetadata.build(), true, options); admin2.createNamespace(namespace2, true, options); - admin2.createTable(namespace2, TABLE_2, TABLE_METADATA, true, options); + admin2.createTable(namespace2, TABLE_2, tableMetadata.build(), true, options); } protected Map getCreationOptions() { @@ -178,7 +218,7 @@ public void get_GetGivenForCommittedRecord_ShouldReturnRecord() throws Transacti // Arrange populateRecords(manager1, namespace1, TABLE_1); TwoPhaseCommitTransaction transaction = manager1.start(); - Get get = prepareGet(0, 0, namespace1, TABLE_1); + Get get = prepareGet(2, 3, namespace1, TABLE_1); // Act Optional result = transaction.get(get); @@ -187,11 +227,7 @@ public void get_GetGivenForCommittedRecord_ShouldReturnRecord() throws Transacti transaction.commit(); // Assert - assertThat(result.isPresent()).isTrue(); - assertThat(result.get().getInt(ACCOUNT_ID)).isEqualTo(0); - assertThat(result.get().getInt(ACCOUNT_TYPE)).isEqualTo(0); - assertThat(getBalance(result.get())).isEqualTo(INITIAL_BALANCE); - assertThat(result.get().getInt(SOME_COLUMN)).isEqualTo(0); + assertResult(2, 3, result); } @Test @@ -226,24 +262,32 @@ public void get_GetWithMatchedConjunctionsGivenForCommittedRecord_ShouldReturnRe // Arrange populateRecords(manager1, namespace1, TABLE_1); TwoPhaseCommitTransaction transaction = manager1.start(); - Get get = - Get.newBuilder(prepareGet(1, 1, namespace1, TABLE_1)) + GetBuilder.BuildableGetFromExistingWithOngoingWhereAnd get = + Get.newBuilder(prepareGet(1, 2, namespace1, TABLE_1)) .where(ConditionBuilder.column(BALANCE).isEqualToInt(INITIAL_BALANCE)) - .and(ConditionBuilder.column(SOME_COLUMN).isEqualToInt(1)) - .build(); + .and(ConditionBuilder.column(SOME_COLUMN).isEqualToInt(2)) + 
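// Keeping the intermediate BuildableGetFromExistingWithOngoingWhereAnd in a local
// variable (rather than calling build() inline) lets the test attach further and(...)
// conjuncts conditionally before building, as the code just below does:
//
//   if (isTimestampTypeSupported()) {
//     get.and(
//         ConditionBuilder.column(TIMESTAMP_COL)
//             .isGreaterThanOrEqualToTimestamp(LocalDateTime.of(1970, 1, 1, 1, 2)));
//   }
//   Optional<Result> result = transaction.get(get.build());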
.and(ConditionBuilder.column(BOOLEAN_COL).isNotEqualToBoolean(true)) + .and(ConditionBuilder.column(BIGINT_COL).isLessThanBigInt(BigIntColumn.MAX_VALUE)) + .and(ConditionBuilder.column(FLOAT_COL).isEqualToFloat(0.12F)) + .and(ConditionBuilder.column(DOUBLE_COL).isGreaterThanDouble(-10)) + .and(ConditionBuilder.column(TEXT_COL).isNotEqualToText("foo")) + .and(ConditionBuilder.column(DATE_COL).isLessThanDate(LocalDate.of(3000, 1, 1))) + .and(ConditionBuilder.column(TIME_COL).isNotNullTime()) + .and(ConditionBuilder.column(TIMESTAMPTZ_COL).isNotEqualToTimestampTZ(Instant.EPOCH)); + if (isTimestampTypeSupported()) { + get.and( + ConditionBuilder.column(TIMESTAMP_COL) + .isGreaterThanOrEqualToTimestamp(LocalDateTime.of(1970, 1, 1, 1, 2))); + } // Act - Optional result = transaction.get(get); + Optional result = transaction.get(get.build()); transaction.prepare(); transaction.validate(); transaction.commit(); // Assert - assertThat(result.isPresent()).isTrue(); - assertThat(result.get().getInt(ACCOUNT_ID)).isEqualTo(1); - assertThat(result.get().getInt(ACCOUNT_TYPE)).isEqualTo(1); - assertThat(getBalance(result.get())).isEqualTo(INITIAL_BALANCE); - assertThat(result.get().getInt(SOME_COLUMN)).isEqualTo(1); + assertResult(1, 2, result); } @Test @@ -283,20 +327,9 @@ public void scan_ScanGivenForCommittedRecord_ShouldReturnRecords() throws Transa // Assert assertThat(results.size()).isEqualTo(3); - assertThat(results.get(0).getInt(ACCOUNT_ID)).isEqualTo(1); - assertThat(results.get(0).getInt(ACCOUNT_TYPE)).isEqualTo(0); - assertThat(getBalance(results.get(0))).isEqualTo(INITIAL_BALANCE); - assertThat(results.get(0).getInt(SOME_COLUMN)).isEqualTo(0); - - assertThat(results.get(1).getInt(ACCOUNT_ID)).isEqualTo(1); - assertThat(results.get(1).getInt(ACCOUNT_TYPE)).isEqualTo(1); - assertThat(getBalance(results.get(1))).isEqualTo(INITIAL_BALANCE); - assertThat(results.get(1).getInt(SOME_COLUMN)).isEqualTo(1); - - assertThat(results.get(2).getInt(ACCOUNT_ID)).isEqualTo(1); - assertThat(results.get(2).getInt(ACCOUNT_TYPE)).isEqualTo(2); - assertThat(getBalance(results.get(2))).isEqualTo(INITIAL_BALANCE); - assertThat(results.get(2).getInt(SOME_COLUMN)).isEqualTo(2); + assertResult(1, 0, results.get(0)); + assertResult(1, 1, results.get(1)); + assertResult(1, 2, results.get(2)); } @Test @@ -531,15 +564,13 @@ public void scan_ScanGivenForIndexColumn_ShouldReturnRecords() throws Transactio new ExpectedResultBuilder() .column(IntColumn.of(ACCOUNT_ID, 1)) .column(IntColumn.of(ACCOUNT_TYPE, 2)) - .column(IntColumn.of(BALANCE, INITIAL_BALANCE)) - .column(IntColumn.of(SOME_COLUMN, 2)) + .columns(prepareNonKeyColumns(1, 2)) .build()); expectedResults.add( new ExpectedResultBuilder() .column(IntColumn.of(ACCOUNT_ID, 2)) .column(IntColumn.of(ACCOUNT_TYPE, 1)) - .column(IntColumn.of(BALANCE, INITIAL_BALANCE)) - .column(IntColumn.of(SOME_COLUMN, 2)) + .columns(prepareNonKeyColumns(2, 1)) .build()); // Act @@ -557,19 +588,17 @@ public void scan_ScanGivenForIndexColumn_ShouldReturnRecords() throws Transactio @Test public void putAndCommit_PutGivenForNonExisting_ShouldCreateRecord() throws TransactionException { // Arrange - int expected = INITIAL_BALANCE; - Put put = + PutBuilder.Buildable put = Put.newBuilder() .namespace(namespace1) .table(TABLE_1) .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) - .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) - .intValue(BALANCE, expected) - .build(); + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(0, 0).forEach(put::value); TwoPhaseCommitTransaction transaction = 
manager1.start(); // Act - transaction.put(put); + transaction.put(put.build()); transaction.prepare(); transaction.validate(); transaction.commit(); @@ -581,28 +610,33 @@ public void putAndCommit_PutGivenForNonExisting_ShouldCreateRecord() throws Tran another.prepare(); another.validate(); another.commit(); - assertThat(result.isPresent()).isTrue(); - assertThat(getBalance(result.get())).isEqualTo(expected); + + assertResult(0, 0, result); } @Test public void putAndCommit_PutGivenForExisting_ShouldUpdateRecord() throws TransactionException { // Arrange - populateRecords(manager1, namespace1, TABLE_1); + InsertBuilder.Buildable insert = + Insert.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(2, 2).forEach(insert::value); + insert(insert.build()); TwoPhaseCommitTransaction transaction = manager1.start(); // Act - int expected = INITIAL_BALANCE + 100; - Put put = + PutBuilder.Buildable put = Put.newBuilder() .namespace(namespace1) .table(TABLE_1) .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) - .intValue(BALANCE, expected) - .enableImplicitPreRead() - .build(); - transaction.put(put); + .enableImplicitPreRead(); + prepareNonKeyColumns(0, 0).forEach(put::value); + transaction.put(put.build()); transaction.prepare(); transaction.validate(); transaction.commit(); @@ -614,8 +648,7 @@ public void putAndCommit_PutGivenForExisting_ShouldUpdateRecord() throws Transac another.validate(); another.commit(); - assertThat(actual.isPresent()).isTrue(); - assertThat(getBalance(actual.get())).isEqualTo(expected); + assertResult(0, 0, actual); } @Test @@ -849,18 +882,26 @@ public void mutateAndCommit_AfterRead_ShouldMutateRecordsProperly() throws Trans @Test public void mutateAndCommit_ShouldMutateRecordsProperly() throws TransactionException { // Arrange - populateRecords(manager1, namespace1, TABLE_1); - Put put = - Put.newBuilder(preparePut(0, 0, namespace1, TABLE_1)) - .intValue(BALANCE, INITIAL_BALANCE - 100) - .enableImplicitPreRead() - .build(); - Delete delete = prepareDelete(1, 0, namespace1, TABLE_1); + InsertBuilder.Buildable insertRecord1 = + Insert.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(1, 1).forEach(insertRecord1::value); + Insert insertRecord2 = prepareInsert(1, 0, namespace1, TABLE_1); + + insert(insertRecord1.build(), insertRecord2); + + PutBuilder.Buildable putRecord1 = + Put.newBuilder(preparePut(0, 0, namespace1, TABLE_1)).enableImplicitPreRead(); + prepareNonKeyColumns(0, 0).forEach(putRecord1::value); + Delete deleteRecord2 = prepareDelete(1, 0, namespace1, TABLE_1); TwoPhaseCommitTransaction transaction = manager1.begin(); // Act - transaction.mutate(Arrays.asList(put, delete)); + transaction.mutate(Arrays.asList(putRecord1.build(), deleteRecord2)); transaction.prepare(); transaction.validate(); transaction.commit(); @@ -873,8 +914,7 @@ public void mutateAndCommit_ShouldMutateRecordsProperly() throws TransactionExce another.validate(); another.commit(); - assertThat(result1.isPresent()).isTrue(); - assertThat(result1.get().getInt(BALANCE)).isEqualTo(INITIAL_BALANCE - 100); + assertResult(0, 0, result1); assertThat(result2.isPresent()).isFalse(); } @@ -882,22 +922,28 @@ public void mutateAndCommit_ShouldMutateRecordsProperly() throws TransactionExce public void 
mutateAndCommit_WithMultipleSubTransactions_ShouldMutateRecordsProperly() throws TransactionException { // Arrange - populateRecords(manager1, namespace1, TABLE_1); - populateRecords(manager2, namespace2, TABLE_2); + InsertBuilder.Buildable insertForTable1 = + Insert.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(1, 1).forEach(insertForTable1::value); + Insert insertForTable2 = prepareInsert(1, 0, namespace2, TABLE_2); - Put put = - Put.newBuilder(preparePut(0, 0, namespace1, TABLE_1)) - .intValue(BALANCE, INITIAL_BALANCE - 100) - .enableImplicitPreRead() - .build(); - Delete delete = prepareDelete(1, 0, namespace2, TABLE_2); + insert(insertForTable1.build(), insertForTable2); + + PutBuilder.Buildable putForTable1 = + Put.newBuilder(preparePut(0, 0, namespace1, TABLE_1)).enableImplicitPreRead(); + prepareNonKeyColumns(0, 0).forEach(putForTable1::value); + Delete deleteForTable2 = prepareDelete(1, 0, namespace2, TABLE_2); TwoPhaseCommitTransaction transaction1 = manager1.begin(); TwoPhaseCommitTransaction transaction2 = manager2.join(transaction1.getId()); // Act - transaction1.put(put); - transaction2.delete(delete); + transaction1.put(putForTable1.build()); + transaction2.delete(deleteForTable2); // Prepare transaction1.prepare(); @@ -929,8 +975,7 @@ public void mutateAndCommit_WithMultipleSubTransactions_ShouldMutateRecordsPrope another1.commit(); another2.commit(); - assertThat(result1.isPresent()).isTrue(); - assertThat(result1.get().getInt(BALANCE)).isEqualTo(INITIAL_BALANCE - 100); + assertResult(0, 0, result1); assertThat(result2.isPresent()).isFalse(); } @@ -1099,8 +1144,7 @@ public void scan_ScanAllGivenForCommittedRecord_ShouldReturnRecords() new ExpectedResultBuilder() .column(IntColumn.of(ACCOUNT_ID, i)) .column(IntColumn.of(ACCOUNT_TYPE, j)) - .column(IntColumn.of(BALANCE, INITIAL_BALANCE)) - .column(IntColumn.of(SOME_COLUMN, i * j)) + .columns(prepareNonKeyColumns(i, j)) .build()))); TestUtils.assertResultsContainsExactlyInAnyOrder(results, expectedResults); } @@ -1110,20 +1154,11 @@ public void scan_ScanAllGivenWithLimit_ShouldReturnLimitedAmountOfRecords() throws TransactionException { // Arrange TwoPhaseCommitTransaction putTransaction = manager1.begin(); - putTransaction.put( - Arrays.asList( - new Put(Key.ofInt(ACCOUNT_ID, 1), Key.ofInt(ACCOUNT_TYPE, 1)) - .forNamespace(namespace1) - .forTable(TABLE_1), - new Put(Key.ofInt(ACCOUNT_ID, 1), Key.ofInt(ACCOUNT_TYPE, 2)) - .forNamespace(namespace1) - .forTable(TABLE_1), - new Put(Key.ofInt(ACCOUNT_ID, 2), Key.ofInt(ACCOUNT_TYPE, 1)) - .forNamespace(namespace1) - .forTable(TABLE_1), - new Put(Key.ofInt(ACCOUNT_ID, 3), Key.ofInt(ACCOUNT_TYPE, 0)) - .forNamespace(namespace1) - .forTable(TABLE_1))); + insert( + prepareInsert(1, 1, namespace1, TABLE_1), + prepareInsert(1, 2, namespace1, TABLE_1), + prepareInsert(2, 1, namespace1, TABLE_1), + prepareInsert(3, 0, namespace1, TABLE_1)); putTransaction.prepare(); putTransaction.validate(); putTransaction.commit(); @@ -1144,26 +1179,22 @@ public void scan_ScanAllGivenWithLimit_ShouldReturnLimitedAmountOfRecords() new ExpectedResultBuilder() .column(IntColumn.of(ACCOUNT_ID, 1)) .column(IntColumn.of(ACCOUNT_TYPE, 1)) - .column(IntColumn.ofNull(BALANCE)) - .column(IntColumn.ofNull(SOME_COLUMN)) + .columns(prepareNonKeyColumns(1, 1)) .build(), new ExpectedResultBuilder() .column(IntColumn.of(ACCOUNT_ID, 1)) .column(IntColumn.of(ACCOUNT_TYPE, 2)) - 
.column(IntColumn.ofNull(BALANCE)) - .column(IntColumn.ofNull(SOME_COLUMN)) + .columns(prepareNonKeyColumns(1, 2)) .build(), new ExpectedResultBuilder() .column(IntColumn.of(ACCOUNT_ID, 2)) .column(IntColumn.of(ACCOUNT_TYPE, 1)) - .column(IntColumn.ofNull(BALANCE)) - .column(IntColumn.ofNull(SOME_COLUMN)) + .columns(prepareNonKeyColumns(2, 1)) .build(), new ExpectedResultBuilder() .column(IntColumn.of(ACCOUNT_ID, 3)) .column(IntColumn.of(ACCOUNT_TYPE, 0)) - .column(IntColumn.ofNull(BALANCE)) - .column(IntColumn.ofNull(SOME_COLUMN)) + .columns(prepareNonKeyColumns(3, 0)) .build())); assertThat(results).hasSize(2); } @@ -1565,37 +1596,37 @@ public void mutate_DefaultNamespaceGiven_ShouldWorkProperly() throws Transaction @Test public void put_withPutIfWithVerifiedCondition_shouldPutProperly() throws TransactionException { // Arrange - int someColumnValue = 10; - Put initialData = - Put.newBuilder(preparePut(0, 0, namespace1, TABLE_1)) - .intValue(BALANCE, INITIAL_BALANCE) - .intValue(SOME_COLUMN, someColumnValue) - .build(); - put(initialData); - - int updatedBalance = 2; - Put putIf = - Put.newBuilder(initialData) - .intValue(BALANCE, updatedBalance) - .condition( - ConditionBuilder.putIf( - ConditionBuilder.column(BALANCE).isEqualToInt(INITIAL_BALANCE)) - .and(ConditionBuilder.column(SOME_COLUMN).isNotNullInt()) - .build()) - .enableImplicitPreRead() - .build(); + PutBuilder.Buildable initialData = Put.newBuilder(preparePut(2, 3, namespace1, TABLE_1)); + prepareNonKeyColumns(1, 2).forEach(initialData::value); + put(initialData.build()); + + List conditions = + Lists.newArrayList( + ConditionBuilder.column(BALANCE).isEqualToInt(INITIAL_BALANCE), + ConditionBuilder.column(SOME_COLUMN).isNotNullInt(), + ConditionBuilder.column(BOOLEAN_COL).isNotEqualToBoolean(true), + ConditionBuilder.column(BIGINT_COL).isLessThanBigInt(BigIntColumn.MAX_VALUE), + ConditionBuilder.column(FLOAT_COL).isEqualToFloat(0.12F), + ConditionBuilder.column(DOUBLE_COL).isGreaterThanDouble(-10), + ConditionBuilder.column(TEXT_COL).isNotEqualToText("foo"), + ConditionBuilder.column(DATE_COL).isLessThanDate(LocalDate.of(3000, 1, 1)), + ConditionBuilder.column(TIME_COL).isNotNullTime(), + ConditionBuilder.column(TIMESTAMPTZ_COL).isNotEqualToTimestampTZ(Instant.EPOCH)); + if (isTimestampTypeSupported()) { + conditions.add( + ConditionBuilder.column(TIMESTAMP_COL) + .isGreaterThanOrEqualToTimestamp(LocalDateTime.of(1970, 1, 1, 1, 2))); + } + PutBuilder.Buildable putIf = Put.newBuilder(initialData.build()).clearValues(); + prepareNonKeyColumns(2, 3).forEach(putIf::value); + putIf.condition(ConditionBuilder.putIf(conditions)).enableImplicitPreRead().build(); // Act - put(putIf); + put(putIf.build()); // Assert - Optional optResult = get(prepareGet(0, 0, namespace1, TABLE_1)); - assertThat(optResult.isPresent()).isTrue(); - Result result = optResult.get(); - assertThat(result.getInt(ACCOUNT_ID)).isEqualTo(0); - assertThat(result.getInt(ACCOUNT_TYPE)).isEqualTo(0); - assertThat(result.getInt(BALANCE)).isEqualTo(updatedBalance); - assertThat(result.getInt(SOME_COLUMN)).isEqualTo(someColumnValue); + Optional optResult = get(prepareGet(2, 3, namespace1, TABLE_1)); + assertResult(2, 3, optResult); } @Test @@ -1745,26 +1776,33 @@ public void put_withPutIfNotExistsWhenRecordExists_shouldThrowUnsatisfiedConditi public void delete_withDeleteIfWithVerifiedCondition_shouldDeleteProperly() throws TransactionException { // Arrange - Put initialData = - Put.newBuilder(preparePut(0, 0, namespace1, TABLE_1)) - .intValue(BALANCE, INITIAL_BALANCE) 
- .build(); - put(initialData); + PutBuilder.Buildable initialData = Put.newBuilder(preparePut(1, 2, namespace1, TABLE_1)); + prepareNonKeyColumns(1, 2).forEach(initialData::value); + put(initialData.build()); + + List conditions = + Lists.newArrayList( + ConditionBuilder.column(BALANCE).isEqualToInt(INITIAL_BALANCE), + ConditionBuilder.column(SOME_COLUMN).isNotNullInt(), + ConditionBuilder.column(BOOLEAN_COL).isNotEqualToBoolean(true), + ConditionBuilder.column(BIGINT_COL).isLessThanBigInt(BigIntColumn.MAX_VALUE), + ConditionBuilder.column(FLOAT_COL).isEqualToFloat(0.12F), + ConditionBuilder.column(DOUBLE_COL).isGreaterThanDouble(-10), + ConditionBuilder.column(TEXT_COL).isNotEqualToText("foo"), + ConditionBuilder.column(DATE_COL).isLessThanDate(LocalDate.of(3000, 1, 1)), + ConditionBuilder.column(TIME_COL).isNotNullTime(), + ConditionBuilder.column(TIMESTAMPTZ_COL).isNotEqualToTimestampTZ(Instant.EPOCH)); Delete deleteIf = - Delete.newBuilder(prepareDelete(0, 0, namespace1, TABLE_1)) - .condition( - ConditionBuilder.deleteIf( - ConditionBuilder.column(BALANCE).isEqualToInt(INITIAL_BALANCE)) - .and(ConditionBuilder.column(SOME_COLUMN).isNullInt()) - .build()) + Delete.newBuilder(prepareDelete(1, 2, namespace1, TABLE_1)) + .condition(ConditionBuilder.deleteIf(conditions)) .build(); // Act delete(deleteIf); // Assert - Optional optResult = get(prepareGet(0, 0, namespace1, TABLE_1)); + Optional optResult = get(prepareGet(1, 2, namespace1, TABLE_1)); assertThat(optResult.isPresent()).isFalse(); } @@ -1834,19 +1872,10 @@ public void delete_withDeleteIfExistsWhenRecordsExists_shouldDeleteProperly() public void insertAndCommit_InsertGivenForNonExisting_ShouldCreateRecord() throws TransactionException { // Arrange - int expected = INITIAL_BALANCE; - Insert insert = - Insert.newBuilder() - .namespace(namespace1) - .table(TABLE_1) - .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) - .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) - .intValue(BALANCE, expected) - .build(); TwoPhaseCommitTransaction transaction = manager1.start(); // Act - transaction.insert(insert); + transaction.insert(prepareInsert(0, 0, namespace1, TABLE_1)); transaction.prepare(); transaction.validate(); transaction.commit(); @@ -1858,8 +1887,7 @@ public void insertAndCommit_InsertGivenForNonExisting_ShouldCreateRecord() another.prepare(); another.validate(); another.commit(); - assertThat(result.isPresent()).isTrue(); - assertThat(getBalance(result.get())).isEqualTo(expected); + assertResult(0, 0, result); } @Test @@ -1897,19 +1925,17 @@ public void insertAndCommit_InsertGivenForNonExisting_ShouldCreateRecord() public void upsertAndCommit_UpsertGivenForNonExisting_ShouldCreateRecord() throws TransactionException { // Arrange - int expected = INITIAL_BALANCE; - Upsert upsert = + UpsertBuilder.Buildable upsert = Upsert.newBuilder() .namespace(namespace1) .table(TABLE_1) .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) - .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) - .intValue(BALANCE, expected) - .build(); + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(0, 0).forEach(upsert::value); TwoPhaseCommitTransaction transaction = manager1.start(); // Act - transaction.upsert(upsert); + transaction.upsert(upsert.build()); transaction.prepare(); transaction.validate(); transaction.commit(); @@ -1921,28 +1947,32 @@ public void upsertAndCommit_UpsertGivenForNonExisting_ShouldCreateRecord() another.prepare(); another.validate(); another.commit(); - assertThat(result.isPresent()).isTrue(); - 
assertThat(getBalance(result.get())).isEqualTo(expected); + assertResult(0, 0, result); } @Test public void upsertAndCommit_UpsertGivenForExisting_ShouldUpdateRecord() throws TransactionException { // Arrange - populateRecords(manager1, namespace1, TABLE_1); + InsertBuilder.Buildable insert = + Insert.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(1, 1).forEach(insert::value); + insert(insert.build()); TwoPhaseCommitTransaction transaction = manager1.start(); // Act - int expected = INITIAL_BALANCE + 100; - Upsert upsert = + UpsertBuilder.Buildable upsert = Upsert.newBuilder() .namespace(namespace1) .table(TABLE_1) .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) - .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) - .intValue(BALANCE, expected) - .build(); - transaction.upsert(upsert); + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(0, 0).forEach(upsert::value); + transaction.upsert(upsert.build()); transaction.prepare(); transaction.validate(); transaction.commit(); @@ -1955,7 +1985,7 @@ public void upsertAndCommit_UpsertGivenForExisting_ShouldUpdateRecord() another.commit(); assertThat(actual.isPresent()).isTrue(); - assertThat(getBalance(actual.get())).isEqualTo(expected); + assertResult(0, 0, actual); } @Test @@ -2014,20 +2044,25 @@ public void updateAndCommit_UpdateGivenForNonExisting_ShouldDoNothing() public void updateAndCommit_UpdateGivenForExisting_ShouldUpdateRecord() throws TransactionException { // Arrange - populateRecords(manager1, namespace1, TABLE_1); + InsertBuilder.Buildable insert = + Insert.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(1, 1).forEach(insert::value); + insert(insert.build()); TwoPhaseCommitTransaction transaction = manager1.start(); // Act - int expected = INITIAL_BALANCE + 100; - Update update = + UpdateBuilder.Buildable update = Update.newBuilder() .namespace(namespace1) .table(TABLE_1) .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) - .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) - .intValue(BALANCE, expected) - .build(); - transaction.update(update); + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(0, 0).forEach(update::value); + transaction.update(update.build()); transaction.prepare(); transaction.validate(); transaction.commit(); @@ -2039,29 +2074,33 @@ public void updateAndCommit_UpdateGivenForExisting_ShouldUpdateRecord() another.validate(); another.commit(); - assertThat(actual.isPresent()).isTrue(); - assertThat(getBalance(actual.get())).isEqualTo(expected); + assertResult(0, 0, actual); } @Test public void updateAndCommit_UpdateWithUpdateIfExistsGivenForExisting_ShouldUpdateRecord() throws TransactionException { // Arrange - populateRecords(manager1, namespace1, TABLE_1); + InsertBuilder.Buildable insert = + Insert.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)); + prepareNonKeyColumns(1, 1).forEach(insert::value); + insert(insert.build()); TwoPhaseCommitTransaction transaction = manager1.start(); // Act - int expected = INITIAL_BALANCE + 100; - Update update = + UpdateBuilder.Buildable update = Update.newBuilder() .namespace(namespace1) .table(TABLE_1) .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) - .intValue(BALANCE, expected) - 
.condition(ConditionBuilder.updateIfExists()) - .build(); - transaction.update(update); + .condition(ConditionBuilder.updateIfExists()); + prepareNonKeyColumns(0, 0).forEach(update::value); + transaction.update(update.build()); transaction.prepare(); transaction.validate(); transaction.commit(); @@ -2073,53 +2112,59 @@ public void updateAndCommit_UpdateWithUpdateIfExistsGivenForExisting_ShouldUpdat another.validate(); another.commit(); - assertThat(actual.isPresent()).isTrue(); - assertThat(getBalance(actual.get())).isEqualTo(expected); + assertResult(0, 0, actual); } @Test public void update_withUpdateIfWithVerifiedCondition_shouldUpdateProperly() throws TransactionException { // Arrange - int someColumnValue = 10; - Put initialData = - Put.newBuilder(preparePut(0, 0, namespace1, TABLE_1)) - .intValue(BALANCE, INITIAL_BALANCE) - .intValue(SOME_COLUMN, someColumnValue) - .build(); - put(initialData); - - int updatedBalance = 2; - Update updateIf = + InsertBuilder.Buildable insert = + Insert.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 2)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 3)); + prepareNonKeyColumns(1, 2).forEach(insert::value); + insert(insert.build()); + + List conditions = + Lists.newArrayList( + ConditionBuilder.column(BALANCE).isEqualToInt(INITIAL_BALANCE), + ConditionBuilder.column(SOME_COLUMN).isNotNullInt(), + ConditionBuilder.column(BOOLEAN_COL).isNotEqualToBoolean(true), + ConditionBuilder.column(BIGINT_COL).isLessThanBigInt(BigIntColumn.MAX_VALUE), + ConditionBuilder.column(FLOAT_COL).isEqualToFloat(0.12F), + ConditionBuilder.column(DOUBLE_COL).isGreaterThanDouble(-10), + ConditionBuilder.column(TEXT_COL).isNotEqualToText("foo"), + ConditionBuilder.column(DATE_COL).isLessThanDate(LocalDate.of(3000, 1, 1)), + ConditionBuilder.column(TIME_COL).isNotNullTime(), + ConditionBuilder.column(TIMESTAMPTZ_COL).isNotEqualToTimestampTZ(Instant.EPOCH)); + if (isTimestampTypeSupported()) { + conditions.add( + ConditionBuilder.column(TIMESTAMP_COL) + .isGreaterThanOrEqualToTimestamp(LocalDateTime.of(1970, 1, 1, 1, 2))); + } + UpdateBuilder.Buildable updateIf = Update.newBuilder() .namespace(namespace1) .table(TABLE_1) - .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) - .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) - .intValue(BALANCE, updatedBalance) - .condition( - ConditionBuilder.updateIf( - ConditionBuilder.column(BALANCE).isEqualToInt(INITIAL_BALANCE)) - .and(ConditionBuilder.column(SOME_COLUMN).isNotNullInt()) - .build()) - .build(); + .partitionKey(Key.ofInt(ACCOUNT_ID, 2)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 3)) + .condition(ConditionBuilder.updateIf(conditions)); + prepareNonKeyColumns(2, 3).forEach(updateIf::value); TwoPhaseCommitTransaction transaction = manager1.start(); // Act - transaction.update(updateIf); + transaction.update(updateIf.build()); transaction.prepare(); transaction.validate(); transaction.commit(); // Assert - Optional optResult = get(prepareGet(0, 0, namespace1, TABLE_1)); - assertThat(optResult.isPresent()).isTrue(); - Result result = optResult.get(); - assertThat(result.getInt(ACCOUNT_ID)).isEqualTo(0); - assertThat(result.getInt(ACCOUNT_TYPE)).isEqualTo(0); - assertThat(result.getInt(BALANCE)).isEqualTo(updatedBalance); - assertThat(result.getInt(SOME_COLUMN)).isEqualTo(someColumnValue); + Optional actual = get(prepareGet(2, 3, namespace1, TABLE_1)); + assertResult(2, 3, actual); } @Test @@ -2652,6 +2697,21 @@ private void put(Put put) throws TransactionException { } } + protected void insert(Insert... 
insert) throws TransactionException { + TwoPhaseCommitTransaction tx = manager1.start(); + try { + for (Insert i : insert) { + tx.insert(i); + } + tx.prepare(); + tx.validate(); + tx.commit(); + } catch (TransactionException e) { + tx.rollback(); + throw e; + } + } + private void delete(Delete delete) throws TransactionException { TwoPhaseCommitTransaction tx = manager1.start(); try { @@ -2669,29 +2729,24 @@ protected void populateRecords( TwoPhaseCommitTransactionManager manager1, String namespaceName, String tableName) throws TransactionException { TwoPhaseCommitTransaction transaction = manager1.begin(); - IntStream.range(0, NUM_ACCOUNTS) - .forEach( - i -> - IntStream.range(0, NUM_TYPES) - .forEach( - j -> { - Key partitionKey = Key.ofInt(ACCOUNT_ID, i); - Key clusteringKey = Key.ofInt(ACCOUNT_TYPE, j); - Put put = - Put.newBuilder() - .namespace(namespaceName) - .table(tableName) - .partitionKey(partitionKey) - .clusteringKey(clusteringKey) - .intValue(BALANCE, INITIAL_BALANCE) - .intValue(SOME_COLUMN, i * j) - .build(); - try { - transaction.put(put); - } catch (CrudException e) { - throw new RuntimeException(e); - } - })); + for (int i = 0; i < NUM_ACCOUNTS; i++) { + for (int j = 0; j < NUM_TYPES; j++) { + Key partitionKey = Key.ofInt(ACCOUNT_ID, i); + Key clusteringKey = Key.ofInt(ACCOUNT_TYPE, j); + InsertBuilder.Buildable insert = + Insert.newBuilder() + .namespace(namespaceName) + .table(tableName) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey); + prepareNonKeyColumns(i, j).forEach(insert::value); + try { + transaction.insert(insert.build()); + } catch (CrudException e) { + throw new RuntimeException(e); + } + } + } transaction.prepare(); transaction.validate(); transaction.commit(); @@ -2764,6 +2819,20 @@ protected Put preparePut(int id, int type, String namespaceName, String tableNam .withConsistency(Consistency.LINEARIZABLE); } + protected Insert prepareInsert(int id, int type, String namespaceName, String tableName) { + Key partitionKey = Key.ofInt(ACCOUNT_ID, id); + Key clusteringKey = Key.ofInt(ACCOUNT_TYPE, type); + InsertBuilder.Buildable insert = + Insert.newBuilder() + .namespace(namespaceName) + .table(tableName) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey); + prepareNonKeyColumns(id, type).forEach(insert::value); + + return insert.build(); + } + protected List preparePuts(String namespaceName, String tableName) { List puts = new ArrayList<>(); IntStream.range(0, NUM_ACCOUNTS) @@ -2789,4 +2858,118 @@ protected int getBalance(Result result) { assertThat(balance).isPresent(); return balance.get().getAsInt(); } + + protected boolean isTimestampTypeSupported() { + return true; + } + + private void assertResult(int accountId, int accountType, Optional optResult) { + assertResult(accountId, accountType, optResult.orElse(null)); + } + + private void assertResult(int accountId, int accountType, Result result) { + String resultErrorMessage = + String.format("Result { accountId=%d, accountType=%d }", accountId, accountType); + + assertThat(result).describedAs(resultErrorMessage + " is null").isNotNull(); + + List columns = + Lists.newArrayList( + ACCOUNT_ID, + ACCOUNT_TYPE, + BALANCE, + SOME_COLUMN, + BOOLEAN_COL, + BIGINT_COL, + FLOAT_COL, + DOUBLE_COL, + TEXT_COL, + BLOB_COL, + DATE_COL, + TIME_COL, + TIMESTAMPTZ_COL); + if (isTimestampTypeSupported()) { + columns.add(TIMESTAMP_COL); + } + assertThat(result.getContainedColumnNames()) + .describedAs("Columns are missing. 
%s", resultErrorMessage) + .containsExactlyInAnyOrderElementsOf(columns); + for (String column : columns) { + assertThat(result.isNull(column)) + .describedAs("Column {%s} is null. %s", column, resultErrorMessage) + .isFalse(); + } + + String columnMessage = "Unexpected value for column {%s}. %s"; + assertThat(result.getInt(ACCOUNT_ID)) + .describedAs(columnMessage, ACCOUNT_ID, resultErrorMessage) + .isEqualTo(accountId); + assertThat(result.getInt(ACCOUNT_TYPE)) + .describedAs(columnMessage, ACCOUNT_TYPE, resultErrorMessage) + .isEqualTo(accountType); + assertThat(result.getInt(BALANCE)) + .describedAs(columnMessage, BALANCE, resultErrorMessage) + .isEqualTo(INITIAL_BALANCE); + assertThat(result.getInt(SOME_COLUMN)) + .describedAs(columnMessage, SOME_COLUMN, resultErrorMessage) + .isEqualTo(accountId * accountType); + assertThat(result.getBoolean(BOOLEAN_COL)) + .describedAs(columnMessage, BOOLEAN_COL, resultErrorMessage) + .isEqualTo(accountId % 2 == 0); + assertThat(result.getBigInt(BIGINT_COL)) + .describedAs(columnMessage, BIGINT_COL, resultErrorMessage) + .isEqualTo((long) Math.pow(accountId, accountType)); + assertThat(result.getFloat(FLOAT_COL)) + .describedAs(columnMessage, FLOAT_COL, resultErrorMessage) + .isEqualTo(Float.parseFloat("0." + accountId + accountType)); + assertThat(result.getDouble(DOUBLE_COL)) + .describedAs(columnMessage, DOUBLE_COL, resultErrorMessage) + .isEqualTo(Float.parseFloat("10." + accountId + accountType)); + assertThat(result.getText(TEXT_COL)) + .describedAs(columnMessage, TEXT_COL, resultErrorMessage) + .isEqualTo(accountId + "" + accountType); + assertThat(result.getBlobAsBytes(BLOB_COL)) + .describedAs(columnMessage, BLOB_COL, resultErrorMessage) + .isEqualTo((accountId + "" + accountType).getBytes(StandardCharsets.UTF_8)); + assertThat(result.getDate(DATE_COL)) + .describedAs(columnMessage, DATE_COL, resultErrorMessage) + .isEqualTo(LocalDate.ofEpochDay(accountId + accountType)); + assertThat(result.getTime(TIME_COL)) + .describedAs(columnMessage, TIME_COL, resultErrorMessage) + .isEqualTo(LocalTime.of(accountId, accountType)); + assertThat(result.getTimestampTZ(TIMESTAMPTZ_COL)) + .describedAs(columnMessage, TIMESTAMPTZ_COL, resultErrorMessage) + .isEqualTo(LocalDateTime.of(1970, 1, 1, accountId, accountType).toInstant(ZoneOffset.UTC)); + if (isTimestampTypeSupported()) { + assertThat(result.getTimestamp(TIMESTAMP_COL)) + .describedAs(columnMessage, TIMESTAMP_COL, resultErrorMessage) + .isEqualTo(LocalDateTime.of(1970, 1, 1, accountId, accountType)); + } + } + + protected List> prepareNonKeyColumns(int accountId, int accountType) { + ImmutableList.Builder> columns = + new ImmutableList.Builder>() + .add( + IntColumn.of(BALANCE, INITIAL_BALANCE), + IntColumn.of(SOME_COLUMN, accountId * accountType), + BooleanColumn.of(BOOLEAN_COL, accountId % 2 == 0), + BigIntColumn.of(BIGINT_COL, (long) Math.pow(accountId, accountType)), + FloatColumn.of(FLOAT_COL, Float.parseFloat("0." + accountId + accountType)), + DoubleColumn.of(DOUBLE_COL, Float.parseFloat("10." 
+ accountId + accountType)),
+                TextColumn.of(TEXT_COL, accountId + "" + accountType),
+                BlobColumn.of(
+                    BLOB_COL, (accountId + "" + accountType).getBytes(StandardCharsets.UTF_8)),
+                DateColumn.of(DATE_COL, LocalDate.ofEpochDay(accountId + accountType)),
+                TimeColumn.of(TIME_COL, LocalTime.of(accountId, accountType)),
+                TimestampTZColumn.of(
+                    TIMESTAMPTZ_COL,
+                    LocalDateTime.of(1970, 1, 1, accountId, accountType)
+                        .toInstant(ZoneOffset.UTC)));
+    if (isTimestampTypeSupported()) {
+      columns.add(
+          TimestampColumn.of(TIMESTAMP_COL, LocalDateTime.of(1970, 1, 1, accountId, accountType)));
+    }
+    return columns.build();
+  }
 }
diff --git a/integration-test/src/main/java/com/scalar/db/schemaloader/SchemaLoaderImportIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/schemaloader/SchemaLoaderImportIntegrationTestBase.java
index a894e43f48..1612bc20de 100644
--- a/integration-test/src/main/java/com/scalar/db/schemaloader/SchemaLoaderImportIntegrationTestBase.java
+++ b/integration-test/src/main/java/com/scalar/db/schemaloader/SchemaLoaderImportIntegrationTestBase.java
@@ -7,6 +7,8 @@
 import com.google.gson.Gson;
 import com.scalar.db.api.DistributedStorageAdmin;
 import com.scalar.db.api.DistributedTransactionAdmin;
+import com.scalar.db.api.TableMetadata;
+import com.scalar.db.io.DataType;
 import com.scalar.db.service.StorageFactory;
 import com.scalar.db.service.TransactionFactory;
 import java.io.IOException;
@@ -85,7 +87,10 @@ protected String getNamespace2() {
   protected Map<String, Object> getImportSchemaJsonMap() {
     return ImmutableMap.of(
         namespace1 + "." + TABLE_1,
-        ImmutableMap.<String, Object>builder().put("transaction", true).build(),
+        ImmutableMap.<String, Object>builder()
+            .put("transaction", true)
+            .put("override-columns-type", getImportableTableOverrideColumnsType())
+            .build(),
         namespace2 + "." + TABLE_2,
         ImmutableMap.<String, Object>builder().put("transaction", false).build());
   }
@@ -149,6 +154,10 @@ private void dropTablesIfExist() throws Exception {
 
   protected abstract void dropNonImportableTable(String namespace, String table) throws Exception;
 
+  protected abstract Map<String, DataType> getImportableTableOverrideColumnsType();
+
+  protected abstract TableMetadata getImportableTableMetadata(boolean hasTypeOverride);
+
   protected void waitForDifferentSessionDdl() {
     // No wait by default.
   }
@@ -163,9 +172,11 @@ public void importTables_ImportableTablesGiven_ShouldImportProperly() throws Exc
     storageAdmin.createNamespace(namespace2);
 
     waitForDifferentSessionDdl();
+    // TABLE_1 sets options to override column types.
     createImportableTable(namespace1, TABLE_1);
 
     waitForDifferentSessionDdl();
+    // TABLE_2 does not set options to override column types.
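For reference, a storage-specific subclass supplies the override map and the expected post-import metadata through the two new abstract hooks above. The following is a minimal sketch, assuming hypothetical column names (pk, col_time, col_timestamp) and a TEXT fallback type; none of these names come from the patch itself:

  @Override
  protected Map<String, DataType> getImportableTableOverrideColumnsType() {
    // Map native column names to the ScalarDB types they should be imported as.
    return ImmutableMap.of("col_time", DataType.TIME, "col_timestamp", DataType.TIMESTAMP);
  }

  @Override
  protected TableMetadata getImportableTableMetadata(boolean hasTypeOverride) {
    // Without the override, time-like native columns fall back to a default
    // ScalarDB type (TEXT is assumed here purely for illustration).
    return TableMetadata.newBuilder()
        .addPartitionKey("pk")
        .addColumn("pk", DataType.INT)
        .addColumn("col_time", hasTypeOverride ? DataType.TIME : DataType.TEXT)
        .addColumn("col_timestamp", hasTypeOverride ? DataType.TIMESTAMP : DataType.TEXT)
        .build();
  }

Keying the expected metadata on hasTypeOverride lets the same test verify both the overridden (TABLE_1) and the default (TABLE_2) import paths.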
createImportableTable(namespace2, TABLE_2); // Act @@ -177,6 +188,10 @@ public void importTables_ImportableTablesGiven_ShouldImportProperly() throws Exc assertThat(exitCode).isEqualTo(0); assertThat(transactionAdmin.tableExists(namespace1, TABLE_1)).isTrue(); assertThat(storageAdmin.tableExists(namespace2, TABLE_2)).isTrue(); + assertThat(transactionAdmin.getTableMetadata(namespace1, TABLE_1)) + .isEqualTo(getImportableTableMetadata(true)); + assertThat(storageAdmin.getTableMetadata(namespace2, TABLE_2)) + .isEqualTo(getImportableTableMetadata(false)); assertThat(transactionAdmin.coordinatorTablesExist()).isFalse(); } diff --git a/integration-test/src/main/java/com/scalar/db/schemaloader/SchemaLoaderIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/schemaloader/SchemaLoaderIntegrationTestBase.java index 927edfaeda..29e7be5357 100644 --- a/integration-test/src/main/java/com/scalar/db/schemaloader/SchemaLoaderIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/schemaloader/SchemaLoaderIntegrationTestBase.java @@ -51,34 +51,6 @@ public abstract class SchemaLoaderIntegrationTestBase { private static final String TABLE_1 = "test_table1"; private static final String NAMESPACE_2 = "int_test_" + TEST_NAME + "2"; private static final String TABLE_2 = "test_table2"; - private static final TableMetadata TABLE_1_METADATA = - TableMetadata.newBuilder() - .addPartitionKey("pk1") - .addClusteringKey("ck1", Order.DESC) - .addClusteringKey("ck2", Order.ASC) - .addColumn("pk1", DataType.INT) - .addColumn("ck1", DataType.INT) - .addColumn("ck2", DataType.TEXT) - .addColumn("col1", DataType.INT) - .addColumn("col2", DataType.BIGINT) - .addColumn("col3", DataType.FLOAT) - .addColumn("col4", DataType.DOUBLE) - .addColumn("col5", DataType.TEXT) - .addColumn("col6", DataType.BLOB) - .addColumn("col7", DataType.BOOLEAN) - .addSecondaryIndex("col1") - .addSecondaryIndex("col5") - .build(); - private static final TableMetadata TABLE_2_METADATA = - TableMetadata.newBuilder() - .addPartitionKey("pk1") - .addClusteringKey("ck1", Order.ASC) - .addColumn("pk1", DataType.INT) - .addColumn("ck1", DataType.INT) - .addColumn("col1", DataType.INT) - .addColumn("col2", DataType.BIGINT) - .addColumn("col3", DataType.FLOAT) - .build(); private DistributedStorageAdmin storageAdmin; private DistributedTransactionAdmin transactionAdmin; @@ -87,6 +59,45 @@ public abstract class SchemaLoaderIntegrationTestBase { private String systemNamespaceName; private AdminTestUtils adminTestUtils; + private TableMetadata getTable1Metadata() { + TableMetadata.Builder builder = + TableMetadata.newBuilder() + .addPartitionKey("pk1") + .addClusteringKey("ck1", Order.DESC) + .addClusteringKey("ck2", Order.ASC) + .addColumn("pk1", DataType.INT) + .addColumn("ck1", DataType.INT) + .addColumn("ck2", DataType.TEXT) + .addColumn("col1", DataType.INT) + .addColumn("col2", DataType.BIGINT) + .addColumn("col3", DataType.FLOAT) + .addColumn("col4", DataType.DOUBLE) + .addColumn("col5", DataType.TEXT) + .addColumn("col6", DataType.BLOB) + .addColumn("col7", DataType.BOOLEAN) + .addColumn("col8", DataType.DATE) + .addColumn("col9", DataType.TIME) + .addColumn("col10", DataType.TIMESTAMPTZ); + if (isTimestampTypeSupported()) { + builder.addColumn("col11", DataType.TIMESTAMP); + } + + builder.addSecondaryIndex("col1").addSecondaryIndex("col5"); + return builder.build(); + } + + private TableMetadata getTable2Metadata() { + return TableMetadata.newBuilder() + .addPartitionKey("pk1") + .addClusteringKey("ck1", 
Order.ASC) + .addColumn("pk1", DataType.INT) + .addColumn("ck1", DataType.INT) + .addColumn("col1", DataType.INT) + .addColumn("col2", DataType.BIGINT) + .addColumn("col3", DataType.FLOAT) + .build(); + } + @BeforeAll public void beforeAll() throws Exception { initialize(TEST_NAME); @@ -157,6 +168,12 @@ protected Map getSchemaJsonMap() { .put("col5", "TEXT") .put("col6", "BLOB") .put("col7", "BOOLEAN") + .put("col8", "DATE") + .put("col9", "TIME") + .putAll( + isTimestampTypeSupported() + ? ImmutableMap.of("col10", "TIMESTAMPTZ", "col11", "TIMESTAMP") + : ImmutableMap.of("col10", "TIMESTAMPTZ")) .build()) .put("secondary-index", Arrays.asList("col1", "col5")) .build(), @@ -193,10 +210,16 @@ protected Map getAlteredSchemaJsonMap() { .put("col5", "TEXT") .put("col6", "BLOB") .put("col7", "BOOLEAN") - .put("col8", "TEXT") - .put("col9", "BLOB") + .put("col8", "DATE") + .put("col9", "TIME") + .putAll( + isTimestampTypeSupported() + ? ImmutableMap.of("col10", "TIMESTAMPTZ", "col11", "TIMESTAMP") + : ImmutableMap.of("col10", "TIMESTAMPTZ")) + .put("col12", "TEXT") + .put("col13", "BLOB") .build()) - .put("secondary-index", Arrays.asList("col3", "col8")) + .put("secondary-index", Arrays.asList("col3", "col12")) .put("compaction-strategy", "LCS") .put("network-strategy", "SimpleStrategy") .put("replication-factor", "1") @@ -402,8 +425,9 @@ public void createTablesThenDropTablesThenRepairAllWithoutCoordinator_ShouldExec assertThat(storageAdmin.namespaceExists(namespace2)).isTrue(); assertThat(adminTestUtils.tableExists(namespace1, TABLE_1)).isTrue(); assertThat(adminTestUtils.tableExists(namespace2, TABLE_2)).isTrue(); - assertThat(transactionAdmin.getTableMetadata(namespace1, TABLE_1)).isEqualTo(TABLE_1_METADATA); - assertThat(storageAdmin.getTableMetadata(namespace2, TABLE_2)).isEqualTo(TABLE_2_METADATA); + assertThat(transactionAdmin.getTableMetadata(namespace1, TABLE_1)) + .isEqualTo(getTable1Metadata()); + assertThat(storageAdmin.getTableMetadata(namespace2, TABLE_2)).isEqualTo(getTable2Metadata()); assertThat(adminTestUtils.areTableAndMetadataForCoordinatorTablesPresent()).isFalse(); } @@ -433,8 +457,9 @@ public void createTablesThenDropTablesThenRepairAllWithCoordinator_ShouldExecute assertThat(storageAdmin.namespaceExists(namespace2)).isTrue(); assertThat(adminTestUtils.tableExists(namespace1, TABLE_1)).isTrue(); assertThat(adminTestUtils.tableExists(namespace2, TABLE_2)).isTrue(); - assertThat(transactionAdmin.getTableMetadata(namespace1, TABLE_1)).isEqualTo(TABLE_1_METADATA); - assertThat(storageAdmin.getTableMetadata(namespace2, TABLE_2)).isEqualTo(TABLE_2_METADATA); + assertThat(transactionAdmin.getTableMetadata(namespace1, TABLE_1)) + .isEqualTo(getTable1Metadata()); + assertThat(storageAdmin.getTableMetadata(namespace2, TABLE_2)).isEqualTo(getTable2Metadata()); assertThat(adminTestUtils.areTableAndMetadataForCoordinatorTablesPresent()).isTrue(); } @@ -447,16 +472,16 @@ public void createTableThenAlterTables_ShouldExecuteProperly() throws Exception assertThat(exitCodeCreation).isZero(); TableMetadata expectedTable1Metadata = - TableMetadata.newBuilder(TABLE_1_METADATA) - .addColumn("col8", DataType.TEXT) - .addColumn("col9", DataType.BLOB) + TableMetadata.newBuilder(getTable1Metadata()) + .addColumn("col12", DataType.TEXT) + .addColumn("col13", DataType.BLOB) .removeSecondaryIndex("col1") .removeSecondaryIndex("col5") .addSecondaryIndex("col3") - .addSecondaryIndex("col8") + .addSecondaryIndex("col12") .build(); TableMetadata expectedTable2Metadata = - 
TableMetadata.newBuilder(TABLE_2_METADATA).addColumn("col4", DataType.TEXT).build(); + TableMetadata.newBuilder(getTable2Metadata()).addColumn("col4", DataType.TEXT).build(); // Act int exitCodeAlteration = @@ -555,4 +580,8 @@ private int executeWithArgs(List args) { protected void waitForCreationIfNecessary() { // Do nothing } + + protected boolean isTimestampTypeSupported() { + return true; + } } diff --git a/integration-test/src/main/java/com/scalar/db/transaction/singlecrudoperation/SingleCrudOperationTransactionIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/transaction/singlecrudoperation/SingleCrudOperationTransactionIntegrationTestBase.java index 2130ffe1ce..d9c27237c1 100644 --- a/integration-test/src/main/java/com/scalar/db/transaction/singlecrudoperation/SingleCrudOperationTransactionIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/transaction/singlecrudoperation/SingleCrudOperationTransactionIntegrationTestBase.java @@ -2,6 +2,7 @@ import com.scalar.db.api.DistributedTransactionIntegrationTestBase; import com.scalar.db.api.Insert; +import com.scalar.db.api.InsertBuilder; import com.scalar.db.config.DatabaseConfig; import com.scalar.db.exception.transaction.TransactionException; import com.scalar.db.io.Key; @@ -35,16 +36,14 @@ protected void populateRecords() throws TransactionException { for (int j = 0; j < NUM_TYPES; j++) { Key partitionKey = Key.ofInt(ACCOUNT_ID, i); Key clusteringKey = Key.ofInt(ACCOUNT_TYPE, j); - Insert insert = + InsertBuilder.Buildable insert = Insert.newBuilder() .namespace(namespace) .table(TABLE) .partitionKey(partitionKey) - .clusteringKey(clusteringKey) - .intValue(BALANCE, INITIAL_BALANCE) - .intValue(SOME_COLUMN, i * j) - .build(); - manager.insert(insert); + .clusteringKey(clusteringKey); + prepareNonKeyColumns(i, j).forEach(insert::value); + manager.insert(insert.build()); } } } diff --git a/integration-test/src/main/java/com/scalar/db/util/TestUtils.java b/integration-test/src/main/java/com/scalar/db/util/TestUtils.java index 1830e6d0eb..5f59726894 100644 --- a/integration-test/src/main/java/com/scalar/db/util/TestUtils.java +++ b/integration-test/src/main/java/com/scalar/db/util/TestUtils.java @@ -11,13 +11,24 @@ import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.Column; import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; import com.scalar.db.io.IntColumn; import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.ZoneOffset; +import java.time.temporal.ChronoField; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Random; import java.util.Set; @@ -61,6 +72,14 @@ public static Column getColumnWithRandomValue( byte[] bytes = new byte[length]; random.nextBytes(bytes); return BlobColumn.of(columnName, bytes); + case DATE: + return DateColumn.of(columnName, nextDate(random)); + case TIME: + return TimeColumn.of(columnName, nextTime(random)); + case TIMESTAMP: + return TimestampColumn.of(columnName, nextTimestamp(random)); + case TIMESTAMPTZ: + return TimestampTZColumn.of(columnName, nextTimestampTZ(random)); default: throw new 
AssertionError(); } @@ -68,19 +87,76 @@ public static Column getColumnWithRandomValue( public static long nextBigInt(Random random) { return random - .longs(BigIntValue.MIN_VALUE, (BigIntValue.MAX_VALUE + 1)) - .limit(1) + .longs(1, BigIntColumn.MIN_VALUE, (BigIntColumn.MAX_VALUE + 1)) .findFirst() .orElse(0); } public static float nextFloat(Random random) { - return (float) - random.doubles(Float.MIN_VALUE, Float.MAX_VALUE).limit(1).findFirst().orElse(0.0d); + return (float) random.doubles(1, Float.MIN_VALUE, Float.MAX_VALUE).findFirst().orElse(0.0d); } public static double nextDouble(Random random) { - return random.doubles(Double.MIN_VALUE, Double.MAX_VALUE).limit(1).findFirst().orElse(0.0d); + return random.doubles(1, Double.MIN_VALUE, Double.MAX_VALUE).findFirst().orElse(0.0d); + } + + public static LocalDate nextDate(Random random) { + return nextLocalDate( + random, DateColumn.MIN_VALUE.toEpochDay(), DateColumn.MAX_VALUE.toEpochDay()); + } + + public static LocalTime nextTime(Random random) { + return nextLocalTime( + random, + TimeColumn.MIN_VALUE.toNanoOfDay(), + TimeColumn.MAX_VALUE.toNanoOfDay(), + TimeColumn.FRACTIONAL_SECONDS_PRECISION_IN_NANOSECONDS); + } + + public static LocalDateTime nextTimestamp(Random random) { + LocalDate date = + nextLocalDate( + random, + TimestampColumn.MIN_VALUE.getLong(ChronoField.EPOCH_DAY), + TimestampColumn.MAX_VALUE.getLong(ChronoField.EPOCH_DAY)); + LocalTime time = + nextLocalTime( + random, + TimestampColumn.MIN_VALUE.getLong(ChronoField.NANO_OF_DAY), + TimestampColumn.MAX_VALUE.getLong(ChronoField.NANO_OF_DAY), + TimestampColumn.FRACTIONAL_SECONDS_PRECISION_IN_NANOSECONDS); + + return LocalDateTime.of(date, time); + } + + public static Instant nextTimestampTZ(Random random) { + LocalDate date = + nextLocalDate( + random, + TimestampTZColumn.MIN_VALUE.atOffset(ZoneOffset.UTC).getLong(ChronoField.EPOCH_DAY), + TimestampTZColumn.MAX_VALUE.atOffset(ZoneOffset.UTC).getLong(ChronoField.EPOCH_DAY)); + LocalTime time = + nextLocalTime( + random, + TimestampTZColumn.MIN_VALUE.atOffset(ZoneOffset.UTC).getLong(ChronoField.NANO_OF_DAY), + TimestampTZColumn.MAX_VALUE.atOffset(ZoneOffset.UTC).getLong(ChronoField.NANO_OF_DAY), + TimestampTZColumn.FRACTIONAL_SECONDS_PRECISION_IN_NANOSECONDS); + + return LocalDateTime.of(date, time).toInstant(ZoneOffset.UTC); + } + + private static LocalDate nextLocalDate(Random random, long minEpochDay, long maxEpochDay) { + long epochDay = random.longs(1, minEpochDay, maxEpochDay + 1).findFirst().orElse(0); + return LocalDate.ofEpochDay(epochDay); + } + + @SuppressWarnings("JavaLocalTimeGetNano") + public static LocalTime nextLocalTime( + Random random, long minNanoOfDay, long maxNanoOfDay, int resolutionInNanos) { + long nanoOfDay = random.longs(1, minNanoOfDay, maxNanoOfDay + 1).findFirst().orElse(0); + LocalTime time = LocalTime.ofNanoOfDay(nanoOfDay); + + return time.withNano(time.getNano() / resolutionInNanos * resolutionInNanos); } public static Column getColumnWithMinValue(String columnName, DataType dataType) { @@ -104,6 +180,14 @@ public static Column getColumnWithMinValue( return TextColumn.of(columnName, allowEmpty ? 
"" : "\u0001"); case BOOLEAN: return BooleanColumn.of(columnName, false); + case DATE: + return DateColumn.of(columnName, DateColumn.MIN_VALUE); + case TIME: + return TimeColumn.of(columnName, TimeColumn.MIN_VALUE); + case TIMESTAMP: + return TimestampColumn.of(columnName, TimestampColumn.MIN_VALUE); + case TIMESTAMPTZ: + return TimestampTZColumn.of(columnName, TimestampTZColumn.MIN_VALUE); default: throw new AssertionError(); } @@ -129,6 +213,14 @@ public static Column getColumnWithMaxValue(String columnName, DataType dataTy return TextColumn.of(columnName, builder.toString()); case BOOLEAN: return BooleanColumn.of(columnName, true); + case DATE: + return DateColumn.of(columnName, DateColumn.MAX_VALUE); + case TIME: + return TimeColumn.of(columnName, TimeColumn.MAX_VALUE); + case TIMESTAMP: + return TimestampColumn.of(columnName, TimestampColumn.MAX_VALUE); + case TIMESTAMPTZ: + return TimestampTZColumn.of(columnName, TimestampTZColumn.MAX_VALUE); default: throw new AssertionError(); } @@ -190,7 +282,7 @@ public static void assertResultsContainsExactlyInAnyOrder( for (Result actualResult : actualResults) { ExpectedResult matchedExpectedResult = findFirstMatchingResult(actualResult, expectedResults); if (matchedExpectedResult == null) { - Assertions.fail("The actual result " + actualResult + " is not expected"); + Assertions.fail("This actual result is not expected: " + actualResult); } else { expectedResults.remove(matchedExpectedResult); } @@ -268,6 +360,11 @@ public ExpectedResultBuilder column(Column column) { return this; } + public ExpectedResultBuilder columns(Collection> columns) { + this.columns.addAll(columns); + return this; + } + public ExpectedResult build() { return new ExpectedResult(this); } diff --git a/schema-loader/sample/import_schema_sample.json b/schema-loader/sample/import_schema_sample.json index 3078dea3a2..b1431569ff 100644 --- a/schema-loader/sample/import_schema_sample.json +++ b/schema-loader/sample/import_schema_sample.json @@ -1,6 +1,10 @@ { "sample_namespace1.sample_table1": { - "transaction": true + "transaction": true, + "override-columns-type": { + "c3": "TIME", + "c5": "TIMESTAMP" + } }, "sample_namespace1.sample_table2": { "transaction": true diff --git a/schema-loader/src/main/java/com/scalar/db/schemaloader/ImportTableSchema.java b/schema-loader/src/main/java/com/scalar/db/schemaloader/ImportTableSchema.java index 4023afe68f..5c114de096 100644 --- a/schema-loader/src/main/java/com/scalar/db/schemaloader/ImportTableSchema.java +++ b/schema-loader/src/main/java/com/scalar/db/schemaloader/ImportTableSchema.java @@ -2,18 +2,23 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; +import com.google.gson.JsonElement; import com.google.gson.JsonObject; import com.scalar.db.common.error.CoreError; +import com.scalar.db.io.DataType; import java.util.Map; +import java.util.Map.Entry; import java.util.Set; import javax.annotation.concurrent.Immutable; @Immutable public class ImportTableSchema { + private static final String OVERRIDE_COLUMNS_TYPE = "override-columns-type"; private final String namespace; private final String tableName; private final boolean isTransactionTable; private final ImmutableMap options; + private final ImmutableMap overrideColumnsType; public ImportTableSchema( String tableFullName, JsonObject tableDefinition, Map options) { @@ -30,9 +35,35 @@ public ImportTableSchema( } else { isTransactionTable = true; } + this.overrideColumnsType = parseOverrideColumnsType(tableFullName, tableDefinition); 
this.options = buildOptions(tableDefinition, options); } + private ImmutableMap parseOverrideColumnsType( + String tableFullName, JsonObject tableDefinition) { + if (!tableDefinition.has(OVERRIDE_COLUMNS_TYPE)) { + return ImmutableMap.of(); + } + JsonObject columns = tableDefinition.getAsJsonObject(OVERRIDE_COLUMNS_TYPE); + ImmutableMap.Builder columnsBuilder = ImmutableMap.builder(); + for (Entry column : columns.entrySet()) { + String columnName = column.getKey(); + String columnDataType = column.getValue().getAsString().trim(); + DataType dataType = TableSchema.DATA_MAP_TYPE.get(columnDataType.toUpperCase()); + if (dataType == null) { + throw new IllegalArgumentException( + CoreError.SCHEMA_LOADER_PARSE_ERROR_INVALID_COLUMN_TYPE.buildMessage( + tableFullName, columnName, column.getValue().getAsString())); + } + columnsBuilder.put(columnName, dataType); + } + return columnsBuilder.buildKeepingLast(); + } + + // For the SpotBugs warning CT_CONSTRUCTOR_THROW + @Override + protected final void finalize() {} + private ImmutableMap buildOptions( JsonObject tableDefinition, Map globalOptions) { ImmutableMap.Builder optionsBuilder = ImmutableMap.builder(); @@ -43,7 +74,8 @@ private ImmutableMap buildOptions( TableSchema.CLUSTERING_KEY, TableSchema.TRANSACTION, TableSchema.COLUMNS, - TableSchema.SECONDARY_INDEX); + TableSchema.SECONDARY_INDEX, + OVERRIDE_COLUMNS_TYPE); tableDefinition.entrySet().stream() .filter(entry -> !keysToIgnore.contains(entry.getKey())) .forEach(entry -> optionsBuilder.put(entry.getKey(), entry.getValue().getAsString())); @@ -66,4 +98,8 @@ public boolean isTransactionTable() { public Map getOptions() { return options; } + + public Map getOverrideColumnsType() { + return overrideColumnsType; + } } diff --git a/schema-loader/src/main/java/com/scalar/db/schemaloader/SchemaOperator.java b/schema-loader/src/main/java/com/scalar/db/schemaloader/SchemaOperator.java index 11b3965f8e..efe7c01ed0 100644 --- a/schema-loader/src/main/java/com/scalar/db/schemaloader/SchemaOperator.java +++ b/schema-loader/src/main/java/com/scalar/db/schemaloader/SchemaOperator.java @@ -404,9 +404,13 @@ public void importTables(List tableSchemaList, Map DATA_MAP_TYPE = + static final ImmutableMap DATA_MAP_TYPE = ImmutableMap.builder() .put("BOOLEAN", DataType.BOOLEAN) .put("INT", DataType.INT) @@ -35,6 +35,10 @@ public class TableSchema { .put("DOUBLE", DataType.DOUBLE) .put("TEXT", DataType.TEXT) .put("BLOB", DataType.BLOB) + .put("DATE", DataType.DATE) + .put("TIME", DataType.TIME) + .put("TIMESTAMP", DataType.TIMESTAMP) + .put("TIMESTAMPTZ", DataType.TIMESTAMPTZ) .build(); private static final ImmutableMap ORDER_MAP = ImmutableMap.builder().put("ASC", Order.ASC).put("DESC", Order.DESC).build(); diff --git a/schema-loader/src/test/java/com/scalar/db/schemaloader/ImportTableSchemaTest.java b/schema-loader/src/test/java/com/scalar/db/schemaloader/ImportTableSchemaTest.java index 84265d52dd..65515f40c7 100644 --- a/schema-loader/src/test/java/com/scalar/db/schemaloader/ImportTableSchemaTest.java +++ b/schema-loader/src/test/java/com/scalar/db/schemaloader/ImportTableSchemaTest.java @@ -1,10 +1,12 @@ package com.scalar.db.schemaloader; +import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.entry; import com.google.common.collect.ImmutableMap; import com.google.gson.JsonObject; import com.google.gson.JsonParser; +import com.scalar.db.io.DataType; import java.util.Collections; import org.assertj.core.api.Assertions; import org.junit.jupiter.api.BeforeEach; 
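As a quick usage sketch of the parsing above (mirroring the tests that follow; the "ns.tbl" name and the c3/c5 columns are taken straight from them):

  JsonObject tableDefinition =
      JsonParser.parseString(
              "{\"transaction\": true,"
                  + " \"override-columns-type\": {\"c3\": \"TIME\", \"c5\": \"TIMESTAMP\"}}")
          .getAsJsonObject();
  ImportTableSchema tableSchema =
      new ImportTableSchema("ns.tbl", tableDefinition, Collections.emptyMap());
  // getOverrideColumnsType() now returns {c3=TIME, c5=TIMESTAMP}; the option map
  // stays empty because "override-columns-type" is one of the reserved keys that
  // buildOptions ignores.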
@@ -22,8 +24,7 @@ public void setUp() throws Exception { } @Test - public void constructor_DefinitionWithTransactionTrueGiven_ShouldConstructProperTableSchema() - throws SchemaLoaderException { + public void constructor_DefinitionWithTransactionTrueGiven_ShouldConstructProperTableSchema() { String tableDefinitionJson = "{\"transaction\": true}"; JsonObject tableDefinition = JsonParser.parseString(tableDefinitionJson).getAsJsonObject(); @@ -32,15 +33,15 @@ public void constructor_DefinitionWithTransactionTrueGiven_ShouldConstructProper new ImportTableSchema("ns.tbl", tableDefinition, Collections.emptyMap()); // Assert - Assertions.assertThat(tableSchema.getNamespace()).isEqualTo("ns"); - Assertions.assertThat(tableSchema.getTable()).isEqualTo("tbl"); - Assertions.assertThat(tableSchema.isTransactionTable()).isEqualTo(true); - Assertions.assertThat(tableSchema.getOptions()).isEmpty(); + assertThat(tableSchema.getNamespace()).isEqualTo("ns"); + assertThat(tableSchema.getTable()).isEqualTo("tbl"); + assertThat(tableSchema.isTransactionTable()).isEqualTo(true); + assertThat(tableSchema.getOptions()).isEmpty(); + assertThat(tableSchema.getOverrideColumnsType()).isEmpty(); } @Test - public void constructor_DefinitionWithTransactionFalseGiven_ShouldConstructProperTableSchema() - throws SchemaLoaderException { + public void constructor_DefinitionWithTransactionFalseGiven_ShouldConstructProperTableSchema() { String tableDefinitionJson = "{\"transaction\": false}"; JsonObject tableDefinition = JsonParser.parseString(tableDefinitionJson).getAsJsonObject(); @@ -49,10 +50,37 @@ public void constructor_DefinitionWithTransactionFalseGiven_ShouldConstructPrope new ImportTableSchema("ns.tbl", tableDefinition, Collections.emptyMap()); // Assert - Assertions.assertThat(tableSchema.getNamespace()).isEqualTo("ns"); - Assertions.assertThat(tableSchema.getTable()).isEqualTo("tbl"); - Assertions.assertThat(tableSchema.isTransactionTable()).isEqualTo(false); - Assertions.assertThat(tableSchema.getOptions()).isEmpty(); + assertThat(tableSchema.getNamespace()).isEqualTo("ns"); + assertThat(tableSchema.getTable()).isEqualTo("tbl"); + assertThat(tableSchema.isTransactionTable()).isEqualTo(false); + assertThat(tableSchema.getOptions()).isEmpty(); + assertThat(tableSchema.getOverrideColumnsType()).isEmpty(); + } + + @Test + public void + constructor_DefinitionWithOverrideColumnsTypeGiven_ShouldConstructProperTableSchema() { + String tableDefinitionJson = + "{" + + " \"transaction\": true," + + " \"override-columns-type\": {" + + " \"c3\": \"TIME\"," + + " \"c5\": \"TIMESTAMP\"" + + " }" + + " }"; + JsonObject tableDefinition = JsonParser.parseString(tableDefinitionJson).getAsJsonObject(); + + // Act + ImportTableSchema tableSchema = + new ImportTableSchema("ns.tbl", tableDefinition, Collections.emptyMap()); + + // Assert + assertThat(tableSchema.getNamespace()).isEqualTo("ns"); + assertThat(tableSchema.getTable()).isEqualTo("tbl"); + assertThat(tableSchema.isTransactionTable()).isEqualTo(true); + assertThat(tableSchema.getOptions()).isEmpty(); + assertThat(tableSchema.getOverrideColumnsType()) + .containsOnly(entry("c3", DataType.TIME), entry("c5", DataType.TIMESTAMP)); } @Test @@ -67,8 +95,7 @@ public void constructor_WrongFormatTableFullNameGiven_ShouldThrowIllegalArgument } @Test - public void constructor_DefinitionWithoutTransactionGiven_ShouldConstructProperTableSchema() - throws SchemaLoaderException { + public void constructor_DefinitionWithoutTransactionGiven_ShouldConstructProperTableSchema() { String 
     String tableDefinitionJson = "{}";
     JsonObject tableDefinition = JsonParser.parseString(tableDefinitionJson).getAsJsonObject();
 
@@ -77,17 +104,17 @@ public void constructor_DefinitionWithoutTransactionGiven_ShouldConstructProperT
         new ImportTableSchema("ns.tbl", tableDefinition, Collections.emptyMap());
 
     // Assert
-    Assertions.assertThat(tableSchema.getNamespace()).isEqualTo("ns");
-    Assertions.assertThat(tableSchema.getTable()).isEqualTo("tbl");
-    Assertions.assertThat(tableSchema.isTransactionTable()).isEqualTo(true);
-    Assertions.assertThat(tableSchema.getOptions()).isEmpty();
+    assertThat(tableSchema.getNamespace()).isEqualTo("ns");
+    assertThat(tableSchema.getTable()).isEqualTo("tbl");
+    assertThat(tableSchema.isTransactionTable()).isEqualTo(true);
+    assertThat(tableSchema.getOptions()).isEmpty();
+    assertThat(tableSchema.getOverrideColumnsType()).isEmpty();
   }
 
   @Test
-  public void constructor_DefinitionWithGlobalAndSchemaOptions_ShouldConstructWithProperOptions()
-      throws SchemaLoaderException {
+  public void constructor_DefinitionWithGlobalAndSchemaOptions_ShouldConstructWithProperOptions() {
     String tableDefinitionJson =
-        "{\"partition-key\": \"ignored\", \"columns\": \"ignored\", \"clustering-key\": \"ignored\", \"secondary-index\": \"ignored\",\"transaction\": false, \"opt1\": \"schema-opt1\", \"opt3\": \"schema-opt3\"}";
+        "{\"partition-key\": \"ignored\", \"columns\": \"ignored\", \"clustering-key\": \"ignored\", \"secondary-index\": \"ignored\",\"transaction\": false, \"opt1\": \"schema-opt1\", \"opt3\": \"schema-opt3\", \"override-columns-type\": {\"c1\": \"DOUBLE\"}}";
     JsonObject tableDefinition = JsonParser.parseString(tableDefinitionJson).getAsJsonObject();
 
     // Act
@@ -98,13 +125,14 @@ public void constructor_DefinitionWithGlobalAndSchemaOptions_ShouldConstructWith
             ImmutableMap.of("opt1", "global-opt1", "opt2", "global-opt2"));
 
     // Assert
-    Assertions.assertThat(tableSchema.getNamespace()).isEqualTo("ns");
-    Assertions.assertThat(tableSchema.getTable()).isEqualTo("tbl");
-    Assertions.assertThat(tableSchema.isTransactionTable()).isEqualTo(false);
-    Assertions.assertThat(tableSchema.getOptions())
+    assertThat(tableSchema.getNamespace()).isEqualTo("ns");
+    assertThat(tableSchema.getTable()).isEqualTo("tbl");
+    assertThat(tableSchema.isTransactionTable()).isEqualTo(false);
+    assertThat(tableSchema.getOptions())
         .containsOnly(
             entry("opt1", "schema-opt1"),
             entry("opt2", "global-opt2"),
             entry("opt3", "schema-opt3"));
+    assertThat(tableSchema.getOverrideColumnsType()).containsOnly(entry("c1", DataType.DOUBLE));
   }
 }
diff --git a/schema-loader/src/test/java/com/scalar/db/schemaloader/SchemaOperatorTest.java b/schema-loader/src/test/java/com/scalar/db/schemaloader/SchemaOperatorTest.java
index c0a5039c85..9bc58c13fa 100644
--- a/schema-loader/src/test/java/com/scalar/db/schemaloader/SchemaOperatorTest.java
+++ b/schema-loader/src/test/java/com/scalar/db/schemaloader/SchemaOperatorTest.java
@@ -512,12 +512,14 @@ public void importTables_WithTransactionTables_ShouldCallProperMethods() throws
     when(importTableSchema.getNamespace()).thenReturn("ns");
     when(importTableSchema.isTransactionTable()).thenReturn(true);
     when(importTableSchema.getTable()).thenReturn("tb");
+    Map<String, DataType> overrideColumnsType = ImmutableMap.of("c1", DataType.INT);
+    when(importTableSchema.getOverrideColumnsType()).thenReturn(overrideColumnsType);
 
     // Act
     operator.importTables(tableSchemaList, options);
 
     // Assert
-    verify(transactionAdmin, times(3)).importTable("ns", "tb", options);
"tb", options, overrideColumnsType); verifyNoInteractions(storageAdmin); } @@ -529,12 +531,14 @@ public void importTables_WithoutTransactionTables_ShouldCallProperMethods() thro when(importTableSchema.getNamespace()).thenReturn("ns"); when(importTableSchema.isTransactionTable()).thenReturn(false); when(importTableSchema.getTable()).thenReturn("tb"); + Map overrideColumnsType = ImmutableMap.of("c1", DataType.INT); + when(importTableSchema.getOverrideColumnsType()).thenReturn(overrideColumnsType); // Act operator.importTables(tableSchemaList, options); // Assert - verify(storageAdmin, times(3)).importTable("ns", "tb", options); + verify(storageAdmin, times(3)).importTable("ns", "tb", options, overrideColumnsType); verifyNoInteractions(transactionAdmin); } diff --git a/schema-loader/src/test/java/com/scalar/db/schemaloader/TableSchemaTest.java b/schema-loader/src/test/java/com/scalar/db/schemaloader/TableSchemaTest.java index f54f2d59f9..13bb5dca26 100644 --- a/schema-loader/src/test/java/com/scalar/db/schemaloader/TableSchemaTest.java +++ b/schema-loader/src/test/java/com/scalar/db/schemaloader/TableSchemaTest.java @@ -152,7 +152,13 @@ public void buildOptions_OptionsMapFromTableDefinitionAndOptionsGiven_ShouldRetu + " \"c3\": \"BLOB\"," + " \"c4\": \"INT\"," + " \"c5\": \"BOOLEAN\"," - + " \"c6\": \"INT\"" + + " \"c6\": \"FLOAT\"," + + " \"c7\": \"DOUBLE\"," + + " \"c8\": \"BIGINT\"," + + " \"c9\": \"DATE\"," + + " \"c10\": \"TIME\"," + + " \"c11\": \"TIMESTAMP\"," + + " \"c12\": \"TIMESTAMPTZ\"" + "}," + "\"ru\": 5000," + "\"compaction-strategy\": \"LCS\"," @@ -171,7 +177,13 @@ public void buildOptions_OptionsMapFromTableDefinitionAndOptionsGiven_ShouldRetu tableBuilder.addColumn("c3", DataType.BLOB); tableBuilder.addColumn("c4", DataType.INT); tableBuilder.addColumn("c5", DataType.BOOLEAN); - tableBuilder.addColumn("c6", DataType.INT); + tableBuilder.addColumn("c6", DataType.FLOAT); + tableBuilder.addColumn("c7", DataType.DOUBLE); + tableBuilder.addColumn("c8", DataType.BIGINT); + tableBuilder.addColumn("c9", DataType.DATE); + tableBuilder.addColumn("c10", DataType.TIME); + tableBuilder.addColumn("c11", DataType.TIMESTAMP); + tableBuilder.addColumn("c12", DataType.TIMESTAMPTZ); TableMetadata expectedTableMetadata = tableBuilder.build(); // Act From 99f8b453c28d362fb793150c6812ea6a872a18b0 Mon Sep 17 00:00:00 2001 From: Kodai Doki <52027276+KodaiD@users.noreply.github.com> Date: Wed, 22 Jan 2025 13:31:45 +0900 Subject: [PATCH 15/18] Remove unnecessary loggings in statement handlers (#2469) --- .../com/scalar/db/storage/cassandra/BatchHandler.java | 1 - .../db/storage/cassandra/MutateStatementHandler.java | 6 ------ .../db/storage/cassandra/SelectStatementHandler.java | 8 +------- .../com/scalar/db/storage/cassandra/StatementHandler.java | 6 +----- .../java/com/scalar/db/storage/cosmos/BatchHandler.java | 4 ---- .../scalar/db/storage/cosmos/MutateStatementHandler.java | 4 ---- 6 files changed, 2 insertions(+), 27 deletions(-) diff --git a/core/src/main/java/com/scalar/db/storage/cassandra/BatchHandler.java b/core/src/main/java/com/scalar/db/storage/cassandra/BatchHandler.java index b604e5bf4c..36662b7bb2 100644 --- a/core/src/main/java/com/scalar/db/storage/cassandra/BatchHandler.java +++ b/core/src/main/java/com/scalar/db/storage/cassandra/BatchHandler.java @@ -73,7 +73,6 @@ public void handle(List mutations) CoreError.CASSANDRA_OPERATION_FAILED_IN_BATCH.buildMessage(writeType), e); } } catch (RuntimeException e) { - logger.warn(e.getMessage(), e); throw new RetriableExecutionException( 
From 99f8b453c28d362fb793150c6812ea6a872a18b0 Mon Sep 17 00:00:00 2001
From: Kodai Doki <52027276+KodaiD@users.noreply.github.com>
Date: Wed, 22 Jan 2025 13:31:45 +0900
Subject: [PATCH 15/18] Remove unnecessary loggings in statement handlers (#2469)

---
 .../com/scalar/db/storage/cassandra/BatchHandler.java     | 1 -
 .../db/storage/cassandra/MutateStatementHandler.java      | 6 ------
 .../db/storage/cassandra/SelectStatementHandler.java      | 8 +-------
 .../com/scalar/db/storage/cassandra/StatementHandler.java | 6 +-----
 .../java/com/scalar/db/storage/cosmos/BatchHandler.java   | 4 ----
 .../scalar/db/storage/cosmos/MutateStatementHandler.java  | 4 ----
 6 files changed, 2 insertions(+), 27 deletions(-)

diff --git a/core/src/main/java/com/scalar/db/storage/cassandra/BatchHandler.java b/core/src/main/java/com/scalar/db/storage/cassandra/BatchHandler.java
index b604e5bf4c..36662b7bb2 100644
--- a/core/src/main/java/com/scalar/db/storage/cassandra/BatchHandler.java
+++ b/core/src/main/java/com/scalar/db/storage/cassandra/BatchHandler.java
@@ -73,7 +73,6 @@ public void handle(List<? extends Mutation> mutations)
             CoreError.CASSANDRA_OPERATION_FAILED_IN_BATCH.buildMessage(writeType), e);
       }
     } catch (RuntimeException e) {
-      logger.warn(e.getMessage(), e);
       throw new RetriableExecutionException(
           CoreError.CASSANDRA_ERROR_OCCURRED_IN_BATCH.buildMessage(e.getMessage()), e);
     }
diff --git a/core/src/main/java/com/scalar/db/storage/cassandra/MutateStatementHandler.java b/core/src/main/java/com/scalar/db/storage/cassandra/MutateStatementHandler.java
index a5d99a2c70..2594da421c 100644
--- a/core/src/main/java/com/scalar/db/storage/cassandra/MutateStatementHandler.java
+++ b/core/src/main/java/com/scalar/db/storage/cassandra/MutateStatementHandler.java
@@ -15,14 +15,10 @@
 import com.scalar.db.exception.storage.RetriableExecutionException;
 import javax.annotation.Nonnull;
 import javax.annotation.concurrent.ThreadSafe;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /** An abstraction for handler classes for mutate statements */
 @ThreadSafe
 public abstract class MutateStatementHandler extends StatementHandler {
-  private static final Logger logger = LoggerFactory.getLogger(MutateStatementHandler.class);
-
   public MutateStatementHandler(Session session) {
     super(session);
   }
@@ -48,7 +44,6 @@ public ResultSet handle(Operation operation) throws ExecutionException {
       }
       return results;
     } catch (WriteTimeoutException e) {
-      logger.warn("Write timeout happened during mutate operation", e);
       if (e.getWriteType() == WriteType.CAS) {
         // retry needs to be done if applications need to do the operation exactly
         throw new RetriableExecutionException(
@@ -71,7 +66,6 @@ public ResultSet handle(Operation operation) throws ExecutionException {
             CoreError.CASSANDRA_WRITE_TIMEOUT_WITH_OTHER_WRITE_TYPE_IN_MUTATION.buildMessage(), e);
       }
     } catch (RuntimeException e) {
-      logger.warn(e.getMessage(), e);
       throw new RetriableExecutionException(
           CoreError.CASSANDRA_ERROR_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e);
     }
diff --git a/core/src/main/java/com/scalar/db/storage/cassandra/SelectStatementHandler.java b/core/src/main/java/com/scalar/db/storage/cassandra/SelectStatementHandler.java
index 1ca9ddb851..d32168c52a 100644
--- a/core/src/main/java/com/scalar/db/storage/cassandra/SelectStatementHandler.java
+++ b/core/src/main/java/com/scalar/db/storage/cassandra/SelectStatementHandler.java
@@ -33,8 +33,6 @@
 import java.util.stream.IntStream;
 import javax.annotation.Nonnull;
 import javax.annotation.concurrent.ThreadSafe;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * A handler class for select statement
  *
  */
 @ThreadSafe
 public class SelectStatementHandler extends StatementHandler {
-  private static final Logger logger = LoggerFactory.getLogger(SelectStatementHandler.class);
-
   /**
    * Constructs {@code SelectStatementHandler} with the specified {@code Session}
    *
@@ -60,7 +56,6 @@ public ResultSet handle(Operation operation) throws ExecutionException {
     try {
       return handleInternal(operation);
     } catch (RuntimeException e) {
-      logger.error(e.getMessage(), e);
       throw new ExecutionException(
           CoreError.CASSANDRA_ERROR_OCCURRED_IN_SELECTION.buildMessage(e.getMessage()), e);
     }
@@ -296,8 +291,7 @@ private Ordering getOrdering(Scan.Ordering ordering) {
       case DESC:
         return QueryBuilder.desc(quoteIfNecessary(ordering.getColumnName()));
       default:
Using Order.ASC"); - return QueryBuilder.asc(quoteIfNecessary(ordering.getColumnName())); + throw new AssertionError("Unsupported ordering is specified: " + ordering.getOrder()); } } diff --git a/core/src/main/java/com/scalar/db/storage/cassandra/StatementHandler.java b/core/src/main/java/com/scalar/db/storage/cassandra/StatementHandler.java index e2b5fe2862..980ca796f2 100644 --- a/core/src/main/java/com/scalar/db/storage/cassandra/StatementHandler.java +++ b/core/src/main/java/com/scalar/db/storage/cassandra/StatementHandler.java @@ -14,13 +14,10 @@ import com.scalar.db.exception.storage.ExecutionException; import javax.annotation.Nonnull; import javax.annotation.concurrent.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** A handler class for statements */ @ThreadSafe public abstract class StatementHandler { - private static final Logger logger = LoggerFactory.getLogger(StatementHandler.class); protected final Session session; protected final StatementCache cache; @@ -119,8 +116,7 @@ public static ConsistencyLevel convert(Operation operation, Consistency consiste return ConsistencyLevel.QUORUM; } default: - logger.warn("Unsupported consistency is specified. SEQUENTIAL is being used instead"); - return ConsistencyLevel.QUORUM; + throw new AssertionError("Unsupported consistency is specified: " + consistency); } } diff --git a/core/src/main/java/com/scalar/db/storage/cosmos/BatchHandler.java b/core/src/main/java/com/scalar/db/storage/cosmos/BatchHandler.java index 4981297658..b72733b95a 100644 --- a/core/src/main/java/com/scalar/db/storage/cosmos/BatchHandler.java +++ b/core/src/main/java/com/scalar/db/storage/cosmos/BatchHandler.java @@ -12,8 +12,6 @@ import java.util.ArrayList; import java.util.List; import javax.annotation.concurrent.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * A handler for a batch @@ -22,7 +20,6 @@ */ @ThreadSafe public class BatchHandler { - private static final Logger logger = LoggerFactory.getLogger(BatchHandler.class); private static final String MUTATION_STORED_PROCEDURE = "mutate.js"; private final CosmosClient client; private final TableMetadataManager metadataManager; @@ -88,7 +85,6 @@ private void executeStoredProcedure( } private void throwException(CosmosException exception) throws ExecutionException { - logger.error(exception.getMessage(), exception); int statusCode = exception.getSubStatusCode(); if (statusCode == CosmosErrorCode.PRECONDITION_FAILED.get()) { diff --git a/core/src/main/java/com/scalar/db/storage/cosmos/MutateStatementHandler.java b/core/src/main/java/com/scalar/db/storage/cosmos/MutateStatementHandler.java index d0956687a1..0cf7c91709 100644 --- a/core/src/main/java/com/scalar/db/storage/cosmos/MutateStatementHandler.java +++ b/core/src/main/java/com/scalar/db/storage/cosmos/MutateStatementHandler.java @@ -12,13 +12,10 @@ import java.util.ArrayList; import java.util.List; import javax.annotation.concurrent.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** An abstraction for handler classes for mutate statements */ @ThreadSafe public abstract class MutateStatementHandler extends StatementHandler { - private static final Logger logger = LoggerFactory.getLogger(MutateStatementHandler.class); private static final String MUTATION_STORED_PROCEDURE = "mutate.js"; public MutateStatementHandler(CosmosClient client, TableMetadataManager metadataManager) { @@ -60,7 +57,6 @@ protected void executeStoredProcedure(Mutation mutation, TableMetadata tableMeta } private void 
   private void throwException(CosmosException exception) throws ExecutionException {
-    logger.error(exception.getMessage());
     int statusCode = exception.getSubStatusCode();
 
     if (statusCode == CosmosErrorCode.PRECONDITION_FAILED.get()) {

From 46726d93de3c8f3c4381fa28dd254e0d7efd1d44 Mon Sep 17 00:00:00 2001
From: Toshihiro Suzuki
Date: Wed, 22 Jan 2025 16:30:36 +0900
Subject: [PATCH 16/18] Add importTable without overrideColumnsType to DecoratedDistributedTransactionAdmin (#2476)

---
 .../db/common/DecoratedDistributedTransactionAdmin.java | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/core/src/main/java/com/scalar/db/common/DecoratedDistributedTransactionAdmin.java b/core/src/main/java/com/scalar/db/common/DecoratedDistributedTransactionAdmin.java
index 588ec40a77..9adab56f4c 100644
--- a/core/src/main/java/com/scalar/db/common/DecoratedDistributedTransactionAdmin.java
+++ b/core/src/main/java/com/scalar/db/common/DecoratedDistributedTransactionAdmin.java
@@ -195,6 +195,12 @@ public void addNewColumnToTable(
         namespace, table, columnName, columnType, encrypted);
   }
 
+  @Override
+  public void importTable(String namespace, String table, Map<String, String> options)
+      throws ExecutionException {
+    distributedTransactionAdmin.importTable(namespace, table, options);
+  }
+
   @Override
   public void importTable(
       String namespace,
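As a usage sketch only: combined with the schema-loader changes in the earlier patch, an application holding a `DistributedTransactionAdmin` can import a table either with inferred column types or with explicit overrides. The configuration path, namespace, table, and column names below are illustrative assumptions, and the four-argument overload mirrors the calls verified in `SchemaOperatorTest`:

```java
import com.google.common.collect.ImmutableMap;
import com.scalar.db.api.DistributedTransactionAdmin;
import com.scalar.db.io.DataType;
import com.scalar.db.service.TransactionFactory;
import java.util.Collections;
import java.util.Map;

public class ImportTableExample {
  public static void main(String[] args) throws Exception {
    // "scalardb.properties" is an assumed configuration path.
    TransactionFactory factory = TransactionFactory.create("scalardb.properties");
    DistributedTransactionAdmin admin = factory.getTransactionAdmin();

    // Import with all column types inferred from the underlying table
    // (the delegating overload added in this patch).
    admin.importTable("ns", "tbl1", Collections.emptyMap());

    // Import while overriding the types of selected columns, mirroring what
    // SchemaOperator passes from ImportTableSchema.getOverrideColumnsType().
    Map<String, DataType> overrideColumnsType =
        ImmutableMap.of("c3", DataType.TIME, "c5", DataType.TIMESTAMP);
    admin.importTable("ns", "tbl2", Collections.emptyMap(), overrideColumnsType);

    admin.close();
  }
}
```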
From 040c85d6ddfcecdd19febc668f7e4b843009e289 Mon Sep 17 00:00:00 2001
From: Toshihiro Suzuki
Date: Fri, 24 Jan 2025 13:50:01 +0900
Subject: [PATCH 17/18] Update Scheduled Vulnerability Check and Dependabot configuration (#2479)

---
 .github/dependabot.yml                      | 20 ++++++++++----------
 .github/workflows/scheduled-vuln-check.yaml | 18 +++++++++---------
 2 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index eaa39cfaf1..6a7ac7fa07 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -42,9 +42,9 @@ updates:
       - "scalar-labs/scalardb"
 
   - package-ecosystem: "github-actions"
-    target-branch: "3.14"
+    target-branch: "3.15"
     groups:
-      actions on branch 3.14:
+      actions on branch 3.15:
         patterns:
           - "*"
     directory: "/"
@@ -54,9 +54,9 @@ updates:
       - "scalar-labs/scalardb"
 
   - package-ecosystem: "github-actions"
-    target-branch: "3.13"
+    target-branch: "3.14"
     groups:
-      actions on branch 3.13:
+      actions on branch 3.14:
         patterns:
           - "*"
     directory: "/"
@@ -66,9 +66,9 @@ updates:
       - "scalar-labs/scalardb"
 
   - package-ecosystem: "github-actions"
-    target-branch: "3.12"
+    target-branch: "3.13"
     groups:
-      actions on branch 3.12:
+      actions on branch 3.13:
         patterns:
           - "*"
     directory: "/"
@@ -78,9 +78,9 @@ updates:
       - "scalar-labs/scalardb"
 
   - package-ecosystem: "github-actions"
-    target-branch: "3.11"
+    target-branch: "3.12"
     groups:
-      actions on branch 3.11:
+      actions on branch 3.12:
         patterns:
           - "*"
     directory: "/"
@@ -90,9 +90,9 @@ updates:
       - "scalar-labs/scalardb"
 
   - package-ecosystem: "github-actions"
-    target-branch: "3.10"
+    target-branch: "3.11"
     groups:
-      actions on branch 3.10:
+      actions on branch 3.11:
         patterns:
           - "*"
     directory: "/"
diff --git a/.github/workflows/scheduled-vuln-check.yaml b/.github/workflows/scheduled-vuln-check.yaml
index 4a8f5446d5..57c01edfda 100644
--- a/.github/workflows/scheduled-vuln-check.yaml
+++ b/.github/workflows/scheduled-vuln-check.yaml
@@ -16,15 +16,6 @@ jobs:
       CR_PAT: ${{ secrets.CR_PAT }}
       SLACK_SECURITY_WEBHOOK_URL: ${{ secrets.SLACK_SECURITY_WEBHOOK_URL }}
 
-  call-vuln-check-for-v3_10:
-    uses: ./.github/workflows/vuln-check.yaml
-    with:
-      target-ref: v3.10
-      find-latest-release: true
-    secrets:
-      CR_PAT: ${{ secrets.CR_PAT }}
-      SLACK_SECURITY_WEBHOOK_URL: ${{ secrets.SLACK_SECURITY_WEBHOOK_URL }}
-
   call-vuln-check-for-v3_11:
     uses: ./.github/workflows/vuln-check.yaml
     with:
@@ -60,3 +51,12 @@ jobs:
     secrets:
       CR_PAT: ${{ secrets.CR_PAT }}
       SLACK_SECURITY_WEBHOOK_URL: ${{ secrets.SLACK_SECURITY_WEBHOOK_URL }}
+
+  call-vuln-check-for-v3_15:
+    uses: ./.github/workflows/vuln-check.yaml
+    with:
+      target-ref: v3.15
+      find-latest-release: true
+    secrets:
+      CR_PAT: ${{ secrets.CR_PAT }}
+      SLACK_SECURITY_WEBHOOK_URL: ${{ secrets.SLACK_SECURITY_WEBHOOK_URL }}

From a6d627e26aa375b2fd04b2e69e0921633c9212ad Mon Sep 17 00:00:00 2001
From: Toshihiro Suzuki
Date: Fri, 24 Jan 2025 15:43:22 +0900
Subject: [PATCH 18/18] Update ScalarDB dependency version to 3.15.0 in README (#2480)

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 80e5b62c2f..cb9e364e62 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@ You can install it in your application using your build tool such as Gradle and
 To add a dependency on ScalarDB using Gradle, use the following:
 ```gradle
 dependencies {
-    implementation 'com.scalar-labs:scalardb:3.14.0'
+    implementation 'com.scalar-labs:scalardb:3.15.0'
 }
 ```
 
@@ -19,7 +19,7 @@ To add a dependency using Maven:
 <dependency>
   <groupId>com.scalar-labs</groupId>
   <artifactId>scalardb</artifactId>
-  <version>3.14.0</version>
+  <version>3.15.0</version>
 </dependency>
 ```