From 90253bcdf4bb4c3614785c426bbbabf6c1c05e8e Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Wed, 26 Mar 2025 10:30:48 +0900 Subject: [PATCH 01/20] Add Blob Storage adapter --- build.gradle | 1 + core/build.gradle | 25 + ...AdminIntegrationTestWithObjectStorage.java | 65 +++ ...epairIntegrationTestWithObjectStorage.java | 19 + ...nScanIntegrationTestWithObjectStorage.java | 23 + ...ommitIntegrationTestWithObjectStorage.java | 36 ++ ...adataIntegrationTestWithObjectStorage.java | 13 + .../ConsensusCommitObjectStorageEnv.java | 18 + ...cificIntegrationTestWithObjectStorage.java | 45 ++ ...abledIntegrationTestWithObjectStorage.java | 13 + .../ObjectStorageAdminIntegrationTest.java | 70 +++ ...jectStorageAdminRepairIntegrationTest.java | 18 + .../ObjectStorageAdminTestUtils.java | 114 ++++ ...jectStorageColumnValueIntegrationTest.java | 12 + ...ageConditionalMutationIntegrationTest.java | 30 + ...rageCrossPartitionScanIntegrationTest.java | 30 + .../objectstorage/ObjectStorageEnv.java | 44 ++ .../ObjectStorageIntegrationTest.java | 44 ++ .../ObjectStorageJapaneseIntegrationTest.java | 13 + ...tipleClusteringKeyScanIntegrationTest.java | 51 ++ ...geMultiplePartitionKeyIntegrationTest.java | 40 ++ ...ingleClusteringKeyScanIntegrationTest.java | 45 ++ ...rageSinglePartitionKeyIntegrationTest.java | 30 + .../objectstorage/ObjectStorageTestUtils.java | 19 + ...AdminIntegrationTestWithObjectStorage.java | 60 ++ ...ctionIntegrationTestWithObjectStorage.java | 13 + ...nScanIntegrationTestWithObjectStorage.java | 23 + ...ommitIntegrationTestWithObjectStorage.java | 25 + ...cificIntegrationTestWithObjectStorage.java | 13 + ...abledIntegrationTestWithObjectStorage.java | 13 + .../com/scalar/db/common/error/CoreError.java | 34 ++ .../db/storage/objectstorage/BlobConfig.java | 129 +++++ .../storage/objectstorage/BlobProvider.java | 8 + .../db/storage/objectstorage/BlobWrapper.java | 147 +++++ .../ClusteringKeyComparator.java | 36 ++ .../objectstorage/ColumnComparator.java | 70 +++ .../objectstorage/ColumnValueMapper.java | 79 +++ .../objectstorage/ConcatenationVisitor.java | 136 +++++ .../storage/objectstorage/JsonConvertor.java | 33 ++ .../db/storage/objectstorage/MapVisitor.java | 92 +++ .../objectstorage/MutateStatementHandler.java | 294 ++++++++++ .../storage/objectstorage/ObjectStorage.java | 138 +++++ .../objectstorage/ObjectStorageAdmin.java | 532 ++++++++++++++++++ .../objectstorage/ObjectStorageConfig.java | 45 ++ .../objectstorage/ObjectStorageMutation.java | 61 ++ .../ObjectStorageNamespaceMetadata.java | 41 ++ .../objectstorage/ObjectStorageOperation.java | 77 +++ .../ObjectStorageOperationChecker.java | 123 ++++ .../objectstorage/ObjectStorageProvider.java | 19 + .../objectstorage/ObjectStorageRecord.java | 116 ++++ .../ObjectStorageTableMetadata.java | 142 +++++ .../objectstorage/ObjectStorageUtils.java | 53 ++ .../objectstorage/ObjectStorageWrapper.java | 71 +++ .../ObjectStorageWrapperException.java | 20 + .../ObjectStorageWrapperResponse.java | 19 + .../objectstorage/PartitionIdentifier.java | 45 ++ .../objectstorage/ResultInterpreter.java | 53 ++ .../db/storage/objectstorage/ScannerImpl.java | 70 +++ .../objectstorage/SelectStatementHandler.java | 303 ++++++++++ .../objectstorage/StatementHandler.java | 132 +++++ ...m.scalar.db.api.DistributedStorageProvider | 1 + 61 files changed, 4084 insertions(+) create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java create mode 100644 
core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminRepairIntegrationTestWithObjectStorage.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitNullMetadataIntegrationTestWithObjectStorage.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminRepairIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageColumnValueIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageEnv.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageJapaneseIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageTestUtils.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionIntegrationTestWithObjectStorage.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java create mode 100644 
core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/BlobConfig.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/BlobProvider.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/BlobWrapper.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ColumnComparator.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ColumnValueMapper.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ConcatenationVisitor.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/JsonConvertor.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/MapVisitor.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageConfig.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapper.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperException.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperResponse.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/PartitionIdentifier.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ScannerImpl.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java diff --git a/build.gradle b/build.gradle index eb755d09bf..137b1ebf4b 100644 --- a/build.gradle +++ b/build.gradle @@ -26,6 +26,7 @@ subprojects { slf4jVersion = '1.7.36' cassandraDriverVersion = '3.11.5' azureCosmosVersion = '4.67.0' + azureBlobVersion = '12.28.1' jooqVersion = '3.14.16' awssdkVersion = '2.31.3' commonsDbcp2Version = '2.13.0' diff --git 
a/core/build.gradle b/core/build.gradle index 239b40c55d..0d67a90f83 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -56,6 +56,16 @@ sourceSets { } resources.srcDir file('src/integration-test/resources') } + integrationTestObjectStorage { + java { + compileClasspath += main.output + test.output + runtimeClasspath += main.output + test.output + srcDir file('src/integration-test/java') + include '**/com/scalar/db/common/*.java' + include '**/com/scalar/db/storage/objectstorage/*.java' + } + resources.srcDir file('src/integration-test/resources') + } integrationTestMultiStorage { java { compileClasspath += main.output + test.output @@ -85,6 +95,9 @@ configurations { integrationTestJdbcImplementation.extendsFrom testImplementation integrationTestJdbcRuntimeOnly.extendsFrom testRuntimeOnly integrationTestJdbcCompileOnly.extendsFrom testCompileOnly + integrationTestObjectStorageImplementation.extendsFrom testImplementation + integrationTestObjectStorageRuntimeOnly.extendsFrom testRuntimeOnly + integrationTestObjectStorageCompileOnly.extendsFrom testCompileOnly integrationTestMultiStorageImplementation.extendsFrom testImplementation integrationTestMultiStorageRuntimeOnly.extendsFrom testRuntimeOnly integrationTestMultiStorageCompileOnly.extendsFrom testCompileOnly @@ -96,6 +109,7 @@ dependencies { implementation "org.slf4j:slf4j-api:${slf4jVersion}" implementation "com.datastax.cassandra:cassandra-driver-core:${cassandraDriverVersion}" implementation "com.azure:azure-cosmos:${azureCosmosVersion}" + implementation "com.azure:azure-storage-blob:${azureBlobVersion}" implementation "org.jooq:jooq:${jooqVersion}" implementation platform("software.amazon.awssdk:bom:${awssdkVersion}") implementation 'software.amazon.awssdk:applicationautoscaling' @@ -188,6 +202,17 @@ task integrationTestJdbc(type: Test) { } } +task integrationTestObjectStorage(type: Test) { + description = 'Runs the integration tests for object storages.' + group = 'verification' + testClassesDirs = sourceSets.integrationTestObjectStorage.output.classesDirs + classpath = sourceSets.integrationTestObjectStorage.runtimeClasspath + outputs.upToDateWhen { false } // ensures integration tests are run every time when called + options { + systemProperties(System.getProperties().findAll{it.key.toString().startsWith("scalardb")}) + } +} + task integrationTestMultiStorage(type: Test) { description = 'Runs the integration tests for multi-storage.' 
group = 'verification' diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..e7500ea596 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java @@ -0,0 +1,65 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitAdminIntegrationTestBase; +import com.scalar.db.util.AdminTestUtils; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class ConsensusCommitAdminIntegrationTestWithObjectStorage + extends ConsensusCommitAdminIntegrationTestBase { + @Override + protected Properties getProps(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected AdminTestUtils getAdminTestUtils(String testName) { + return new ObjectStorageAdminTestUtils(getProperties(testName)); + } + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorrectly() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void createIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void createIndex_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void createIndex_ForAlreadyExistingIndex_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void createIndex_IfNotExists_ForAlreadyExistingIndex_ShouldNotThrowAnyException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void dropIndex_ForAllDataTypesWithExistingData_ShouldDropIndexCorrectly() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void dropIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void dropIndex_ForNonExistingIndex_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void dropIndex_IfExists_ForNonExistingIndex_ShouldNotThrowAnyException() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminRepairIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminRepairIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..436a566dbb --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminRepairIntegrationTestWithObjectStorage.java @@ -0,0 +1,19 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitAdminRepairIntegrationTestBase; +import java.util.Properties; + +public 
class ConsensusCommitAdminRepairIntegrationTestWithObjectStorage + extends ConsensusCommitAdminRepairIntegrationTestBase { + + @Override + protected Properties getProps(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected void initialize(String testName) throws Exception { + super.initialize(testName); + adminTestUtils = new ObjectStorageAdminTestUtils(getProperties(testName)); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..815a4c88c0 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java @@ -0,0 +1,23 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitConfig; +import com.scalar.db.transaction.consensuscommit.ConsensusCommitCrossPartitionScanIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage + extends ConsensusCommitCrossPartitionScanIntegrationTestBase { + + @Override + protected Properties getProps(String testName) { + Properties properties = ConsensusCommitObjectStorageEnv.getProperties(testName); + properties.setProperty(ConsensusCommitConfig.ISOLATION_LEVEL, "SERIALIZABLE"); + return properties; + } + + @Test + @Override + @Disabled("Cross partition scan with ordering is not supported in object storages") + public void scan_CrossPartitionScanWithOrderingGivenForCommittedRecord_ShouldReturnRecords() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..a5419fcce9 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java @@ -0,0 +1,36 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.exception.transaction.TransactionException; +import com.scalar.db.transaction.consensuscommit.ConsensusCommitIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class ConsensusCommitIntegrationTestWithObjectStorage + extends ConsensusCommitIntegrationTestBase { + @Override + protected Properties getProps(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } + + @Override + protected boolean isTimestampTypeSupported() { + return false; + } + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void get_GetGivenForIndexColumn_ShouldReturnRecords() throws TransactionException {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void scan_ScanGivenForIndexColumn_ShouldReturnRecords() throws TransactionException {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void 
scan_ScanGivenForIndexColumnWithConjunctions_ShouldReturnRecords() + throws TransactionException {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitNullMetadataIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitNullMetadataIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..440e753212 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitNullMetadataIntegrationTestWithObjectStorage.java @@ -0,0 +1,13 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitNullMetadataIntegrationTestBase; +import java.util.Properties; + +public class ConsensusCommitNullMetadataIntegrationTestWithObjectStorage + extends ConsensusCommitNullMetadataIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java new file mode 100644 index 0000000000..1ee909e9df --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java @@ -0,0 +1,18 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.common.ConsensusCommitTestUtils; +import com.scalar.db.transaction.consensuscommit.ConsensusCommitIntegrationTestUtils; +import java.util.Properties; + +public class ConsensusCommitObjectStorageEnv { + private ConsensusCommitObjectStorageEnv() {} + + public static Properties getProperties(String testName) { + Properties properties = ObjectStorageEnv.getProperties(testName); + + // Add testName as a coordinator schema suffix + ConsensusCommitIntegrationTestUtils.addSuffixToCoordinatorNamespace(properties, testName); + + return ConsensusCommitTestUtils.loadConsensusCommitProperties(properties); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..b7a7043f9d --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java @@ -0,0 +1,45 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitSpecificIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class ConsensusCommitSpecificIntegrationTestWithObjectStorage + extends ConsensusCommitSpecificIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void + scanWithIndex_PutWithOverlappedIndexKeyAndNonOverlappedConjunctionsGivenBefore_ShouldScan() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void + 
scanWithIndex_OverlappingPutWithNonIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void + scanWithIndex_NonOverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void + scanWithIndex_OverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void + scanWithIndex_OverlappingPutWithIndexedColumnAndConjunctionsGivenBefore_ShouldThrowIllegalArgumentException() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..884e464008 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java @@ -0,0 +1,13 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitWithIncludeMetadataEnabledIntegrationTestBase; +import java.util.Properties; + +public class ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage + extends ConsensusCommitWithIncludeMetadataEnabledIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java new file mode 100644 index 0000000000..d348ebe778 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java @@ -0,0 +1,70 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageAdminIntegrationTestBase; +import com.scalar.db.util.AdminTestUtils; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class ObjectStorageAdminIntegrationTest extends DistributedStorageAdminIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected boolean isIndexOnBooleanColumnSupported() { + return false; + } + + @Override + protected AdminTestUtils getAdminTestUtils(String testName) { + return new ObjectStorageAdminTestUtils(getProperties(testName)); + } + + @Test + @Override + @Disabled("Index-related operations are not supported in object storages") + public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorrectly() {} + + @Test + @Override + @Disabled("Index-related operations are not supported in object storages") + public void createIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported in object storages") + public void createIndex_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + 
@Disabled("Index-related operations are not supported in object storages") + public void createIndex_ForAlreadyExistingIndex_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported in object storages") + public void createIndex_IfNotExists_ForAlreadyExistingIndex_ShouldNotThrowAnyException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported in object storages") + public void dropIndex_ForAllDataTypesWithExistingData_ShouldDropIndexCorrectly() {} + + @Test + @Override + @Disabled("Index-related operations are not supported in object storages") + public void dropIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported in object storages") + public void dropIndex_ForNonExistingIndex_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported in object storages") + public void dropIndex_IfExists_ForNonExistingIndex_ShouldNotThrowAnyException() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminRepairIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminRepairIntegrationTest.java new file mode 100644 index 0000000000..20f52600e4 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminRepairIntegrationTest.java @@ -0,0 +1,18 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageAdminRepairIntegrationTestBase; +import java.util.Properties; + +public class ObjectStorageAdminRepairIntegrationTest + extends DistributedStorageAdminRepairIntegrationTestBase { + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected void initialize(String testName) throws Exception { + super.initialize(testName); + adminTestUtils = new ObjectStorageAdminTestUtils(getProperties(testName)); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java new file mode 100644 index 0000000000..4c2ceab265 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java @@ -0,0 +1,114 @@ +package com.scalar.db.storage.objectstorage; + +import com.azure.core.util.BinaryData; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobServiceClientBuilder; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.ListBlobsOptions; +import com.azure.storage.common.StorageSharedKeyCredential; +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.util.AdminTestUtils; +import java.util.Properties; +import java.util.stream.Collectors; + +public class ObjectStorageAdminTestUtils extends AdminTestUtils { + private final BlobContainerClient client; + private final String metadataNamespace; + + public ObjectStorageAdminTestUtils(Properties properties) { + super(properties); + ObjectStorageConfig config = + ObjectStorageUtils.getObjectStorageConfig(new DatabaseConfig(properties)); + client = + new BlobServiceClientBuilder() + .endpoint(config.getEndpoint()) + .credential(new StorageSharedKeyCredential(config.getUsername(), 
config.getPassword())) + .buildClient() + .getBlobContainerClient(config.getBucket()); + metadataNamespace = config.getMetadataNamespace(); + } + + @Override + public void dropNamespacesTable() throws Exception { + // Do nothing + // Blob does not have a concept of table + } + + @Override + public void dropMetadataTable() throws Exception { + // Do nothing + // Blob does not have a concept of table + } + + @Override + public void truncateNamespacesTable() throws Exception { + client + .listBlobs( + new ListBlobsOptions() + .setPrefix( + ObjectStorageUtils.getObjectKey( + metadataNamespace, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE, null)), + null) + .stream() + .map(BlobItem::getName) + .collect(Collectors.toList()) + .forEach( + key -> { + client.getBlobClient(key).delete(); + }); + } + + @Override + public void truncateMetadataTable() throws Exception { + client + .listBlobs( + new ListBlobsOptions() + .setPrefix( + ObjectStorageUtils.getObjectKey( + metadataNamespace, ObjectStorageAdmin.TABLE_METADATA_TABLE, null)), + null) + .stream() + .map(BlobItem::getName) + .collect(Collectors.toList()) + .forEach( + key -> { + client.getBlobClient(key).delete(); + }); + } + + @Override + public void corruptMetadata(String namespace, String table) throws Exception { + client + .getBlobClient(ObjectStorageUtils.getObjectKey(metadataNamespace, table, null)) + .upload(BinaryData.fromString("corrupted metadata"), true); + } + + @Override + public void dropNamespace(String namespace) throws Exception { + // Do nothing + // Blob does not have a concept of namespace + } + + @Override + public boolean namespaceExists(String namespace) throws Exception { + // Blob does not have a concept of namespace + return true; + } + + @Override + public boolean tableExists(String namespace, String table) throws Exception { + // Blob does not have a concept of table + return true; + } + + @Override + public void dropTable(String namespace, String table) throws Exception { + // Do nothing + // Blob does not have a concept of table + } + + @Override + public void close() throws Exception { + // Do nothing + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageColumnValueIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageColumnValueIntegrationTest.java new file mode 100644 index 0000000000..1514c98f76 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageColumnValueIntegrationTest.java @@ -0,0 +1,12 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageColumnValueIntegrationTestBase; +import java.util.Properties; + +public class ObjectStorageColumnValueIntegrationTest + extends DistributedStorageColumnValueIntegrationTestBase { + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java new file mode 100644 index 0000000000..759dd22507 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java @@ -0,0 +1,30 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.ConditionalExpression; +import 
com.scalar.db.api.DistributedStorageConditionalMutationIntegrationTestBase; +import java.util.List; +import java.util.Properties; +import java.util.stream.Collectors; + +public class ObjectStorageConditionalMutationIntegrationTest + extends DistributedStorageConditionalMutationIntegrationTestBase { + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected int getThreadNum() { + return 3; + } + + @Override + protected List<OperatorAndDataType> getOperatorAndDataTypeListForTest() { + return super.getOperatorAndDataTypeListForTest().stream() + .filter( + operatorAndDataType -> + operatorAndDataType.getOperator() == ConditionalExpression.Operator.EQ + || operatorAndDataType.getOperator() == ConditionalExpression.Operator.NE) + .collect(Collectors.toList()); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java new file mode 100644 index 0000000000..d611e892e9 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java @@ -0,0 +1,30 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageCrossPartitionScanIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class ObjectStorageCrossPartitionScanIntegrationTest + extends DistributedStorageCrossPartitionScanIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected int getThreadNum() { + return 3; + } + + @Override + protected boolean isParallelDdlSupported() { + return false; + } + + @Test + @Override + @Disabled("Cross partition scan with ordering is not supported in object storages") + public void scan_WithOrderingForNonPrimaryColumns_ShouldReturnProperResult() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageEnv.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageEnv.java new file mode 100644 index 0000000000..d43055defb --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageEnv.java @@ -0,0 +1,44 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.config.DatabaseConfig; +import java.util.Properties; + +public class ObjectStorageEnv { + private static final String PROP_OBJECT_STORAGE_ENDPOINT = "scalardb.object_storage.endpoint"; + private static final String PROP_OBJECT_STORAGE_USERNAME = "scalardb.object_storage.username"; + private static final String PROP_OBJECT_STORAGE_PASSWORD = "scalardb.object_storage.password"; + private static final String PROP_OBJECT_STORAGE_BUCKET = "scalardb.object_storage.bucket"; + + private static final String DEFAULT_BLOB_ENDPOINT = "http://localhost:10000/"; + private static final String DEFAULT_BLOB_USERNAME = "devstoreaccount1"; + private static final String DEFAULT_BLOB_PASSWORD = + "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="; + private static final String DEFAULT_BLOB_CONTAINER = "fake-container"; + + private ObjectStorageEnv() {} + + public static Properties getProperties(String testName) { + String accountName =
System.getProperty(PROP_OBJECT_STORAGE_USERNAME, DEFAULT_BLOB_USERNAME); + String accountKey = System.getProperty(PROP_OBJECT_STORAGE_PASSWORD, DEFAULT_BLOB_PASSWORD); + String endpoint = + System.getProperty(PROP_OBJECT_STORAGE_ENDPOINT, DEFAULT_BLOB_ENDPOINT) + accountName; + String bucket = System.getProperty(PROP_OBJECT_STORAGE_BUCKET, DEFAULT_BLOB_CONTAINER); + + Properties properties = new Properties(); + properties.setProperty(DatabaseConfig.CONTACT_POINTS, endpoint); + properties.setProperty(DatabaseConfig.USERNAME, accountName); + properties.setProperty(DatabaseConfig.PASSWORD, accountKey); + properties.setProperty(DatabaseConfig.STORAGE, BlobConfig.STORAGE_NAME); + properties.setProperty(DatabaseConfig.CROSS_PARTITION_SCAN, "true"); + properties.setProperty(DatabaseConfig.CROSS_PARTITION_SCAN_FILTERING, "true"); + properties.setProperty(DatabaseConfig.CROSS_PARTITION_SCAN_ORDERING, "false"); + properties.setProperty(BlobConfig.BUCKET, bucket); + + // Add testName as a metadata namespace suffix + properties.setProperty( + DatabaseConfig.SYSTEM_NAMESPACE_NAME, + DatabaseConfig.DEFAULT_SYSTEM_NAMESPACE_NAME + "_" + testName); + + return properties; + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java new file mode 100644 index 0000000000..4441d72f49 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java @@ -0,0 +1,44 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class ObjectStorageIntegrationTest extends DistributedStorageIntegrationTestBase { + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void get_GetGivenForIndexedColumn_ShouldGet() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void get_GetGivenForIndexedColumnWithMatchedConjunctions_ShouldGet() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void get_GetGivenForIndexedColumnWithUnmatchedConjunctions_ShouldReturnEmpty() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void + get_GetGivenForIndexedColumnMatchingMultipleRecords_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void scan_ScanGivenForIndexedColumn_ShouldScan() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void scan_ScanGivenForNonIndexedColumn_ShouldThrowIllegalArgumentException() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageJapaneseIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageJapaneseIntegrationTest.java new file mode 100644 index 0000000000..4610d84aed --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageJapaneseIntegrationTest.java @@ -0,0 +1,13 @@ +package 
com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageJapaneseIntegrationTestBase; +import java.util.Properties; + +public class ObjectStorageJapaneseIntegrationTest + extends DistributedStorageJapaneseIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java new file mode 100644 index 0000000000..e3a93e8ff6 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java @@ -0,0 +1,51 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageMultipleClusteringKeyScanIntegrationTestBase; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; +import java.util.List; +import java.util.Properties; +import java.util.stream.Collectors; + +public class ObjectStorageMultipleClusteringKeyScanIntegrationTest + extends DistributedStorageMultipleClusteringKeyScanIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected List<DataType> getDataTypes() { + // Return types without BLOB because blob is not supported for clustering key for now + return super.getDataTypes().stream() + .filter(type -> type != DataType.BLOB) + .collect(Collectors.toList()); + } + + @Override + protected boolean isParallelDdlSupported() { + return false; + } + + @Override + protected int getThreadNum() { + return 3; + } + + @Override + protected Column<?> getColumnWithMinValue(String columnName, DataType dataType) { + if (dataType == DataType.TEXT) { + return ObjectStorageTestUtils.getMinTextValue(columnName); + } + return super.getColumnWithMinValue(columnName, dataType); + } + + @Override + protected Column<?> getColumnWithMaxValue(String columnName, DataType dataType) { + if (dataType == DataType.TEXT) { + return ObjectStorageTestUtils.getMaxTextValue(columnName); + } + return super.getColumnWithMaxValue(columnName, dataType); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java new file mode 100644 index 0000000000..d3b077df18 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java @@ -0,0 +1,40 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageMultiplePartitionKeyIntegrationTestBase; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; +import java.util.Properties; + +public class ObjectStorageMultiplePartitionKeyIntegrationTest + extends DistributedStorageMultiplePartitionKeyIntegrationTestBase { + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected int getThreadNum() { + return 3; + } + + @Override + protected boolean isParallelDdlSupported() { + return false; + } + + @Override + protected Column<?> getColumnWithMinValue(String columnName, DataType dataType) { + if (dataType
== DataType.TEXT) { + return ObjectStorageTestUtils.getMinTextValue(columnName); + } + return super.getColumnWithMinValue(columnName, dataType); + } + + @Override + protected Column<?> getColumnWithMaxValue(String columnName, DataType dataType) { + if (dataType == DataType.TEXT) { + return ObjectStorageTestUtils.getMaxTextValue(columnName); + } + return super.getColumnWithMaxValue(columnName, dataType); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java new file mode 100644 index 0000000000..955b94330b --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java @@ -0,0 +1,45 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageSingleClusteringKeyScanIntegrationTestBase; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; + +public class ObjectStorageSingleClusteringKeyScanIntegrationTest + extends DistributedStorageSingleClusteringKeyScanIntegrationTestBase { + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected List<DataType> getClusteringKeyTypes() { + // Return types without BLOB because blob is not supported for clustering key for now + List<DataType> clusteringKeyTypes = new ArrayList<>(); + for (DataType dataType : DataType.values()) { + if (dataType == DataType.BLOB) { + continue; + } + clusteringKeyTypes.add(dataType); + } + return clusteringKeyTypes; + } + + @Override + protected Column<?> getColumnWithMinValue(String columnName, DataType dataType) { + if (dataType == DataType.TEXT) { + return ObjectStorageTestUtils.getMinTextValue(columnName); + } + return super.getColumnWithMinValue(columnName, dataType); + } + + @Override + protected Column<?> getColumnWithMaxValue(String columnName, DataType dataType) { + if (dataType == DataType.TEXT) { + return ObjectStorageTestUtils.getMaxTextValue(columnName); + } + return super.getColumnWithMaxValue(columnName, dataType); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java new file mode 100644 index 0000000000..215993d078 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java @@ -0,0 +1,30 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageSinglePartitionKeyIntegrationTestBase; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; +import java.util.Properties; + +public class ObjectStorageSinglePartitionKeyIntegrationTest + extends DistributedStorageSinglePartitionKeyIntegrationTestBase { + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected Column<?> getColumnWithMinValue(String columnName, DataType dataType) { + if (dataType == DataType.TEXT) { + return ObjectStorageTestUtils.getMinTextValue(columnName); + } + return super.getColumnWithMinValue(columnName, dataType); + } + + @Override + protected Column<?>
getColumnWithMaxValue(String columnName, DataType dataType) { + if (dataType == DataType.TEXT) { + return ObjectStorageTestUtils.getMaxTextValue(columnName); + } + return super.getColumnWithMaxValue(columnName, dataType); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageTestUtils.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageTestUtils.java new file mode 100644 index 0000000000..0263043fed --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageTestUtils.java @@ -0,0 +1,19 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.io.TextColumn; +import com.scalar.db.util.TestUtils; +import java.util.stream.IntStream; + +public class ObjectStorageTestUtils { + public static TextColumn getMinTextValue(String columnName) { + // Since ObjectStorage can't handle an empty string correctly, we use "0" as the min value + return TextColumn.of(columnName, "0"); + } + + public static TextColumn getMaxTextValue(String columnName) { + // Since ObjectStorage can't handle 0xFF character correctly, we use "ZZZ..." as the max value + StringBuilder builder = new StringBuilder(); + IntStream.range(0, TestUtils.MAX_TEXT_COUNT).forEach(i -> builder.append('Z')); + return TextColumn.of(columnName, builder.toString()); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..3db4bae22b --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java @@ -0,0 +1,60 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.singlecrudoperation.SingleCrudOperationTransactionAdminIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage + extends SingleCrudOperationTransactionAdminIntegrationTestBase { + + @Override + protected Properties getProps(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorrectly() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void createIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void createIndex_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void createIndex_ForAlreadyExistingIndex_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void createIndex_IfNotExists_ForAlreadyExistingIndex_ShouldNotThrowAnyException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void 
dropIndex_ForAllDataTypesWithExistingData_ShouldDropIndexCorrectly() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void dropIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void dropIndex_ForNonExistingIndex_ShouldThrowIllegalArgumentException() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void dropIndex_IfExists_ForNonExistingIndex_ShouldNotThrowAnyException() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..7405f7e829 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionIntegrationTestWithObjectStorage.java @@ -0,0 +1,13 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.singlecrudoperation.SingleCrudOperationTransactionIntegrationTestBase; +import java.util.Properties; + +public class SingleCrudOperationTransactionIntegrationTestWithObjectStorage + extends SingleCrudOperationTransactionIntegrationTestBase { + + @Override + protected Properties getProps(String testName) { + return ObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..3c666d908d --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java @@ -0,0 +1,23 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitConfig; +import com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage + extends TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestBase { + + @Override + protected Properties getProps1(String testName) { + Properties properties = ConsensusCommitObjectStorageEnv.getProperties(testName); + properties.setProperty(ConsensusCommitConfig.ISOLATION_LEVEL, "SERIALIZABLE"); + return properties; + } + + @Test + @Override + @Disabled("Cross partition scan with ordering is not supported in object storages") + public void scan_ScanWithOrderingGivenForCommittedRecord_ShouldReturnRecords() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..1e3a43ae68 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java @@ -0,0 
+1,25 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommitIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class TwoPhaseConsensusCommitIntegrationTestWithObjectStorage + extends TwoPhaseConsensusCommitIntegrationTestBase { + + @Override + protected Properties getProps1(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void get_GetGivenForIndexColumn_ShouldReturnRecords() {} + + @Test + @Override + @Disabled("Index-related operations are not supported for object storages") + public void scan_ScanGivenForIndexColumn_ShouldReturnRecords() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..1e4b66e32b --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage.java @@ -0,0 +1,13 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommitSpecificIntegrationTestBase; +import java.util.Properties; + +public class TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage + extends TwoPhaseConsensusCommitSpecificIntegrationTestBase { + + @Override + protected Properties getProperties1(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..38a95fd99e --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java @@ -0,0 +1,13 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestBase; +import java.util.Properties; + +public class TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage + extends TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/main/java/com/scalar/db/common/error/CoreError.java b/core/src/main/java/com/scalar/db/common/error/CoreError.java index 29b34dd033..2fd503ec39 100644 --- a/core/src/main/java/com/scalar/db/common/error/CoreError.java +++ b/core/src/main/java/com/scalar/db/common/error/CoreError.java @@ -824,6 +824,30 @@ public enum CoreError implements ScalarDbError { ""), DATA_LOADER_FILE_FORMAT_NOT_SUPPORTED( Category.USER_ERROR, "0178", "The provided file format is not supported : %s", "", ""), + OBJECT_STORAGE_CROSS_PARTITION_SCAN_WITH_ORDERING_NOT_SUPPORTED( + Category.USER_ERROR, + "0178", + "Cross-partition scan 
with ordering is not supported in Object Storage", + "", + ""), + OBJECT_STORAGE_IMPORT_NOT_SUPPORTED( + Category.USER_ERROR, + "0179", + "Import-related functionality is not supported in Object Storage", + "", + ""), + OBJECT_STORAGE_INDEX_NOT_SUPPORTED( + Category.USER_ERROR, + "0180", + "Index-related functionality is not supported in Object Storage", + "", + ""), + OBJECT_STORAGE_PRIMARY_KEY_CONTAINS_ILLEGAL_CHARACTER( + Category.USER_ERROR, + "0181", + "The value of the column %s in the primary key contains an illegal character. Value: %s", + "", + ""), // // Errors for the concurrency error category @@ -935,6 +959,14 @@ public enum CoreError implements ScalarDbError { "A transaction conflict occurred in the Insert operation", "", ""), + OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION( + Category.CONCURRENCY_ERROR, "0026", "An error occurred in the mutation. Details: %s", "", ""), + OBJECT_STORAGE_TRANSACTION_CONFLICT_OCCURRED_IN_MUTATION( + Category.CONCURRENCY_ERROR, + "0027", + "A transaction conflict occurred in the mutation. Details: %s", + "", + ""), // // Errors for the internal error category @@ -1087,6 +1119,8 @@ public enum CoreError implements ScalarDbError { "Something went wrong while scanning. Are you sure you are running in the correct transaction mode? Details: %s", "", ""), + OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION( + Category.INTERNAL_ERROR, "0049", "An error occurred in the selection. Details: %s", "", ""), // // Errors for the unknown transaction status error category diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/BlobConfig.java b/core/src/main/java/com/scalar/db/storage/objectstorage/BlobConfig.java new file mode 100644 index 0000000000..fb8f9c05ec --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/BlobConfig.java @@ -0,0 +1,129 @@ +package com.scalar.db.storage.objectstorage; + +import static com.scalar.db.config.ConfigUtils.getInt; +import static com.scalar.db.config.ConfigUtils.getLong; +import static com.scalar.db.config.ConfigUtils.getString; + +import com.scalar.db.common.error.CoreError; +import com.scalar.db.config.DatabaseConfig; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class BlobConfig implements ObjectStorageConfig { + public static final String STORAGE_NAME = "blob"; + public static final String PREFIX = DatabaseConfig.PREFIX + STORAGE_NAME + "."; + public static final String BUCKET = PREFIX + "bucket"; + + public static final String PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES = + PREFIX + "parallel_upload_block_size_in_bytes"; + public static final String PARALLEL_UPLOAD_MAX_PARALLELISM = + PREFIX + "parallel_upload_max_parallelism"; + public static final String PARALLEL_UPLOAD_THRESHOLD_IN_BYTES = + PREFIX + "parallel_upload_threshold_in_bytes"; + public static final String REQUEST_TIMEOUT_IN_SECONDS = PREFIX + "request_timeout_in_seconds"; + + /** @deprecated As of 5.0, will be removed. 
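+   * Use the system namespace name configured in {@code DatabaseConfig} instead; that value is
+   * applied when this property is not set.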
*/ + @Deprecated + public static final String TABLE_METADATA_NAMESPACE = PREFIX + "table_metadata.namespace"; + + private static final Logger logger = LoggerFactory.getLogger(BlobConfig.class); + private final String endpoint; + private final String username; + private final String password; + private final String bucket; + private final String metadataNamespace; + + private final long parallelUploadBlockSizeInBytes; + private final int parallelUploadMaxParallelism; + private final long parallelUploadThresholdInBytes; + private final int requestTimeoutInSeconds; + + public BlobConfig(DatabaseConfig databaseConfig) { + String storage = databaseConfig.getStorage(); + if (!storage.equals(STORAGE_NAME)) { + throw new IllegalArgumentException( + DatabaseConfig.STORAGE + " should be '" + STORAGE_NAME + "'"); + } + if (databaseConfig.getContactPoints().isEmpty()) { + throw new IllegalArgumentException(CoreError.INVALID_CONTACT_POINTS.buildMessage()); + } + endpoint = databaseConfig.getContactPoints().get(0); + username = databaseConfig.getUsername().orElse(null); + password = databaseConfig.getPassword().orElse(null); + if (!databaseConfig.getProperties().containsKey(BUCKET)) { + throw new IllegalArgumentException("Bucket name is not specified."); + } + bucket = databaseConfig.getProperties().getProperty(BUCKET); + + if (databaseConfig.getProperties().containsKey(TABLE_METADATA_NAMESPACE)) { + logger.warn( + "The configuration property \"" + + TABLE_METADATA_NAMESPACE + + "\" is deprecated and will be removed in 5.0.0."); + metadataNamespace = + getString( + databaseConfig.getProperties(), + TABLE_METADATA_NAMESPACE, + DatabaseConfig.DEFAULT_SYSTEM_NAMESPACE_NAME); + } else { + metadataNamespace = databaseConfig.getSystemNamespaceName(); + } + + parallelUploadBlockSizeInBytes = + getLong( + databaseConfig.getProperties(), PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES, 50 * 1024 * 1024); + parallelUploadMaxParallelism = + getInt(databaseConfig.getProperties(), PARALLEL_UPLOAD_MAX_PARALLELISM, 4); + parallelUploadThresholdInBytes = + getLong( + databaseConfig.getProperties(), PARALLEL_UPLOAD_THRESHOLD_IN_BYTES, 100 * 1024 * 1024); + requestTimeoutInSeconds = + getInt(databaseConfig.getProperties(), REQUEST_TIMEOUT_IN_SECONDS, 15); + } + + @Override + public String getStorageName() { + return STORAGE_NAME; + } + + @Override + public String getEndpoint() { + return endpoint; + } + + @Override + public String getUsername() { + return username; + } + + @Override + public String getPassword() { + return password; + } + + @Override + public String getBucket() { + return bucket; + } + + @Override + public String getMetadataNamespace() { + return metadataNamespace; + } + + public long getParallelUploadBlockSizeInBytes() { + return parallelUploadBlockSizeInBytes; + } + + public int getParallelUploadMaxParallelism() { + return parallelUploadMaxParallelism; + } + + public long getParallelUploadThresholdInBytes() { + return parallelUploadThresholdInBytes; + } + + public int getRequestTimeoutInSeconds() { + return requestTimeoutInSeconds; + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/BlobProvider.java b/core/src/main/java/com/scalar/db/storage/objectstorage/BlobProvider.java new file mode 100644 index 0000000000..9080036964 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/BlobProvider.java @@ -0,0 +1,8 @@ +package com.scalar.db.storage.objectstorage; + +public class BlobProvider implements ObjectStorageProvider { + @Override + public String getName() { + return 
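+    // Returns "blob"; a configuration whose storage name matches this value (the usual
+    // scalar.db.storage property is assumed here) is routed to this provider.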
BlobConfig.STORAGE_NAME; + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/BlobWrapper.java b/core/src/main/java/com/scalar/db/storage/objectstorage/BlobWrapper.java new file mode 100644 index 0000000000..33f37baeac --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/BlobWrapper.java @@ -0,0 +1,147 @@ +package com.scalar.db.storage.objectstorage; + +import com.azure.core.http.HttpHeaderName; +import com.azure.core.util.BinaryData; +import com.azure.storage.blob.BlobClient; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobDownloadContentResponse; +import com.azure.storage.blob.models.BlobErrorCode; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobRequestConditions; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.ListBlobsOptions; +import com.azure.storage.blob.models.ParallelTransferOptions; +import com.azure.storage.blob.options.BlobParallelUploadOptions; +import java.time.Duration; +import java.util.Set; +import java.util.stream.Collectors; + +public class BlobWrapper implements ObjectStorageWrapper { + public static final String STORAGE_NAME = "blob"; + + private final BlobContainerClient client; + private final Duration requestTimeoutInSeconds; + private final ParallelTransferOptions parallelTransferOptions; + + public BlobWrapper(BlobContainerClient client, BlobConfig config) { + this.client = client; + this.requestTimeoutInSeconds = Duration.ofSeconds(config.getRequestTimeoutInSeconds()); + this.parallelTransferOptions = + new ParallelTransferOptions() + .setBlockSizeLong(config.getParallelUploadBlockSizeInBytes()) + .setMaxConcurrency(config.getParallelUploadMaxParallelism()) + .setMaxSingleUploadSizeLong(config.getParallelUploadThresholdInBytes()); + } + + @Override + public ObjectStorageWrapperResponse get(String key) throws ObjectStorageWrapperException { + try { + BlobClient blobClient = client.getBlobClient(key); + BlobDownloadContentResponse response = + blobClient.downloadContentWithResponse(null, null, requestTimeoutInSeconds, null); + String data = response.getValue().toString(); + String eTag = response.getHeaders().getValue(HttpHeaderName.ETAG); + return new ObjectStorageWrapperResponse(data, eTag); + } catch (BlobStorageException e) { + if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { + throw new ObjectStorageWrapperException( + ObjectStorageWrapperException.StatusCode.NOT_FOUND, e); + } + throw e; + } + } + + @Override + public Set getKeys(String prefix) { + return client.listBlobs(new ListBlobsOptions().setPrefix(prefix), requestTimeoutInSeconds) + .stream() + .map(BlobItem::getName) + .collect(Collectors.toSet()); + } + + @Override + public void insert(String key, String object) throws ObjectStorageWrapperException { + try { + BlobClient blobClient = client.getBlobClient(key); + BlobParallelUploadOptions options = + new BlobParallelUploadOptions(BinaryData.fromString(object)) + .setRequestConditions(new BlobRequestConditions().setIfNoneMatch("*")) + .setParallelTransferOptions(parallelTransferOptions); + blobClient.uploadWithResponse(options, requestTimeoutInSeconds, null); + } catch (BlobStorageException e) { + if (e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { + throw new ObjectStorageWrapperException( + ObjectStorageWrapperException.StatusCode.ALREADY_EXISTS, e); + } + throw e; + } + } + + @Override + public void update(String key, String object, String 
version) + throws ObjectStorageWrapperException { + try { + BlobClient blobClient = client.getBlobClient(key); + BlobParallelUploadOptions options = + new BlobParallelUploadOptions(BinaryData.fromString(object)) + .setRequestConditions(new BlobRequestConditions().setIfMatch(version)) + .setParallelTransferOptions(parallelTransferOptions); + blobClient.uploadWithResponse(options, requestTimeoutInSeconds, null); + } catch (BlobStorageException e) { + if (e.getErrorCode().equals(BlobErrorCode.CONDITION_NOT_MET)) { + throw new ObjectStorageWrapperException( + ObjectStorageWrapperException.StatusCode.VERSION_MISMATCH, e); + } + if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { + throw new ObjectStorageWrapperException( + ObjectStorageWrapperException.StatusCode.NOT_FOUND, e); + } + throw e; + } + } + + @Override + public void delete(String key) throws ObjectStorageWrapperException { + try { + BlobClient blobClient = client.getBlobClient(key); + blobClient.delete(); + } catch (BlobStorageException e) { + if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { + throw new ObjectStorageWrapperException( + ObjectStorageWrapperException.StatusCode.NOT_FOUND, e); + } + throw e; + } + } + + @Override + public void delete(String key, String version) throws ObjectStorageWrapperException { + try { + BlobClient blobClient = client.getBlobClient(key); + blobClient.deleteWithResponse( + null, new BlobRequestConditions().setIfMatch(version), requestTimeoutInSeconds, null); + } catch (BlobStorageException e) { + if (e.getErrorCode().equals(BlobErrorCode.CONDITION_NOT_MET)) { + throw new ObjectStorageWrapperException( + ObjectStorageWrapperException.StatusCode.VERSION_MISMATCH, e); + } + if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { + throw new ObjectStorageWrapperException( + ObjectStorageWrapperException.StatusCode.NOT_FOUND, e); + } + throw e; + } + } + + @Override + public void deleteByPrefix(String prefix) { + client + .listBlobs(new ListBlobsOptions().setPrefix(prefix), requestTimeoutInSeconds) + .forEach(blobItem -> client.getBlobClient(blobItem.getName()).delete()); + } + + @Override + public void close() { + // BlobContainerClient does not have a close method + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java new file mode 100644 index 0000000000..7068e8ef43 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java @@ -0,0 +1,36 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.Scan; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.Column; +import java.util.Comparator; +import java.util.Map; + +public class ClusteringKeyComparator implements Comparator> { + private final TableMetadata metadata; + + public ClusteringKeyComparator(TableMetadata metadata) { + this.metadata = metadata; + } + + @Override + public int compare(Map clusteringKey1, Map clusteringKey2) { + for (String columnName : metadata.getClusteringKeyNames()) { + Scan.Ordering.Order order = metadata.getClusteringOrder(columnName); + + Column column1 = + ColumnValueMapper.convert( + clusteringKey1.get(columnName), columnName, metadata.getColumnDataType(columnName)); + Column column2 = + ColumnValueMapper.convert( + clusteringKey2.get(columnName), columnName, metadata.getColumnDataType(columnName)); + + int cmp = + new 
ColumnComparator(metadata.getColumnDataType(columnName)).compare(column1, column2); + if (cmp != 0) { + return order == Scan.Ordering.Order.ASC ? cmp : -cmp; + } + } + return 0; + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ColumnComparator.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ColumnComparator.java new file mode 100644 index 0000000000..bdb6ec25f3 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ColumnComparator.java @@ -0,0 +1,70 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; +import com.scalar.db.io.IntColumn; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import java.util.Comparator; + +public class ColumnComparator implements Comparator> { + private final DataType dataType; + + public ColumnComparator(DataType dataType) { + this.dataType = dataType; + } + + @Override + public int compare(Column o1, Column o2) { + if (o1.getDataType() != dataType || o2.getDataType() != dataType) { + throw new IllegalArgumentException("The columns are not of the specified data type."); + } + int cmp; + switch (dataType) { + case BOOLEAN: + cmp = ((BooleanColumn) o1).compareTo((BooleanColumn) o2); + break; + case INT: + cmp = ((IntColumn) o1).compareTo((IntColumn) o2); + break; + case BIGINT: + cmp = ((BigIntColumn) o1).compareTo((BigIntColumn) o2); + break; + case FLOAT: + cmp = ((FloatColumn) o1).compareTo((FloatColumn) o2); + break; + case DOUBLE: + cmp = ((DoubleColumn) o1).compareTo((DoubleColumn) o2); + break; + case TEXT: + cmp = ((TextColumn) o1).compareTo((TextColumn) o2); + break; + case BLOB: + cmp = ((BlobColumn) o1).compareTo((BlobColumn) o2); + break; + case DATE: + cmp = ((DateColumn) o1).compareTo((DateColumn) o2); + break; + case TIME: + cmp = ((TimeColumn) o1).compareTo((TimeColumn) o2); + break; + case TIMESTAMP: + cmp = ((TimestampColumn) o1).compareTo((TimestampColumn) o2); + break; + case TIMESTAMPTZ: + cmp = ((TimestampTZColumn) o1).compareTo((TimestampTZColumn) o2); + break; + default: + throw new AssertionError(); + } + return cmp; + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ColumnValueMapper.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ColumnValueMapper.java new file mode 100644 index 0000000000..34e2b2d780 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ColumnValueMapper.java @@ -0,0 +1,79 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; +import com.scalar.db.io.IntColumn; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import java.util.Base64; +import javax.annotation.Nullable; + +public class ColumnValueMapper { + public static Column convert(@Nullable 
Object recordValue, String name, DataType dataType) { + switch (dataType) { + case BOOLEAN: + return recordValue == null + ? BooleanColumn.ofNull(name) + : BooleanColumn.of(name, (boolean) recordValue); + case INT: + return recordValue == null + ? IntColumn.ofNull(name) + : IntColumn.of(name, ((Number) recordValue).intValue()); + case BIGINT: + return recordValue == null + ? BigIntColumn.ofNull(name) + : BigIntColumn.of(name, ((Number) recordValue).longValue()); + case FLOAT: + return recordValue == null + ? FloatColumn.ofNull(name) + : FloatColumn.of(name, ((Number) recordValue).floatValue()); + case DOUBLE: + return recordValue == null + ? DoubleColumn.ofNull(name) + : DoubleColumn.of(name, ((Number) recordValue).doubleValue()); + case TEXT: + return recordValue == null + ? TextColumn.ofNull(name) + : TextColumn.of(name, (String) recordValue); + case BLOB: + return recordValue == null + ? BlobColumn.ofNull(name) + : BlobColumn.of(name, Base64.getDecoder().decode((String) recordValue)); + case DATE: + return recordValue == null + ? DateColumn.ofNull(name) + : DateColumn.of( + name, TimeRelatedColumnEncodingUtils.decodeDate(((Number) recordValue).intValue())); + case TIME: + return recordValue == null + ? TimeColumn.ofNull(name) + : TimeColumn.of( + name, + TimeRelatedColumnEncodingUtils.decodeTime(((Number) recordValue).longValue())); + case TIMESTAMP: + return recordValue == null + ? TimestampColumn.ofNull(name) + : TimestampColumn.of( + name, + TimeRelatedColumnEncodingUtils.decodeTimestamp(((Number) recordValue).longValue())); + case TIMESTAMPTZ: + return recordValue == null + ? TimestampTZColumn.ofNull(name) + : TimestampTZColumn.of( + name, + TimeRelatedColumnEncodingUtils.decodeTimestampTZ( + ((Number) recordValue).longValue())); + default: + throw new AssertionError(); + } + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ConcatenationVisitor.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ConcatenationVisitor.java new file mode 100644 index 0000000000..3141ed3ff7 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ConcatenationVisitor.java @@ -0,0 +1,136 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.ColumnVisitor; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; +import com.scalar.db.io.IntColumn; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import java.util.ArrayList; +import java.util.Base64; +import java.util.List; +import javax.annotation.concurrent.NotThreadSafe; + +/** A visitor class to make a concatenated key string for the partition key. 
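+ * Visited column values are joined with {@code ObjectStorageUtils.CONCATENATED_KEY_DELIMITER};
+ * e.g., an INT value 1 and a TEXT value "foo" (illustrative values) yield "1" and "foo" joined by
+ * the delimiter. BLOB values are Base64url-encoded without padding, and time-related types use
+ * their encoded numeric representations.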
*/ +@NotThreadSafe +public class ConcatenationVisitor implements ColumnVisitor { + private final List columns; + + public ConcatenationVisitor() { + columns = new ArrayList<>(); + } + + public String build() { + return String.join(String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), columns); + } + + /** + * Sets the specified {@code BooleanColumn} to the key string + * + * @param column a {@code BooleanColumn} to be set + */ + @Override + public void visit(BooleanColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(column.getBooleanValue())); + } + + /** + * Sets the specified {@code IntColumn} to the key string + * + * @param column a {@code IntColumn} to be set + */ + @Override + public void visit(IntColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(column.getIntValue())); + } + + /** + * Sets the specified {@code BigIntColumn} to the key string + * + * @param column a {@code BigIntColumn} to be set + */ + @Override + public void visit(BigIntColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(column.getBigIntValue())); + } + + /** + * Sets the specified {@code FloatColumn} to the key string + * + * @param column a {@code FloatColumn} to be set + */ + @Override + public void visit(FloatColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(column.getFloatValue())); + } + + /** + * Sets the specified {@code DoubleColumn} to the key string + * + * @param column a {@code DoubleColumn} to be set + */ + @Override + public void visit(DoubleColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(column.getDoubleValue())); + } + + /** + * Sets the specified {@code TextColumn} to the key string + * + * @param column a {@code TextColumn} to be set + */ + @Override + public void visit(TextColumn column) { + assert !column.hasNullValue(); + column.getValue().ifPresent(columns::add); + } + + /** + * Sets the specified {@code BlobColumn} to the key string + * + * @param column a {@code BlobColumn} to be set + */ + @Override + public void visit(BlobColumn column) { + assert !column.hasNullValue(); + // Use Base64 encoding + columns.add( + Base64.getUrlEncoder().withoutPadding().encodeToString(column.getBlobValueAsBytes())); + } + + @Override + public void visit(DateColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))); + } + + @Override + public void visit(TimeColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))); + } + + @Override + public void visit(TimestampColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))); + } + + @Override + public void visit(TimestampTZColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/JsonConvertor.java b/core/src/main/java/com/scalar/db/storage/objectstorage/JsonConvertor.java new file mode 100644 index 0000000000..2530bb8bd8 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/JsonConvertor.java @@ -0,0 +1,33 @@ +package com.scalar.db.storage.objectstorage; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import 
com.fasterxml.jackson.databind.SerializationFeature; +import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; + +public class JsonConvertor { + private static final ObjectMapper mapper = new ObjectMapper(); + + static { + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + mapper.configure(SerializationFeature.WRAP_ROOT_VALUE, false); + mapper.registerModule(new JavaTimeModule()); + } + + public static T deserialize(String json, TypeReference typeReference) { + try { + return mapper.readValue(json, typeReference); + } catch (Exception e) { + throw new RuntimeException("Failed to deserialize the object.", e); + } + } + + public static String serialize(T object) { + try { + return mapper.writeValueAsString(object); + } catch (Exception e) { + throw new RuntimeException("Failed to serialize the object.", e); + } + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/MapVisitor.java b/core/src/main/java/com/scalar/db/storage/objectstorage/MapVisitor.java new file mode 100644 index 0000000000..6d9e2b4167 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/MapVisitor.java @@ -0,0 +1,92 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.ColumnVisitor; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; +import com.scalar.db.io.IntColumn; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.util.HashMap; +import java.util.Map; +import javax.annotation.concurrent.NotThreadSafe; + +@NotThreadSafe +public class MapVisitor implements ColumnVisitor { + private final Map values = new HashMap<>(); + + @SuppressFBWarnings("EI_EXPOSE_REP") + public Map get() { + return values; + } + + @Override + public void visit(BooleanColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getBooleanValue()); + } + + @Override + public void visit(IntColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getIntValue()); + } + + @Override + public void visit(BigIntColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getBigIntValue()); + } + + @Override + public void visit(FloatColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getFloatValue()); + } + + @Override + public void visit(DoubleColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getDoubleValue()); + } + + @Override + public void visit(TextColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getTextValue()); + } + + @Override + public void visit(BlobColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getBlobValue()); + } + + @Override + public void visit(DateColumn column) { + values.put( + column.getName(), + column.hasNullValue() ? null : TimeRelatedColumnEncodingUtils.encode(column)); + } + + @Override + public void visit(TimeColumn column) { + values.put( + column.getName(), + column.hasNullValue() ? 
null : TimeRelatedColumnEncodingUtils.encode(column)); + } + + @Override + public void visit(TimestampColumn column) { + values.put( + column.getName(), + column.hasNullValue() ? null : TimeRelatedColumnEncodingUtils.encode(column)); + } + + @Override + public void visit(TimestampTZColumn column) { + values.put( + column.getName(), + column.hasNullValue() ? null : TimeRelatedColumnEncodingUtils.encode(column)); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java b/core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java new file mode 100644 index 0000000000..457620111b --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java @@ -0,0 +1,294 @@ +package com.scalar.db.storage.objectstorage; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.scalar.db.api.Delete; +import com.scalar.db.api.DeleteIf; +import com.scalar.db.api.DeleteIfExists; +import com.scalar.db.api.Mutation; +import com.scalar.db.api.Put; +import com.scalar.db.api.PutIf; +import com.scalar.db.api.PutIfExists; +import com.scalar.db.api.PutIfNotExists; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.common.error.CoreError; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.exception.storage.NoMutationException; +import com.scalar.db.exception.storage.RetriableExecutionException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class MutateStatementHandler extends StatementHandler { + public MutateStatementHandler( + ObjectStorageWrapper wrapper, TableMetadataManager metadataManager) { + super(wrapper, metadataManager); + } + + public void handle(Mutation mutation) throws ExecutionException { + TableMetadata tableMetadata = metadataManager.getTableMetadata(mutation); + ObjectStorageMutation objectStorageMutation = + new ObjectStorageMutation(mutation, tableMetadata); + mutate( + getNamespace(mutation), + getTable(mutation), + objectStorageMutation.getConcatenatedPartitionKey(), + Collections.singletonList(mutation)); + } + + public void handle(List mutations) throws ExecutionException { + Map> mutationPerPartition = new HashMap<>(); + for (Mutation mutation : mutations) { + TableMetadata tableMetadata = metadataManager.getTableMetadata(mutation); + ObjectStorageMutation objectStorageMutation = + new ObjectStorageMutation(mutation, tableMetadata); + String partitionKey = objectStorageMutation.getConcatenatedPartitionKey(); + PartitionIdentifier partitionIdentifier = + PartitionIdentifier.of(getNamespace(mutation), getTable(mutation), partitionKey); + mutationPerPartition + .computeIfAbsent(partitionIdentifier, k -> new ArrayList<>()) + .add(mutation); + } + for (Map.Entry> entry : mutationPerPartition.entrySet()) { + PartitionIdentifier partitionIdentifier = entry.getKey(); + mutate( + partitionIdentifier.getNamespaceName(), + partitionIdentifier.getTableName(), + partitionIdentifier.getPartitionName(), + entry.getValue()); + } + } + + public void mutate( + String namespaceName, String tableName, String partitionKey, List mutations) + throws ExecutionException { + Map readVersionMap = new HashMap<>(); + Map partition = + getPartition(namespaceName, tableName, partitionKey, readVersionMap); + for (Mutation mutation : mutations) { + if (mutation instanceof Put) { + putInternal(partition, (Put) mutation); + 
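+        // (Both branches only modify the in-memory partition map; applyPartitionWrite below
+        // persists the whole partition in a single conditional request.)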
} else { + assert mutation instanceof Delete; + deleteInternal(partition, (Delete) mutation); + } + } + applyPartitionWrite(namespaceName, tableName, partitionKey, partition, readVersionMap); + } + + private void putInternal(Map partition, Put put) + throws ExecutionException { + TableMetadata tableMetadata = metadataManager.getTableMetadata(put); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, tableMetadata); + if (!put.getCondition().isPresent()) { + ObjectStorageRecord existingRecord = partition.get(mutation.getConcatenatedKey()); + if (existingRecord == null) { + partition.put(mutation.getConcatenatedKey(), mutation.makeRecord()); + } else { + partition.put(mutation.getConcatenatedKey(), mutation.makeRecord(existingRecord)); + } + } else if (put.getCondition().get() instanceof PutIfNotExists) { + if (partition.containsKey(mutation.getConcatenatedKey())) { + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage( + "The specified record already exists: key=" + mutation.getConcatenatedKey())); + } + partition.put(mutation.getConcatenatedKey(), mutation.makeRecord()); + } else if (put.getCondition().get() instanceof PutIfExists) { + ObjectStorageRecord existingRecord = partition.get(mutation.getConcatenatedKey()); + if (existingRecord == null) { + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage( + "The specified record does not exist: key=" + mutation.getConcatenatedKey())); + } + partition.put(mutation.getConcatenatedKey(), mutation.makeRecord(existingRecord)); + } else { + assert put.getCondition().get() instanceof PutIf; + ObjectStorageRecord existingRecord = partition.get(mutation.getConcatenatedKey()); + if (existingRecord == null) { + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage( + "The specified record does not exist: key=" + mutation.getConcatenatedKey())); + } + try { + validateConditions( + partition.get(mutation.getConcatenatedKey()), + put.getCondition().get().getExpressions(), + metadataManager.getTableMetadata(mutation.getOperation())); + } catch (ExecutionException e) { + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage( + "The specified conditions are not met: " + put.getCondition()), + e); + } + partition.put(mutation.getConcatenatedKey(), mutation.makeRecord(existingRecord)); + } + } + + private void deleteInternal(Map partition, Delete delete) + throws ExecutionException { + TableMetadata tableMetadata = metadataManager.getTableMetadata(delete); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, tableMetadata); + if (!delete.getCondition().isPresent()) { + partition.remove(mutation.getConcatenatedKey()); + } else if (delete.getCondition().get() instanceof DeleteIfExists) { + if (!partition.containsKey(mutation.getConcatenatedKey())) { + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage("Record not found")); + } + partition.remove(mutation.getConcatenatedKey()); + } else { + assert delete.getCondition().get() instanceof DeleteIf; + if (!partition.containsKey(mutation.getConcatenatedKey())) { + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage("Record not found")); + } + try { + validateConditions( + partition.get(mutation.getConcatenatedKey()), + delete.getCondition().get().getExpressions(), + metadataManager.getTableMetadata(mutation.getOperation())); + } catch (ExecutionException e) { + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage( + "The 
specified conditions are not met: " + delete.getCondition()), + e); + } + partition.remove(mutation.getConcatenatedKey()); + } + } + + private void applyPartitionWrite( + String namespaceName, + String tableName, + String partitionKey, + Map partition, + Map readVersionMap) + throws ExecutionException { + if (readVersionMap.containsKey( + PartitionIdentifier.of(namespaceName, tableName, partitionKey))) { + String readVersion = + readVersionMap.get(PartitionIdentifier.of(namespaceName, tableName, partitionKey)); + if (!partition.isEmpty()) { + updatePartition(namespaceName, tableName, partitionKey, partition, readVersion); + } else { + deletePartition(namespaceName, tableName, partitionKey, readVersion); + } + } else { + if (!partition.isEmpty()) { + insertPartition(namespaceName, tableName, partitionKey, partition); + } + } + } + + private Map getPartition( + String namespaceName, + String tableName, + String partitionKey, + Map readVersionMap) + throws ExecutionException { + String objectKey = ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey); + try { + ObjectStorageWrapperResponse response = wrapper.get(objectKey); + readVersionMap.put( + PartitionIdentifier.of(namespaceName, tableName, partitionKey), response.getVersion()); + return JsonConvertor.deserialize( + response.getPayload(), new TypeReference>() {}); + } catch (ObjectStorageWrapperException e) { + if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.NOT_FOUND) { + return new HashMap<>(); + } + throw new ExecutionException( + String.format( + "Failed to get partition: namespace='%s', table='%s', partition='%s'", + namespaceName, tableName, partitionKey), + e); + } + } + + private void insertPartition( + String namespaceName, + String tableName, + String partitionKey, + Map partition) + throws ExecutionException { + try { + wrapper.insert( + ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey), + JsonConvertor.serialize(partition)); + } catch (ObjectStorageWrapperException e) { + if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.ALREADY_EXISTS) { + throw new RetriableExecutionException( + CoreError.OBJECT_STORAGE_TRANSACTION_CONFLICT_OCCURRED_IN_MUTATION.buildMessage( + String.format( + "Conflict occurred while inserting partition: namespace='%s', table='%s', partition='%s'", + namespaceName, tableName, partitionKey)), + e); + } + throw new ExecutionException( + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage( + String.format( + "Failed to insert partition: namespace='%s', table='%s', partition='%s'", + namespaceName, tableName, partitionKey)), + e); + } + } + + private void updatePartition( + String namespaceName, + String tableName, + String partitionKey, + Map partition, + String readVersion) + throws ExecutionException { + try { + wrapper.update( + ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey), + JsonConvertor.serialize(partition), + readVersion); + } catch (ObjectStorageWrapperException e) { + if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.NOT_FOUND + || e.getStatusCode() == ObjectStorageWrapperException.StatusCode.VERSION_MISMATCH) { + throw new RetriableExecutionException( + CoreError.OBJECT_STORAGE_TRANSACTION_CONFLICT_OCCURRED_IN_MUTATION.buildMessage( + String.format( + "Conflict occurred while updating partition: namespace='%s', table='%s', partition='%s'", + namespaceName, tableName, partitionKey)), + e); + } + throw new ExecutionException( + 
CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage( + String.format( + "Failed to update partition: namespace='%s', table='%s', partition='%s'", + namespaceName, tableName, partitionKey)), + e); + } + } + + private void deletePartition( + String namespaceName, String tableName, String partitionKey, String readVersion) + throws ExecutionException { + try { + wrapper.delete( + ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey), readVersion); + } catch (ObjectStorageWrapperException e) { + if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.NOT_FOUND + || e.getStatusCode() == ObjectStorageWrapperException.StatusCode.VERSION_MISMATCH) { + throw new RetriableExecutionException( + CoreError.OBJECT_STORAGE_TRANSACTION_CONFLICT_OCCURRED_IN_MUTATION.buildMessage( + String.format( + "Conflict occurred while deleting partition: namespace='%s', table='%s', partition='%s'", + namespaceName, tableName, partitionKey)), + e); + } + throw new ExecutionException( + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage( + String.format( + "Failed to delete partition: namespace='%s', table='%s', partition='%s'", + namespaceName, tableName, partitionKey)), + e); + } + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java new file mode 100644 index 0000000000..267bba370a --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java @@ -0,0 +1,138 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.Delete; +import com.scalar.db.api.Get; +import com.scalar.db.api.Mutation; +import com.scalar.db.api.Put; +import com.scalar.db.api.Result; +import com.scalar.db.api.Scan; +import com.scalar.db.api.Scanner; +import com.scalar.db.common.AbstractDistributedStorage; +import com.scalar.db.common.FilterableScanner; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.common.checker.OperationChecker; +import com.scalar.db.common.error.CoreError; +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.exception.storage.ExecutionException; +import java.io.IOException; +import java.util.List; +import java.util.Optional; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ObjectStorage extends AbstractDistributedStorage { + private static final Logger logger = LoggerFactory.getLogger(ObjectStorage.class); + + private final ObjectStorageWrapper wrapper; + private final SelectStatementHandler selectStatementHandler; + private final MutateStatementHandler mutateStatementHandler; + private final OperationChecker operationChecker; + + public ObjectStorage(DatabaseConfig databaseConfig) { + super(databaseConfig); + if (databaseConfig.isCrossPartitionScanOrderingEnabled()) { + throw new IllegalArgumentException( + CoreError.OBJECT_STORAGE_CROSS_PARTITION_SCAN_WITH_ORDERING_NOT_SUPPORTED.buildMessage()); + } + ObjectStorageConfig objectStorageConfig = + ObjectStorageUtils.getObjectStorageConfig(databaseConfig); + wrapper = ObjectStorageUtils.getObjectStorageWrapper(objectStorageConfig); + TableMetadataManager metadataManager = + new TableMetadataManager( + new ObjectStorageAdmin(wrapper, objectStorageConfig), + databaseConfig.getMetadataCacheExpirationTimeSecs()); + operationChecker = new ObjectStorageOperationChecker(databaseConfig, metadataManager); + selectStatementHandler = new SelectStatementHandler(wrapper, metadataManager); + mutateStatementHandler 
= new MutateStatementHandler(wrapper, metadataManager); + logger.info("ObjectStorage object is created properly"); + } + + @Override + public Optional get(Get get) throws ExecutionException { + get = copyAndSetTargetToIfNot(get); + operationChecker.check(get); + Scanner scanner = null; + try { + if (get.getConjunctions().isEmpty()) { + scanner = selectStatementHandler.handle(get); + } else { + scanner = + new FilterableScanner( + get, selectStatementHandler.handle(copyAndPrepareForDynamicFiltering(get))); + } + Optional ret = scanner.one(); + if (!scanner.one().isPresent()) { + return ret; + } else { + throw new IllegalArgumentException( + CoreError.GET_OPERATION_USED_FOR_NON_EXACT_MATCH_SELECTION.buildMessage(get)); + } + } finally { + if (scanner != null) { + try { + scanner.close(); + } catch (IOException e) { + logger.warn("Failed to close the scanner", e); + } + } + } + } + + @Override + public Scanner scan(Scan scan) throws ExecutionException { + scan = copyAndSetTargetToIfNot(scan); + operationChecker.check(scan); + if (scan.getConjunctions().isEmpty()) { + return selectStatementHandler.handle(scan); + } else { + return new FilterableScanner( + scan, selectStatementHandler.handle(copyAndPrepareForDynamicFiltering(scan))); + } + } + + @Override + public void put(Put put) throws ExecutionException { + put = copyAndSetTargetToIfNot(put); + operationChecker.check(put); + mutateStatementHandler.handle(put); + } + + @Override + public void put(List puts) throws ExecutionException { + mutate(puts); + } + + @Override + public void delete(Delete delete) throws ExecutionException { + delete = copyAndSetTargetToIfNot(delete); + operationChecker.check(delete); + mutateStatementHandler.handle(delete); + } + + @Override + public void delete(List deletes) throws ExecutionException { + mutate(deletes); + } + + @Override + public void mutate(List mutations) throws ExecutionException { + if (mutations.size() == 1) { + Mutation mutation = mutations.get(0); + if (mutation instanceof Put) { + put((Put) mutation); + return; + } else if (mutation instanceof Delete) { + delete((Delete) mutation); + return; + } + } + mutations = copyAndSetTargetToIfNot(mutations); + operationChecker.check(mutations); + mutateStatementHandler.handle(mutations); + } + + @Override + public void close() { + wrapper.close(); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java new file mode 100644 index 0000000000..232526284b --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java @@ -0,0 +1,532 @@ +package com.scalar.db.storage.objectstorage; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.google.common.base.Splitter; +import com.google.inject.Inject; +import com.scalar.db.api.DistributedStorageAdmin; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.error.CoreError; +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.DataType; +import com.scalar.db.util.ScalarDbUtils; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import javax.annotation.Nullable; + +public class ObjectStorageAdmin implements DistributedStorageAdmin { + public static final String NAMESPACE_METADATA_TABLE = "namespaces"; + public static final String 
TABLE_METADATA_TABLE = "metadata"; + + private final ObjectStorageWrapper wrapper; + private final String metadataNamespace; + + @Inject + public ObjectStorageAdmin(DatabaseConfig databaseConfig) { + ObjectStorageConfig objectStorageConfig = + ObjectStorageUtils.getObjectStorageConfig(databaseConfig); + wrapper = ObjectStorageUtils.getObjectStorageWrapper(objectStorageConfig); + metadataNamespace = objectStorageConfig.getMetadataNamespace(); + } + + public ObjectStorageAdmin(ObjectStorageWrapper wrapper, ObjectStorageConfig objectStorageConfig) { + this.wrapper = wrapper; + metadataNamespace = objectStorageConfig.getMetadataNamespace(); + } + + @Override + public TableMetadata getImportTableMetadata( + String namespace, String table, Map overrideColumnsType) + throws ExecutionException { + throw new UnsupportedOperationException( + CoreError.OBJECT_STORAGE_IMPORT_NOT_SUPPORTED.buildMessage()); + } + + @Override + public void addRawColumnToTable( + String namespace, String table, String columnName, DataType columnType) + throws ExecutionException { + throw new UnsupportedOperationException( + CoreError.OBJECT_STORAGE_IMPORT_NOT_SUPPORTED.buildMessage()); + } + + @Override + public void close() { + wrapper.close(); + } + + @Override + public void createNamespace(String namespace, Map options) + throws ExecutionException { + try { + insertNamespaceMetadata(namespace); + } catch (Exception e) { + throw new ExecutionException( + String.format("Failed to create the namespace %s", namespace), e); + } + } + + @Override + public void createTable( + String namespace, String table, TableMetadata metadata, Map options) + throws ExecutionException { + try { + insertTableMetadata(namespace, table, metadata); + } catch (Exception e) { + throw new ExecutionException( + String.format( + "Failed to create the table %s", ScalarDbUtils.getFullTableName(namespace, table)), + e); + } + } + + @Override + public void dropTable(String namespace, String table) throws ExecutionException { + try { + deleteTableData(namespace, table); + deleteTableMetadata(namespace, table); + } catch (Exception e) { + throw new ExecutionException( + String.format( + "Failed to drop the table %s", ScalarDbUtils.getFullTableName(namespace, table)), + e); + } + } + + @Override + public void createIndex( + String namespace, String table, String columnName, Map options) + throws ExecutionException { + throw new UnsupportedOperationException( + CoreError.OBJECT_STORAGE_INDEX_NOT_SUPPORTED.buildMessage()); + } + + @Override + public void dropIndex(String namespace, String table, String columnName) + throws ExecutionException { + throw new UnsupportedOperationException( + CoreError.OBJECT_STORAGE_INDEX_NOT_SUPPORTED.buildMessage()); + } + + @Override + public void dropNamespace(String namespace) throws ExecutionException { + try { + deleteNamespaceMetadata(namespace); + } catch (Exception e) { + throw new ExecutionException(String.format("Failed to drop the namespace %s", namespace), e); + } + } + + @Override + public void truncateTable(String namespace, String table) throws ExecutionException { + try { + deleteTableData(namespace, table); + } catch (Exception e) { + throw new ExecutionException( + String.format( + "Failed to truncate the table %s", ScalarDbUtils.getFullTableName(namespace, table)), + e); + } + } + + @Nullable + @Override + public TableMetadata getTableMetadata(String namespace, String table) throws ExecutionException { + try { + String tableMetadataKey = getTableMetadataKey(namespace, table); + Map metadataTable = 
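+          // Metadata for every table is stored in one "metadata" object under the metadata
+          // namespace, keyed by "<namespace><delimiter><table>"; fetch it and look up this table.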
getTableMetadataTable(); + ObjectStorageTableMetadata tableMetadata = metadataTable.get(tableMetadataKey); + return tableMetadata != null ? tableMetadata.toTableMetadata() : null; + } catch (Exception e) { + throw new ExecutionException( + String.format( + "Failed to get the table metadata of %s", + ScalarDbUtils.getFullTableName(namespace, table)), + e); + } + } + + @Override + public Set getNamespaceTableNames(String namespace) throws ExecutionException { + try { + if (!namespaceExists(namespace)) { + return Collections.emptySet(); + } + Map metadataTable = getTableMetadataTable(); + return metadataTable.keySet().stream() + .filter( + tableMetadataKey -> + getNamespaceNameFromTableMetadataKey(tableMetadataKey).equals(namespace)) + .map(ObjectStorageAdmin::getTableNameFromTableMetadataKey) + .collect(Collectors.toSet()); + } catch (Exception e) { + throw new ExecutionException( + String.format("Failed to get the tables in the namespace %s", namespace), e); + } + } + + @Override + public boolean namespaceExists(String namespace) throws ExecutionException { + if (metadataNamespace.equals(namespace)) { + return true; + } + try { + return getNamespaceMetadataTable().containsKey(namespace); + } catch (Exception e) { + throw new ExecutionException( + String.format("Failed to check the existence of the namespace %s", namespace), e); + } + } + + @Override + public void repairNamespace(String namespace, Map options) + throws ExecutionException { + try { + upsertNamespaceMetadata(namespace); + } catch (Exception e) { + throw new ExecutionException( + String.format("Failed to repair the namespace %s", namespace), e); + } + } + + @Override + public void importTable( + String namespace, + String table, + Map options, + Map overrideColumnsType) + throws ExecutionException { + throw new UnsupportedOperationException( + CoreError.OBJECT_STORAGE_IMPORT_NOT_SUPPORTED.buildMessage()); + } + + @Override + public void repairTable( + String namespace, String table, TableMetadata metadata, Map options) + throws ExecutionException { + try { + upsertTableMetadata(namespace, table, metadata); + } catch (Exception e) { + throw new ExecutionException( + String.format( + "Failed to repair the table %s", ScalarDbUtils.getFullTableName(namespace, table)), + e); + } + } + + @Override + public void addNewColumnToTable( + String namespace, String table, String columnName, DataType columnType) + throws ExecutionException { + try { + TableMetadata currentTableMetadata = getTableMetadata(namespace, table); + TableMetadata updatedTableMetadata = + TableMetadata.newBuilder(currentTableMetadata).addColumn(columnName, columnType).build(); + upsertTableMetadata(namespace, table, updatedTableMetadata); + } catch (Exception e) { + throw new ExecutionException( + String.format( + "Failed to add a new column to the table %s", + ScalarDbUtils.getFullTableName(namespace, table)), + e); + } + } + + @Override + public Set getNamespaceNames() throws ExecutionException { + try { + return getNamespaceMetadataTable().keySet(); + } catch (Exception e) { + throw new ExecutionException("Failed to get the namespace names", e); + } + } + + @Override + public void upgrade(Map options) throws ExecutionException { + try { + Map metadataTable = getTableMetadataTable(); + List namespaceNames = + metadataTable.keySet().stream() + .map(ObjectStorageAdmin::getNamespaceNameFromTableMetadataKey) + .distinct() + .collect(Collectors.toList()); + for (String namespaceName : namespaceNames) { + upsertNamespaceMetadata(namespaceName); + } + } catch (Exception 
e) { + throw new ExecutionException("Failed to upgrade", e); + } + } + + private void insertNamespaceMetadata(String namespace) throws ExecutionException { + try { + Map readVersionMap = new HashMap<>(); + Map metadataTable = + getNamespaceMetadataTable(readVersionMap); + if (metadataTable.containsKey(namespace)) { + throw new ExecutionException( + String.format("The namespace metadata already exists: %s", namespace)); + } + if (metadataTable.isEmpty()) { + insertMetadataTable( + NAMESPACE_METADATA_TABLE, + Collections.singletonMap(namespace, new ObjectStorageNamespaceMetadata(namespace))); + } else { + metadataTable.put(namespace, new ObjectStorageNamespaceMetadata(namespace)); + updateMetadataTable( + NAMESPACE_METADATA_TABLE, metadataTable, readVersionMap.get(NAMESPACE_METADATA_TABLE)); + } + } catch (Exception e) { + throw new ExecutionException( + String.format("Failed to insert the namespace metadata: %s", namespace), e); + } + } + + private void insertTableMetadata(String namespace, String table, TableMetadata metadata) + throws ExecutionException { + String tableMetadataKey = getTableMetadataKey(namespace, table); + try { + Map readVersionMap = new HashMap<>(); + Map metadataTable = getTableMetadataTable(readVersionMap); + if (metadataTable.containsKey(tableMetadataKey)) { + throw new ExecutionException( + String.format("The table metadata already exists: %s", tableMetadataKey)); + } + if (metadataTable.isEmpty()) { + insertMetadataTable( + TABLE_METADATA_TABLE, + Collections.singletonMap(tableMetadataKey, new ObjectStorageTableMetadata(metadata))); + } else { + metadataTable.put(tableMetadataKey, new ObjectStorageTableMetadata(metadata)); + updateMetadataTable( + TABLE_METADATA_TABLE, metadataTable, readVersionMap.get(TABLE_METADATA_TABLE)); + } + } catch (Exception e) { + throw new ExecutionException( + String.format("Failed to insert the table metadata: %s", tableMetadataKey), e); + } + } + + private void upsertNamespaceMetadata(String namespace) throws ExecutionException { + try { + Map readVersionMap = new HashMap<>(); + Map metadataTable = + getNamespaceMetadataTable(readVersionMap); + if (metadataTable.isEmpty()) { + insertMetadataTable( + NAMESPACE_METADATA_TABLE, + Collections.singletonMap(namespace, new ObjectStorageNamespaceMetadata(namespace))); + } else { + metadataTable.put(namespace, new ObjectStorageNamespaceMetadata(namespace)); + updateMetadataTable( + NAMESPACE_METADATA_TABLE, metadataTable, readVersionMap.get(NAMESPACE_METADATA_TABLE)); + } + } catch (Exception e) { + throw new ExecutionException( + String.format("Failed to upsert the namespace metadata: %s", namespace), e); + } + } + + private void upsertTableMetadata(String namespace, String table, TableMetadata metadata) + throws ExecutionException { + String tableMetadataKey = getTableMetadataKey(namespace, table); + try { + Map readVersionMap = new HashMap<>(); + Map metadataTable = getTableMetadataTable(readVersionMap); + if (metadataTable.isEmpty()) { + insertMetadataTable( + TABLE_METADATA_TABLE, + Collections.singletonMap(tableMetadataKey, new ObjectStorageTableMetadata(metadata))); + } else { + metadataTable.put(tableMetadataKey, new ObjectStorageTableMetadata(metadata)); + updateMetadataTable( + TABLE_METADATA_TABLE, metadataTable, readVersionMap.get(TABLE_METADATA_TABLE)); + } + } catch (Exception e) { + throw new ExecutionException( + String.format("Failed to upsert the table metadata: %s", tableMetadataKey), e); + } + } + + private void deleteNamespaceMetadata(String namespace) throws 
ExecutionException { + try { + Map readVersionMap = new HashMap<>(); + Map metadataTable = + getNamespaceMetadataTable(readVersionMap); + if (metadataTable.isEmpty() || !metadataTable.containsKey(namespace)) { + throw new ExecutionException( + String.format("The namespace metadata does not exist: %s", namespace)); + } + metadataTable.remove(namespace); + String readVersion = readVersionMap.get(NAMESPACE_METADATA_TABLE); + if (metadataTable.isEmpty()) { + deleteMetadataTable(NAMESPACE_METADATA_TABLE, readVersion); + } else { + updateMetadataTable(NAMESPACE_METADATA_TABLE, metadataTable, readVersion); + } + } catch (Exception e) { + throw new ExecutionException( + String.format("Failed to delete the namespace metadata: %s", namespace), e); + } + } + + private void deleteTableMetadata(String namespace, String table) throws ExecutionException { + String tableMetadataKey = getTableMetadataKey(namespace, table); + try { + Map readVersionMap = new HashMap<>(); + Map metadataTable = getTableMetadataTable(readVersionMap); + if (metadataTable.isEmpty() || !metadataTable.containsKey(tableMetadataKey)) { + throw new ExecutionException( + String.format("The table metadata does not exist: %s", tableMetadataKey)); + } + metadataTable.remove(tableMetadataKey); + String readVersion = readVersionMap.get(TABLE_METADATA_TABLE); + if (metadataTable.isEmpty()) { + deleteMetadataTable(TABLE_METADATA_TABLE, readVersion); + } else { + updateMetadataTable(TABLE_METADATA_TABLE, metadataTable, readVersion); + } + } catch (Exception e) { + throw new ExecutionException( + String.format("Failed to delete the table metadata: %s", tableMetadataKey), e); + } + } + + private Map getNamespaceMetadataTable() + throws ExecutionException { + try { + ObjectStorageWrapperResponse response = + wrapper.get( + ObjectStorageUtils.getObjectKey(metadataNamespace, NAMESPACE_METADATA_TABLE, null)); + return JsonConvertor.deserialize( + response.getPayload(), + new TypeReference>() {}); + } catch (ObjectStorageWrapperException e) { + if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.NOT_FOUND) { + return Collections.emptyMap(); + } + throw new ExecutionException("Failed to get the metadata table.", e); + } + } + + private Map getNamespaceMetadataTable( + Map readVersionMap) throws ExecutionException { + try { + ObjectStorageWrapperResponse response = + wrapper.get( + ObjectStorageUtils.getObjectKey(metadataNamespace, NAMESPACE_METADATA_TABLE, null)); + readVersionMap.put(NAMESPACE_METADATA_TABLE, response.getVersion()); + return JsonConvertor.deserialize( + response.getPayload(), + new TypeReference>() {}); + } catch (ObjectStorageWrapperException e) { + if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.NOT_FOUND) { + return Collections.emptyMap(); + } + throw new ExecutionException("Failed to get the metadata table.", e); + } + } + + private Map getTableMetadataTable() + throws ExecutionException { + try { + ObjectStorageWrapperResponse response = + wrapper.get( + ObjectStorageUtils.getObjectKey(metadataNamespace, TABLE_METADATA_TABLE, null)); + return JsonConvertor.deserialize( + response.getPayload(), new TypeReference>() {}); + } catch (ObjectStorageWrapperException e) { + if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.NOT_FOUND) { + return Collections.emptyMap(); + } + throw new ExecutionException("Failed to get the metadata table.", e); + } + } + + private Map getTableMetadataTable( + Map readVersionMap) throws ExecutionException { + try { + ObjectStorageWrapperResponse response = + 
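+          // The object's version (the ETag in the Azure Blob implementation) is recorded in
+          // readVersionMap so that subsequent updates or deletes of the metadata object can be
+          // issued as conditional writes against the version that was read.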
wrapper.get( + ObjectStorageUtils.getObjectKey(metadataNamespace, TABLE_METADATA_TABLE, null)); + readVersionMap.put(TABLE_METADATA_TABLE, response.getVersion()); + return JsonConvertor.deserialize( + response.getPayload(), new TypeReference>() {}); + } catch (ObjectStorageWrapperException e) { + if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.NOT_FOUND) { + return Collections.emptyMap(); + } + throw new ExecutionException("Failed to get the metadata table.", e); + } + } + + private void insertMetadataTable(String table, Map metadataTable) + throws ExecutionException { + try { + wrapper.insert( + ObjectStorageUtils.getObjectKey(metadataNamespace, table, null), + JsonConvertor.serialize(metadataTable)); + } catch (ObjectStorageWrapperException e) { + throw new ExecutionException("Failed to insert the metadata table.", e); + } + } + + private void updateMetadataTable( + String table, Map metadataTable, String readVersion) throws ExecutionException { + try { + wrapper.update( + ObjectStorageUtils.getObjectKey(metadataNamespace, table, null), + JsonConvertor.serialize(metadataTable), + readVersion); + } catch (Exception e) { + throw new ExecutionException("Failed to update the metadata table.", e); + } + } + + private void deleteMetadataTable(String table, String readVersion) throws ExecutionException { + try { + wrapper.delete(ObjectStorageUtils.getObjectKey(metadataNamespace, table, null), readVersion); + } catch (Exception e) { + throw new ExecutionException("Failed to delete the metadata table.", e); + } + } + + private void deleteTableData(String namespace, String table) throws ExecutionException { + try { + wrapper.deleteByPrefix(ObjectStorageUtils.getObjectKey(namespace, table, null)); + } catch (Exception e) { + throw new ExecutionException( + String.format( + "Failed to delete the table data of %s", + ScalarDbUtils.getFullTableName(namespace, table)), + e); + } + } + + private static String getTableMetadataKey(String namespace, String table) { + return String.join( + String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), namespace, table); + } + + private static String getNamespaceNameFromTableMetadataKey(String tableMetadataKey) { + List parts = + Splitter.on(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER).splitToList(tableMetadataKey); + if (parts.size() != 2 || parts.get(0).isEmpty()) { + throw new IllegalArgumentException("Invalid table metadata key: " + tableMetadataKey); + } + return parts.get(0); + } + + private static String getTableNameFromTableMetadataKey(String tableMetadataKey) { + List parts = + Splitter.on(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER).splitToList(tableMetadataKey); + if (parts.size() != 2 || parts.get(1).isEmpty()) { + throw new IllegalArgumentException("Invalid table metadata key: " + tableMetadataKey); + } + return parts.get(1); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageConfig.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageConfig.java new file mode 100644 index 0000000000..875cce3bd1 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageConfig.java @@ -0,0 +1,45 @@ +package com.scalar.db.storage.objectstorage; + +public interface ObjectStorageConfig { + /** + * Returns the storage name. + * + * @return the storage name + */ + String getStorageName(); + + /** + * Returns the endpoint for the object storage service. 
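+ * (For example, with the Azurite emulator used in this patch's CI job, the endpoint is
+ * typically {@code http://localhost:10000/<account-name>}.)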
+ * + * @return the endpoint + */ + String getEndpoint(); + + /** + * Returns the username for authentication. + * + * @return the username + */ + String getUsername(); + + /** + * Returns the password for authentication. + * + * @return the password + */ + String getPassword(); + + /** + * Returns the bucket name. + * + * @return the bucket name + */ + String getBucket(); + + /** + * Returns the metadata namespace. + * + * @return the metadata namespace + */ + String getMetadataNamespace(); +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java new file mode 100644 index 0000000000..9ca4df6b75 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java @@ -0,0 +1,61 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.Delete; +import com.scalar.db.api.Mutation; +import com.scalar.db.api.Put; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.Column; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import javax.annotation.Nonnull; +import javax.annotation.concurrent.Immutable; + +@Immutable +public class ObjectStorageMutation extends ObjectStorageOperation { + ObjectStorageMutation(Mutation mutation, TableMetadata metadata) { + super(mutation, metadata); + } + + @Nonnull + public ObjectStorageRecord makeRecord() { + Mutation mutation = (Mutation) getOperation(); + + if (mutation instanceof Delete) { + return new ObjectStorageRecord(); + } + Put put = (Put) mutation; + + return new ObjectStorageRecord( + getConcatenatedKey(), + toMap(put.getPartitionKey().getColumns()), + put.getClusteringKey().map(k -> toMap(k.getColumns())).orElse(Collections.emptyMap()), + toMapForPut(put)); + } + + @Nonnull + public ObjectStorageRecord makeRecord(ObjectStorageRecord existingRecord) { + Mutation mutation = (Mutation) getOperation(); + + if (mutation instanceof Delete) { + return new ObjectStorageRecord(); + } + Put put = (Put) mutation; + + ObjectStorageRecord newRecord = new ObjectStorageRecord(existingRecord); + toMapForPut(put).forEach((k, v) -> newRecord.getValues().put(k, v)); + return newRecord; + } + + private Map toMap(Collection> columns) { + MapVisitor visitor = new MapVisitor(); + columns.forEach(c -> c.accept(visitor)); + return visitor.get(); + } + + private Map toMapForPut(Put put) { + MapVisitor visitor = new MapVisitor(); + put.getColumns().values().forEach(c -> c.accept(visitor)); + return visitor.get(); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java new file mode 100644 index 0000000000..9f85e3e65b --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java @@ -0,0 +1,41 @@ +package com.scalar.db.storage.objectstorage; + +import java.util.Objects; +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +@Immutable +public class ObjectStorageNamespaceMetadata { + private final String name; + + // The default constructor is required by Jackson to deserialize JSON object + public ObjectStorageNamespaceMetadata() { + this(null); + } + + public ObjectStorageNamespaceMetadata(@Nullable String name) { + this.name = name != null ? 
name : ""; + } + + public String getName() { + return name; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ObjectStorageNamespaceMetadata)) { + return false; + } + ObjectStorageNamespaceMetadata that = (ObjectStorageNamespaceMetadata) o; + + return name.equals(that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java new file mode 100644 index 0000000000..e349bb51c9 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java @@ -0,0 +1,77 @@ +package com.scalar.db.storage.objectstorage; + +import com.google.common.base.Joiner; +import com.scalar.db.api.Operation; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.Column; +import java.util.HashMap; +import java.util.Map; +import javax.annotation.Nonnull; +import javax.annotation.concurrent.Immutable; + +@Immutable +public class ObjectStorageOperation { + private final Operation operation; + private final TableMetadata metadata; + + public ObjectStorageOperation(Operation operation, TableMetadata metadata) { + this.operation = operation; + this.metadata = metadata; + } + + @Nonnull + public Operation getOperation() { + return operation; + } + + @Nonnull + public String getConcatenatedPartitionKey() { + Map> keyMap = new HashMap<>(); + operation.getPartitionKey().getColumns().forEach(c -> keyMap.put(c.getName(), c)); + + ConcatenationVisitor visitor = new ConcatenationVisitor(); + metadata.getPartitionKeyNames().forEach(name -> keyMap.get(name).accept(visitor)); + + return visitor.build(); + } + + @Nonnull + public String getConcatenatedClusteringKey() { + Map> keyMap = new HashMap<>(); + operation + .getClusteringKey() + .ifPresent(k -> k.getColumns().forEach(c -> keyMap.put(c.getName(), c))); + + ConcatenationVisitor visitor = new ConcatenationVisitor(); + metadata.getClusteringKeyNames().forEach(name -> keyMap.get(name).accept(visitor)); + + return visitor.build(); + } + + @Nonnull + public String getConcatenatedKey() { + if (operation.getClusteringKey().isPresent()) { + return String.join( + String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), + getConcatenatedPartitionKey(), + getConcatenatedClusteringKey()); + } + return getConcatenatedPartitionKey(); + } + + @SafeVarargs + public final void checkArgument(Class... 
expected) { + for (Class e : expected) { + if (e.isInstance(operation)) { + return; + } + } + throw new IllegalArgumentException( + Joiner.on(" ") + .join( + new String[] { + operation.getClass().toString(), "is passed where something like", + expected[0].toString(), "is expected" + })); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java new file mode 100644 index 0000000000..732266f03b --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java @@ -0,0 +1,123 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.Delete; +import com.scalar.db.api.Get; +import com.scalar.db.api.Operation; +import com.scalar.db.api.Put; +import com.scalar.db.api.Scan; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.common.checker.OperationChecker; +import com.scalar.db.common.error.CoreError; +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.ColumnVisitor; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; +import com.scalar.db.io.IntColumn; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; + +public class ObjectStorageOperationChecker extends OperationChecker { + private static final char[] ILLEGAL_CHARACTERS_IN_PRIMARY_KEY = { + ObjectStorageUtils.OBJECT_KEY_DELIMITER, ObjectStorageUtils.CONCATENATED_KEY_DELIMITER, + }; + + private static final ColumnVisitor PRIMARY_KEY_COLUMN_CHECKER = + new ColumnVisitor() { + @Override + public void visit(BooleanColumn column) {} + + @Override + public void visit(IntColumn column) {} + + @Override + public void visit(BigIntColumn column) {} + + @Override + public void visit(FloatColumn column) {} + + @Override + public void visit(DoubleColumn column) {} + + @Override + public void visit(TextColumn column) { + String value = column.getTextValue(); + assert value != null; + + for (char illegalCharacter : ILLEGAL_CHARACTERS_IN_PRIMARY_KEY) { + if (value.indexOf(illegalCharacter) != -1) { + throw new IllegalArgumentException( + CoreError.OBJECT_STORAGE_PRIMARY_KEY_CONTAINS_ILLEGAL_CHARACTER.buildMessage( + column.getName(), value)); + } + } + } + + @Override + public void visit(BlobColumn column) {} + + @Override + public void visit(DateColumn column) {} + + @Override + public void visit(TimeColumn column) {} + + @Override + public void visit(TimestampColumn column) {} + + @Override + public void visit(TimestampTZColumn column) {} + }; + + public ObjectStorageOperationChecker( + DatabaseConfig databaseConfig, TableMetadataManager metadataManager) { + super(databaseConfig, metadataManager); + } + + @Override + public void check(Get get) throws ExecutionException { + super.check(get); + checkPrimaryKey(get); + } + + @Override + public void check(Scan scan) throws ExecutionException { + super.check(scan); + checkPrimaryKey(scan); + scan.getStartClusteringKey() + .ifPresent( + c -> c.getColumns().forEach(column -> column.accept(PRIMARY_KEY_COLUMN_CHECKER))); + scan.getEndClusteringKey() + .ifPresent( + c -> c.getColumns().forEach(column -> 
column.accept(PRIMARY_KEY_COLUMN_CHECKER))); + } + + @Override + public void check(Put put) throws ExecutionException { + super.check(put); + checkPrimaryKey(put); + } + + @Override + public void check(Delete delete) throws ExecutionException { + super.check(delete); + checkPrimaryKey(delete); + } + + private void checkPrimaryKey(Operation operation) { + operation + .getPartitionKey() + .getColumns() + .forEach(column -> column.accept(PRIMARY_KEY_COLUMN_CHECKER)); + operation + .getClusteringKey() + .ifPresent( + c -> c.getColumns().forEach(column -> column.accept(PRIMARY_KEY_COLUMN_CHECKER))); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java new file mode 100644 index 0000000000..98dd1d241f --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java @@ -0,0 +1,19 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorage; +import com.scalar.db.api.DistributedStorageAdmin; +import com.scalar.db.api.DistributedStorageProvider; +import com.scalar.db.common.CheckedDistributedStorageAdmin; +import com.scalar.db.config.DatabaseConfig; + +public interface ObjectStorageProvider extends DistributedStorageProvider { + @Override + default DistributedStorage createDistributedStorage(DatabaseConfig config) { + return new ObjectStorage(config); + } + + @Override + default DistributedStorageAdmin createDistributedStorageAdmin(DatabaseConfig config) { + return new CheckedDistributedStorageAdmin(new ObjectStorageAdmin(config), config); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java new file mode 100644 index 0000000000..e3d473ff5e --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java @@ -0,0 +1,116 @@ +package com.scalar.db.storage.objectstorage; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +@Immutable +public class ObjectStorageRecord { + private final String concatenatedKey; + private final Map partitionKey; + private final Map clusteringKey; + private final Map values; + + // The default constructor is required by Jackson to deserialize JSON object + public ObjectStorageRecord() { + this(null, null, null, null); + } + + public ObjectStorageRecord( + @Nullable String concatenatedKey, + @Nullable Map partitionKey, + @Nullable Map clusteringKey, + @Nullable Map values) { + this.concatenatedKey = concatenatedKey != null ? concatenatedKey : ""; + this.partitionKey = partitionKey != null ? partitionKey : Collections.emptyMap(); + this.clusteringKey = clusteringKey != null ? clusteringKey : Collections.emptyMap(); + this.values = values != null ? 
values : Collections.emptyMap(); + } + + public ObjectStorageRecord(ObjectStorageRecord record) { + this( + record.getConcatenatedKey(), + record.getPartitionKey(), + record.getClusteringKey(), + record.getValues()); + } + + public String getConcatenatedKey() { + return concatenatedKey; + } + + public Map<String, Object> getPartitionKey() { + return partitionKey; + } + + public Map<String, Object> getClusteringKey() { + return clusteringKey; + } + + public Map<String, Object> getValues() { + return values; + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (!(o instanceof ObjectStorageRecord)) { + return false; + } + ObjectStorageRecord other = (ObjectStorageRecord) o; + if (!other.getConcatenatedKey().equals(concatenatedKey)) { + return false; + } + if (!other.getPartitionKey().equals(partitionKey)) { + return false; + } + if (!other.getClusteringKey().equals(clusteringKey)) { + return false; + } + return other.getValues().equals(values); + } + + @Override + public int hashCode() { + return Objects.hash(concatenatedKey, partitionKey, clusteringKey, values); + } + + // Builder + + public static class Builder { + private String concatenatedPartitionKey; + private Map<String, Object> partitionKey; + private Map<String, Object> clusteringKey; + private Map<String, Object> values; + + public Builder() {} + + public Builder concatenatedPartitionKey(String concatenatedPartitionKey) { + this.concatenatedPartitionKey = concatenatedPartitionKey; + return this; + } + + public Builder partitionKey(Map<String, Object> partitionKey) { + this.partitionKey = partitionKey; + return this; + } + + public Builder clusteringKey(Map<String, Object> clusteringKey) { + this.clusteringKey = clusteringKey; + return this; + } + + public Builder values(Map<String, Object> values) { + this.values = values; + return this; + } + + public ObjectStorageRecord build() { + return new ObjectStorageRecord(concatenatedPartitionKey, partitionKey, clusteringKey, values); + } + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java new file mode 100644 index 0000000000..23a684f665 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java @@ -0,0 +1,142 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.Scan; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.DataType; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +@Immutable +public class ObjectStorageTableMetadata { + private final LinkedHashSet<String> partitionKeyNames; + private final LinkedHashSet<String> clusteringKeyNames; + private final Map<String, String> clusteringOrders; + private final Set<String> secondaryIndexNames; + private final Map<String, String> columns; + + // The default constructor is required by Jackson to deserialize JSON object + public ObjectStorageTableMetadata() { + this(null, null, null, null, null); + } + + public ObjectStorageTableMetadata( + @Nullable LinkedHashSet<String> partitionKeyNames, + @Nullable LinkedHashSet<String> clusteringKeyNames, + @Nullable Map<String, String> clusteringOrders, + @Nullable Set<String> secondaryIndexNames, + @Nullable Map<String, String> columns) { + this.partitionKeyNames = partitionKeyNames != null ? partitionKeyNames : new LinkedHashSet<>(); + this.clusteringKeyNames = + clusteringKeyNames != null ?
clusteringKeyNames : new LinkedHashSet<>(); + this.clusteringOrders = clusteringOrders != null ? clusteringOrders : Collections.emptyMap(); + this.secondaryIndexNames = + secondaryIndexNames != null ? secondaryIndexNames : Collections.emptySet(); + this.columns = columns != null ? columns : Collections.emptyMap(); + } + + public ObjectStorageTableMetadata(TableMetadata tableMetadata) { + Map clusteringOrders = + tableMetadata.getClusteringKeyNames().stream() + .collect(Collectors.toMap(c -> c, c -> tableMetadata.getClusteringOrder(c).name())); + Map columnTypeByName = new HashMap<>(); + tableMetadata + .getColumnNames() + .forEach( + columnName -> + columnTypeByName.put( + columnName, tableMetadata.getColumnDataType(columnName).name().toLowerCase())); + this.partitionKeyNames = tableMetadata.getPartitionKeyNames(); + this.clusteringKeyNames = tableMetadata.getClusteringKeyNames(); + this.clusteringOrders = clusteringOrders; + this.secondaryIndexNames = tableMetadata.getSecondaryIndexNames(); + this.columns = columnTypeByName; + } + + public LinkedHashSet getPartitionKeyNames() { + return partitionKeyNames; + } + + public LinkedHashSet getClusteringKeyNames() { + return clusteringKeyNames; + } + + public Map getClusteringOrders() { + return clusteringOrders; + } + + public Set getSecondaryIndexNames() { + return secondaryIndexNames; + } + + public Map getColumns() { + return columns; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ObjectStorageTableMetadata)) { + return false; + } + ObjectStorageTableMetadata that = (ObjectStorageTableMetadata) o; + return Objects.equals(partitionKeyNames, that.partitionKeyNames) + && Objects.equals(clusteringKeyNames, that.clusteringKeyNames) + && Objects.equals(clusteringOrders, that.clusteringOrders) + && Objects.equals(secondaryIndexNames, that.secondaryIndexNames) + && Objects.equals(columns, that.columns); + } + + @Override + public int hashCode() { + return Objects.hash( + partitionKeyNames, clusteringKeyNames, clusteringOrders, secondaryIndexNames, columns); + } + + public TableMetadata toTableMetadata() { + TableMetadata.Builder builder = TableMetadata.newBuilder(); + partitionKeyNames.forEach(builder::addPartitionKey); + clusteringKeyNames.forEach( + n -> builder.addClusteringKey(n, Scan.Ordering.Order.valueOf(clusteringOrders.get(n)))); + secondaryIndexNames.forEach(builder::addSecondaryIndex); + columns.forEach((key, value) -> builder.addColumn(key, convertDataType(value))); + return builder.build(); + } + + private DataType convertDataType(String columnType) { + switch (columnType) { + case "int": + return DataType.INT; + case "bigint": + return DataType.BIGINT; + case "float": + return DataType.FLOAT; + case "double": + return DataType.DOUBLE; + case "text": + return DataType.TEXT; + case "boolean": + return DataType.BOOLEAN; + case "blob": + return DataType.BLOB; + case "date": + return DataType.DATE; + case "time": + return DataType.TIME; + case "timestamp": + return DataType.TIMESTAMP; + case "timestamptz": + return DataType.TIMESTAMPTZ; + default: + throw new AssertionError("Unknown column type: " + columnType); + } + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java new file mode 100644 index 0000000000..1443a150a2 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java @@ -0,0 +1,53 @@ +package 
com.scalar.db.storage.objectstorage; + +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobServiceClientBuilder; +import com.azure.storage.common.StorageSharedKeyCredential; +import com.scalar.db.config.DatabaseConfig; +import java.util.Objects; +import javax.annotation.Nullable; + +public class ObjectStorageUtils { + public static final char OBJECT_KEY_DELIMITER = '/'; + public static final char CONCATENATED_KEY_DELIMITER = '*'; + + public static String getObjectKey(String namespace, String table, @Nullable String partition) { + if (partition == null) { + return String.join(String.valueOf(OBJECT_KEY_DELIMITER), namespace, table); + } else { + return String.join(String.valueOf(OBJECT_KEY_DELIMITER), namespace, table, partition); + } + } + + public static ObjectStorageConfig getObjectStorageConfig(DatabaseConfig databaseConfig) { + if (Objects.equals(databaseConfig.getStorage(), BlobConfig.STORAGE_NAME)) { + return new BlobConfig(databaseConfig); + } else { + throw new IllegalArgumentException( + "Unsupported object storage: " + databaseConfig.getStorage()); + } + } + + public static ObjectStorageWrapper getObjectStorageWrapper( + ObjectStorageConfig objectStorageConfig) { + if (Objects.equals(objectStorageConfig.getStorageName(), BlobConfig.STORAGE_NAME)) { + assert objectStorageConfig instanceof BlobConfig; + return new BlobWrapper( + buildBlobContainerClient(objectStorageConfig), (BlobConfig) objectStorageConfig); + } else { + throw new IllegalArgumentException( + "Unsupported object storage: " + objectStorageConfig.getStorageName()); + } + } + + private static BlobContainerClient buildBlobContainerClient( + ObjectStorageConfig objectStorageConfig) { + return new BlobServiceClientBuilder() + .endpoint(objectStorageConfig.getEndpoint()) + .credential( + new StorageSharedKeyCredential( + objectStorageConfig.getUsername(), objectStorageConfig.getPassword())) + .buildClient() + .getBlobContainerClient(objectStorageConfig.getBucket()); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapper.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapper.java new file mode 100644 index 0000000000..70110899d3 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapper.java @@ -0,0 +1,71 @@ +package com.scalar.db.storage.objectstorage; + +import java.util.Set; + +public interface ObjectStorageWrapper { + + /** + * Get the object from the storage. + * + * @param key the key of the object + * @throws ObjectStorageWrapperException if the object does not exist + * @return the object and its version + */ + ObjectStorageWrapperResponse get(String key) throws ObjectStorageWrapperException; + + /** + * Get object keys with the specified prefix. + * + * @param prefix the prefix of the keys + * @return the set of keys with the specified prefix + */ + Set getKeys(String prefix); + + /** + * Insert the object into the storage. + * + * @param key the key of the object + * @param object the object to insert + * @throws ObjectStorageWrapperException if the object already exists or a conflict occurs + */ + void insert(String key, String object) throws ObjectStorageWrapperException; + + /** + * Update the object in the storage if the version matches. 
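+ * <p>Illustrative sketch (not part of the original Javadoc): a typical optimistic-concurrency
+ * update reads the object, modifies the payload, and writes it back conditionally on the version
+ * returned by the read. {@code modify} below is a hypothetical helper.
+ *
+ * <pre>{@code
+ * try {
+ *   ObjectStorageWrapperResponse response = wrapper.get(key);
+ *   String updated = modify(response.getPayload());
+ *   wrapper.update(key, updated, response.getVersion());
+ * } catch (ObjectStorageWrapperException e) {
+ *   if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.VERSION_MISMATCH) {
+ *     // another writer updated the object concurrently; re-read and retry
+ *   }
+ * }
+ * }</pre>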
+ * + * @param key the key of the object + * @param object the updated object + * @param version the expected version of the object + * @throws ObjectStorageWrapperException if the object does not exist or the version does not + * match + */ + void update(String key, String object, String version) throws ObjectStorageWrapperException; + + /** + * Delete the object from the storage. + * + * @param key the key of the object + * @throws ObjectStorageWrapperException if the object does not exist or a conflict occurs + */ + void delete(String key) throws ObjectStorageWrapperException; + + /** + * Delete the object from the storage if the version matches. + * + * @param key the key of the object + * @param version the expected version of the object + * @throws ObjectStorageWrapperException if the object does not exist or the version does not + * match + */ + void delete(String key, String version) throws ObjectStorageWrapperException; + + /** + * Delete objects with the specified prefix from the storage. + * + * @param prefix the prefix of the objects to delete + */ + void deleteByPrefix(String prefix); + + /** Close the storage wrapper. */ + void close(); +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperException.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperException.java new file mode 100644 index 0000000000..875590139f --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperException.java @@ -0,0 +1,20 @@ +package com.scalar.db.storage.objectstorage; + +public class ObjectStorageWrapperException extends Exception { + private final StatusCode code; + + public ObjectStorageWrapperException(StatusCode code, Throwable cause) { + super(cause); + this.code = code; + } + + public StatusCode getStatusCode() { + return code; + } + + public enum StatusCode { + NOT_FOUND, + ALREADY_EXISTS, + VERSION_MISMATCH, + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperResponse.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperResponse.java new file mode 100644 index 0000000000..02aa6f6454 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperResponse.java @@ -0,0 +1,19 @@ +package com.scalar.db.storage.objectstorage; + +public class ObjectStorageWrapperResponse { + private final String payload; + private final String version; + + public ObjectStorageWrapperResponse(String payload, String version) { + this.payload = payload; + this.version = version; + } + + public String getPayload() { + return payload; + } + + public String getVersion() { + return version; + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/PartitionIdentifier.java b/core/src/main/java/com/scalar/db/storage/objectstorage/PartitionIdentifier.java new file mode 100644 index 0000000000..41d65deb90 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/PartitionIdentifier.java @@ -0,0 +1,45 @@ +package com.scalar.db.storage.objectstorage; + +public class PartitionIdentifier { + private final String namespaceName; + private final String tableName; + private final String partitionName; + + public PartitionIdentifier(String namespaceName, String tableName, String partitionName) { + this.namespaceName = namespaceName; + this.tableName = tableName; + this.partitionName = partitionName; + } + + public static PartitionIdentifier of( + String namespaceName, String tableName, String 
partitionName) { + return new PartitionIdentifier(namespaceName, tableName, partitionName); + } + + public String getNamespaceName() { + return namespaceName; + } + + public String getTableName() { + return tableName; + } + + public String getPartitionName() { + return partitionName; + } + + @Override + public int hashCode() { + return (namespaceName + tableName + partitionName).hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof PartitionIdentifier)) return false; + PartitionIdentifier other = (PartitionIdentifier) obj; + return namespaceName.equals(other.namespaceName) + && tableName.equals(other.tableName) + && partitionName.equals(other.partitionName); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java new file mode 100644 index 0000000000..19246231c0 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java @@ -0,0 +1,53 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.Result; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.ResultImpl; +import com.scalar.db.io.Column; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.concurrent.ThreadSafe; + +@ThreadSafe +public class ResultInterpreter { + private final List projections; + private final TableMetadata metadata; + + @SuppressFBWarnings("EI_EXPOSE_REP2") + public ResultInterpreter(List projections, TableMetadata metadata) { + this.projections = Objects.requireNonNull(projections); + this.metadata = Objects.requireNonNull(metadata); + } + + public Result interpret(ObjectStorageRecord record) { + Map> ret = new HashMap<>(); + + if (projections.isEmpty()) { + metadata.getColumnNames().forEach(name -> add(ret, name, record, metadata)); + } else { + projections.forEach(name -> add(ret, name, record, metadata)); + } + + return new ResultImpl(ret, metadata); + } + + private void add( + Map> columns, + String name, + ObjectStorageRecord record, + TableMetadata metadata) { + Object value; + if (record.getPartitionKey().containsKey(name)) { + value = record.getPartitionKey().get(name); + } else if (record.getClusteringKey().containsKey(name)) { + value = record.getClusteringKey().get(name); + } else { + value = record.getValues().get(name); + } + + columns.put(name, ColumnValueMapper.convert(value, name, metadata.getColumnDataType(name))); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ScannerImpl.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ScannerImpl.java new file mode 100644 index 0000000000..27efcdd22f --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ScannerImpl.java @@ -0,0 +1,70 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.Result; +import com.scalar.db.api.Scanner; +import com.scalar.db.common.ScannerIterator; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Optional; +import javax.annotation.Nonnull; +import javax.annotation.concurrent.NotThreadSafe; + +@NotThreadSafe +public class ScannerImpl implements Scanner { + private final Iterator recordIterator; + private final 
ResultInterpreter resultInterpreter; + private final int recordCountLimit; + + private ScannerIterator scannerIterator; + private int recordCount; + + @SuppressFBWarnings("EI_EXPOSE_REP2") + public ScannerImpl( + Iterator recordIterator, + ResultInterpreter resultInterpreter, + int recordCountLimit) { + this.recordIterator = recordIterator; + this.resultInterpreter = resultInterpreter; + this.recordCountLimit = recordCountLimit; + this.recordCount = 0; + } + + @Override + @Nonnull + public Optional one() { + if (!recordIterator.hasNext()) { + return Optional.empty(); + } + if (recordCountLimit != 0 && recordCount >= recordCountLimit) { + return Optional.empty(); + } + recordCount++; + return Optional.of(resultInterpreter.interpret(recordIterator.next())); + } + + @Override + @Nonnull + public List all() { + List results = new ArrayList<>(); + Optional result; + while ((result = one()).isPresent()) { + results.add(result.get()); + } + return results; + } + + @Override + public void close() throws IOException {} + + @Override + @Nonnull + public Iterator iterator() { + if (scannerIterator == null) { + scannerIterator = new ScannerIterator(this); + } + return scannerIterator; + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java b/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java new file mode 100644 index 0000000000..d7140aaf84 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java @@ -0,0 +1,303 @@ +package com.scalar.db.storage.objectstorage; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.scalar.db.api.Get; +import com.scalar.db.api.Scan; +import com.scalar.db.api.ScanAll; +import com.scalar.db.api.Scanner; +import com.scalar.db.api.Selection; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.EmptyScanner; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.common.error.CoreError; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.Column; +import com.scalar.db.io.Key; +import com.scalar.db.util.ScalarDbUtils; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import javax.annotation.concurrent.ThreadSafe; + +@ThreadSafe +public class SelectStatementHandler extends StatementHandler { + public SelectStatementHandler( + ObjectStorageWrapper wrapper, TableMetadataManager metadataManager) { + super(wrapper, metadataManager); + } + + @Nonnull + public Scanner handle(Selection selection) throws ExecutionException { + TableMetadata tableMetadata = metadataManager.getTableMetadata(selection); + if (selection instanceof Get) { + if (ScalarDbUtils.isSecondaryIndexSpecified(selection, tableMetadata)) { + throw new UnsupportedOperationException( + CoreError.OBJECT_STORAGE_INDEX_NOT_SUPPORTED.buildMessage()); + } else { + return executeGet((Get) selection, tableMetadata); + } + } else { + if (selection instanceof ScanAll) { + return executeScanAll((ScanAll) selection, tableMetadata); + } else if (ScalarDbUtils.isSecondaryIndexSpecified(selection, tableMetadata)) { + throw new UnsupportedOperationException( + CoreError.OBJECT_STORAGE_INDEX_NOT_SUPPORTED.buildMessage()); + } else { + return executeScan((Scan) selection, tableMetadata); + } + } + } + + 
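+ // Illustrative sketch (not part of the original patch): how a caller is expected to drive this
+ // handler for a primary-key lookup; "handler" is an assumed variable name.
+ //
+ //   Get get =
+ //       Get.newBuilder()
+ //           .namespace("ns")
+ //           .table("tbl")
+ //           .partitionKey(Key.ofText("user_id", "user1"))
+ //           .build();
+ //   try (Scanner scanner = handler.handle(get)) {
+ //     Optional<Result> result = scanner.one();
+ //   }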
private Scanner executeGet(Get get, TableMetadata metadata) throws ExecutionException { + ObjectStorageOperation operation = new ObjectStorageOperation(get, metadata); + operation.checkArgument(Get.class); + Optional record = + getRecord( + getNamespace(get), + getTable(get), + operation.getConcatenatedPartitionKey(), + operation.getConcatenatedKey()); + if (!record.isPresent()) { + return new EmptyScanner(); + } + return new ScannerImpl( + Collections.singletonList(record.get()).iterator(), + new ResultInterpreter(get.getProjections(), metadata), + 1); + } + + private Scanner executeScan(Scan scan, TableMetadata metadata) throws ExecutionException { + ObjectStorageOperation operation = new ObjectStorageOperation(scan, metadata); + operation.checkArgument(Scan.class); + List records = + new ArrayList<>( + getRecordsInPartition( + getNamespace(scan), getTable(scan), operation.getConcatenatedPartitionKey())); + + records.sort( + (o1, o2) -> + new ClusteringKeyComparator(metadata) + .compare(o1.getClusteringKey(), o2.getClusteringKey())); + if (isReverseOrder(scan, metadata)) { + Collections.reverse(records); + } + + // If the scan is for DESC clustering order, use the end clustering key as a start key and the + // start clustering key as an end key + boolean scanForDescClusteringOrder = isScanForDescClusteringOrder(scan, metadata); + Optional startKey = + scanForDescClusteringOrder ? scan.getEndClusteringKey() : scan.getStartClusteringKey(); + boolean startInclusive = + scanForDescClusteringOrder ? scan.getEndInclusive() : scan.getStartInclusive(); + Optional endKey = + scanForDescClusteringOrder ? scan.getStartClusteringKey() : scan.getEndClusteringKey(); + boolean endInclusive = + scanForDescClusteringOrder ? scan.getStartInclusive() : scan.getEndInclusive(); + + if (startKey.isPresent()) { + records = + filterRecordsByClusteringKeyBoundary( + records, startKey.get(), true, startInclusive, metadata); + } + if (endKey.isPresent()) { + records = + filterRecordsByClusteringKeyBoundary( + records, endKey.get(), false, endInclusive, metadata); + } + + if (scan.getLimit() > 0) { + records = records.subList(0, Math.min(scan.getLimit(), records.size())); + } + + return new ScannerImpl( + records.iterator(), + new ResultInterpreter(scan.getProjections(), metadata), + scan.getLimit()); + } + + private Scanner executeScanAll(ScanAll scan, TableMetadata metadata) throws ExecutionException { + ObjectStorageOperation operation = new ObjectStorageOperation(scan, metadata); + operation.checkArgument(ScanAll.class); + Set records = getRecordsInTable(getNamespace(scan), getTable(scan)); + if (scan.getLimit() > 0) { + records = records.stream().limit(scan.getLimit()).collect(Collectors.toSet()); + } + return new ScannerImpl( + records.iterator(), + new ResultInterpreter(scan.getProjections(), metadata), + scan.getLimit()); + } + + private Optional getRecord( + String namespace, String table, String partition, String concatenatedKey) + throws ExecutionException { + try { + ObjectStorageWrapperResponse response = + wrapper.get(ObjectStorageUtils.getObjectKey(namespace, table, partition)); + Map recordsInPartition = + JsonConvertor.deserialize( + response.getPayload(), new TypeReference>() {}); + return Optional.ofNullable(recordsInPartition.get(concatenatedKey)); + } catch (ObjectStorageWrapperException e) { + if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.NOT_FOUND) { + // the specified partition does not exist + return Optional.empty(); + } else { + throw new ExecutionException( + 
CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(), e); + } + } catch (Exception e) { + throw new ExecutionException( + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(), e); + } + } + + private Set getRecordsInPartition( + String namespace, String table, String partition) throws ExecutionException { + try { + ObjectStorageWrapperResponse response = + wrapper.get(ObjectStorageUtils.getObjectKey(namespace, table, partition)); + Map recordsInPartition = + JsonConvertor.deserialize( + response.getPayload(), new TypeReference>() {}); + return new HashSet<>(recordsInPartition.values()); + } catch (ObjectStorageWrapperException e) { + if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.NOT_FOUND) { + // the specified partition does not exist + return Collections.emptySet(); + } else { + throw new ExecutionException( + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(), e); + } + } catch (Exception e) { + throw new ExecutionException( + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(), e); + } + } + + private Set getRecordsInTable(String namespace, String table) + throws ExecutionException { + Set partitionNames = + wrapper.getKeys(ObjectStorageUtils.getObjectKey(namespace, table, "")).stream() + .map(key -> key.substring(key.lastIndexOf(ObjectStorageUtils.OBJECT_KEY_DELIMITER) + 1)) + .filter(partition -> !partition.isEmpty()) + .collect(Collectors.toSet()); + Set records = new HashSet<>(); + for (String key : partitionNames) { + try { + records.addAll(getRecordsInPartition(namespace, table, key)); + } catch (ExecutionException e) { + throw e; + } catch (Exception e) { + throw new ExecutionException( + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(), e); + } + } + return records; + } + + private boolean isReverseOrder(Scan scan, TableMetadata metadata) { + Boolean reverse = null; + Iterator iterator = metadata.getClusteringKeyNames().iterator(); + for (Scan.Ordering ordering : scan.getOrderings()) { + String clusteringKeyName = iterator.next(); + if (!ordering.getColumnName().equals(clusteringKeyName)) { + throw new IllegalArgumentException( + CoreError.OPERATION_CHECK_ERROR_ORDERING_NOT_PROPERLY_SPECIFIED.buildMessage(scan)); + } + + boolean rightOrder = + ordering.getOrder() != metadata.getClusteringOrder(ordering.getColumnName()); + if (reverse == null) { + reverse = rightOrder; + } else { + if (reverse != rightOrder) { + throw new IllegalArgumentException( + CoreError.OPERATION_CHECK_ERROR_ORDERING_NOT_PROPERLY_SPECIFIED.buildMessage(scan)); + } + } + } + return reverse != null && reverse; + } + + private boolean isScanForDescClusteringOrder(Scan scan, TableMetadata tableMetadata) { + if (scan.getStartClusteringKey().isPresent()) { + Key startClusteringKey = scan.getStartClusteringKey().get(); + String lastValueName = + startClusteringKey.getColumns().get(startClusteringKey.size() - 1).getName(); + return tableMetadata.getClusteringOrder(lastValueName) == Scan.Ordering.Order.DESC; + } + if (scan.getEndClusteringKey().isPresent()) { + Key endClusteringKey = scan.getEndClusteringKey().get(); + String lastValueName = + endClusteringKey.getColumns().get(endClusteringKey.size() - 1).getName(); + return tableMetadata.getClusteringOrder(lastValueName) == Scan.Ordering.Order.DESC; + } + return false; + } + + private List filterRecordsByClusteringKeyBoundary( + List records, + Key clusteringKey, + boolean isStart, + boolean isInclusive, + TableMetadata metadata) { + for (Column column : 
clusteringKey.getColumns()) { + Scan.Ordering.Order order = metadata.getClusteringOrder(column.getName()); + if (clusteringKey.getColumns().indexOf(column) == clusteringKey.size() - 1) { + return records.stream() + .filter( + record -> { + Column recordColumn = + ColumnValueMapper.convert( + record.getClusteringKey().get(column.getName()), + column.getName(), + column.getDataType()); + int cmp = + new ColumnComparator(column.getDataType()).compare(recordColumn, column); + cmp = order == Scan.Ordering.Order.ASC ? cmp : -cmp; + if (isStart) { + if (isInclusive) { + return cmp >= 0; + } else { + return cmp > 0; + } + } else { + if (isInclusive) { + return cmp <= 0; + } else { + return cmp < 0; + } + } + }) + .collect(Collectors.toList()); + } else { + List tmpRecords = new ArrayList<>(); + records.forEach( + record -> { + Column recordColumn = + ColumnValueMapper.convert( + record.getClusteringKey().get(column.getName()), + column.getName(), + column.getDataType()); + int cmp = new ColumnComparator(column.getDataType()).compare(recordColumn, column); + if (cmp == 0) { + tmpRecords.add(record); + } + }); + if (tmpRecords.isEmpty()) { + return Collections.emptyList(); + } + records = tmpRecords; + } + } + return records; + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java b/core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java new file mode 100644 index 0000000000..259f75cd83 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java @@ -0,0 +1,132 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.ConditionalExpression; +import com.scalar.db.api.Operation; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; +import java.util.List; +import javax.annotation.Nonnull; + +public class StatementHandler { + protected final ObjectStorageWrapper wrapper; + protected final TableMetadataManager metadataManager; + + public StatementHandler(ObjectStorageWrapper wrapper, TableMetadataManager metadataManager) { + this.wrapper = wrapper; + this.metadataManager = metadataManager; + } + + @Nonnull + protected String getNamespace(Operation operation) { + assert operation.forNamespace().isPresent(); + return operation.forNamespace().get(); + } + + @Nonnull + protected String getTable(Operation operation) { + assert operation.forTable().isPresent(); + return operation.forTable().get(); + } + + protected void validateConditions( + ObjectStorageRecord record, List expressions, TableMetadata metadata) + throws ExecutionException { + for (ConditionalExpression expression : expressions) { + Column expectedColumn = expression.getColumn(); + Column actualColumn = + ColumnValueMapper.convert( + record.getValues().get(expectedColumn.getName()), + expectedColumn.getName(), + metadata.getColumnDataType(expectedColumn.getName())); + DataType dataType = metadata.getColumnDataType(expectedColumn.getName()); + boolean validationFailed = false; + switch (expression.getOperator()) { + case EQ: + if (actualColumn.hasNullValue()) { + validationFailed = true; + break; + } + if (new ColumnComparator(dataType).compare(actualColumn, expectedColumn) != 0) { + validationFailed = true; + break; + } + break; + case NE: + if (actualColumn.hasNullValue()) { + validationFailed = true; + break; + } + if (new 
ColumnComparator(dataType).compare(actualColumn, expectedColumn) == 0) { + validationFailed = true; + break; + } + break; + case GT: + if (actualColumn.hasNullValue()) { + validationFailed = true; + break; + } + if (new ColumnComparator(dataType).compare(actualColumn, expectedColumn) <= 0) { + validationFailed = true; + break; + } + break; + case GTE: + if (actualColumn.hasNullValue()) { + validationFailed = true; + break; + } + if (new ColumnComparator(dataType).compare(actualColumn, expectedColumn) < 0) { + validationFailed = true; + break; + } + break; + case LT: + if (actualColumn.hasNullValue()) { + validationFailed = true; + break; + } + if (new ColumnComparator(dataType).compare(actualColumn, expectedColumn) >= 0) { + validationFailed = true; + break; + } + break; + case LTE: + if (actualColumn.hasNullValue()) { + validationFailed = true; + break; + } + if (new ColumnComparator(dataType).compare(actualColumn, expectedColumn) > 0) { + validationFailed = true; + break; + } + break; + case IS_NULL: + if (!actualColumn.hasNullValue()) { + validationFailed = true; + break; + } + break; + case IS_NOT_NULL: + if (actualColumn.hasNullValue()) { + validationFailed = true; + break; + } + break; + case LIKE: + case NOT_LIKE: + default: + throw new AssertionError("Unsupported operator"); + } + if (validationFailed) { + throw new ExecutionException( + String.format( + "A condition failed. ConditionalExpression: %s, Column: %s", + expectedColumn, actualColumn)); + } + } + } +} diff --git a/core/src/main/resources/META-INF/services/com.scalar.db.api.DistributedStorageProvider b/core/src/main/resources/META-INF/services/com.scalar.db.api.DistributedStorageProvider index bcee21a06b..6cd5e50817 100644 --- a/core/src/main/resources/META-INF/services/com.scalar.db.api.DistributedStorageProvider +++ b/core/src/main/resources/META-INF/services/com.scalar.db.api.DistributedStorageProvider @@ -2,4 +2,5 @@ com.scalar.db.storage.cassandra.CassandraProvider com.scalar.db.storage.cosmos.CosmosProvider com.scalar.db.storage.dynamo.DynamoProvider com.scalar.db.storage.jdbc.JdbcProvider +com.scalar.db.storage.objectstorage.BlobProvider com.scalar.db.storage.multistorage.MultiStorageProvider From 9299df646cb36af43294243e89549b8ea2012408 Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Thu, 23 Oct 2025 08:52:48 +0900 Subject: [PATCH 02/20] Refactor --- .github/workflows/ci.yaml | 67 +- build.gradle | 2 +- core/build.gradle | 2 +- ...AdminIntegrationTestWithObjectStorage.java | 101 +- ...nScanIntegrationTestWithObjectStorage.java | 2 +- ...ommitIntegrationTestWithObjectStorage.java | 19 +- .../ConsensusCommitObjectStorageEnv.java | 10 +- ...cificIntegrationTestWithObjectStorage.java | 117 ++- ...geAdminCaseSensitivityIntegrationTest.java | 139 +++ .../ObjectStorageAdminIntegrationTest.java | 105 +- ...jectStorageAdminRepairIntegrationTest.java | 6 + .../ObjectStorageAdminTestUtils.java | 95 +- ...StorageCaseSensitivityIntegrationTest.java | 45 + ...rageCrossPartitionScanIntegrationTest.java | 2 +- .../objectstorage/ObjectStorageEnv.java | 27 +- .../ObjectStorageIntegrationTest.java | 25 +- ...eMutationAtomicityUnitIntegrationTest.java | 19 + ...ageWithReservedKeywordIntegrationTest.java | 45 + ...AdminIntegrationTestWithObjectStorage.java | 101 +- ...nScanIntegrationTestWithObjectStorage.java | 2 +- ...ommitIntegrationTestWithObjectStorage.java | 9 +- .../java/com/scalar/db/common/CoreError.java | 6 + .../ClusteringKeyComparator.java | 4 +- .../objectstorage/ColumnComparator.java | 70 -- 
.../objectstorage/MutateStatementHandler.java | 163 +-- .../storage/objectstorage/ObjectStorage.java | 2 +- .../objectstorage/ObjectStorageAdmin.java | 77 +- .../objectstorage/ObjectStorageMutation.java | 2 +- .../objectstorage/ObjectStorageOperation.java | 2 +- .../ObjectStorageOperationChecker.java | 28 + .../objectstorage/ObjectStorageRecord.java | 55 +- .../objectstorage/ObjectStorageUtils.java | 44 +- .../objectstorage/ObjectStorageWrapper.java | 27 +- .../ObjectStorageWrapperException.java | 16 +- .../ObjectStorageWrapperFactory.java | 32 + .../PreconditionFailedException.java | 12 + .../objectstorage/SelectStatementHandler.java | 83 +- .../{JsonConvertor.java => Serializer.java} | 2 +- .../objectstorage/StatementHandler.java | 15 +- .../objectstorage/{ => blob}/BlobConfig.java | 48 +- .../{ => blob}/BlobProvider.java | 4 +- .../objectstorage/{ => blob}/BlobWrapper.java | 112 +- ...m.scalar.db.api.DistributedStorageProvider | 2 +- .../storage/objectstorage/BlobConfigTest.java | 143 +++ .../ConcatenationVisitorTest.java | 192 ++++ .../MutateStatementHandlerTest.java | 992 ++++++++++++++++++ .../objectstorage/ObjectStorageAdminTest.java | 551 ++++++++++ .../ObjectStorageMutationTest.java | 114 ++ .../ObjectStorageOperationCheckerTest.java | 669 ++++++++++++ .../ObjectStorageOperationTest.java | 109 ++ .../objectstorage/ObjectStorageUtilsTest.java | 48 + ...nsusCommitSpecificIntegrationTestBase.java | 67 +- 52 files changed, 4021 insertions(+), 610 deletions(-) create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCaseSensitivityIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationAtomicityUnitIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWithReservedKeywordIntegrationTest.java delete mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ColumnComparator.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperFactory.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/PreconditionFailedException.java rename core/src/main/java/com/scalar/db/storage/objectstorage/{JsonConvertor.java => Serializer.java} (97%) rename core/src/main/java/com/scalar/db/storage/objectstorage/{ => blob}/BlobConfig.java (67%) rename core/src/main/java/com/scalar/db/storage/objectstorage/{ => blob}/BlobProvider.java (55%) rename core/src/main/java/com/scalar/db/storage/objectstorage/{ => blob}/BlobWrapper.java (52%) create mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/BlobConfigTest.java create mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/ConcatenationVisitorTest.java create mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/MutateStatementHandlerTest.java create mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java create mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java create mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java create mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationTest.java create mode 100644 
core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageUtilsTest.java diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 561f22b135..7066226909 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -257,8 +257,7 @@ jobs: integration-test-for-cosmos: name: Cosmos DB integration test (${{ matrix.mode.label }}) runs-on: windows-latest - env: - # Official Oracle JDK images that are windows compatible and publicly available through direct download do not exist for JDK 8 and 11 so we use instead cached versions hosted on the Scalar container registry. + env: # Official Oracle JDK images that are windows compatible and publicly available through direct download do not exist for JDK 8 and 11 so we use instead cached versions hosted on the Scalar container registry. # This variable evaluates to: if {!(Temurin JDK 8) && !(Oracle JDK 8 or 11)} then {true} else {false} SET_UP_INT_TEST_RUNTIME_JDK_WHEN_NOT_ORACLE_8_OR_11: "${{ (github.event_name == 'workflow_dispatch' && !(inputs.INT_TEST_JAVA_RUNTIME_VERSION == '8' && inputs.INT_TEST_JAVA_RUNTIME_VENDOR == 'temurin') && !(inputs.INT_TEST_JAVA_RUNTIME_VENDOR == 'oracle' && (inputs.INT_TEST_JAVA_RUNTIME_VERSION == '8' || inputs.INT_TEST_JAVA_RUNTIME_VERSION == '11'))) && 'true' || 'false' }}" SET_UP_INT_TEST_RUNTIME_ORACLE_JDK_8_OR_11: "${{ ((inputs.INT_TEST_JAVA_RUNTIME_VERSION == '8' || inputs.INT_TEST_JAVA_RUNTIME_VERSION == '11') && inputs.INT_TEST_JAVA_RUNTIME_VENDOR == 'oracle') && 'true' || 'false' }}" @@ -1149,8 +1148,7 @@ jobs: integration-test-for-jdbc-sqlserver-2017: name: SQL Server 2017 integration test (${{ matrix.mode.label }}) runs-on: windows-latest - env: - # Official Oracle JDK images that are windows compatible and publicly available through direct download do not exist for JDK 8 and 11 so we use instead cached versions hosted on the Scalar container registry. + env: # Official Oracle JDK images that are windows compatible and publicly available through direct download do not exist for JDK 8 and 11 so we use instead cached versions hosted on the Scalar container registry. 
# This variable evaluates to: if {!(Temurin JDK 8) && !(Oracle JDK 8 or 11)} then {true} else {false} SET_UP_INT_TEST_RUNTIME_JDK_WHEN_NOT_ORACLE_8_OR_11: "${{ (github.event_name == 'workflow_dispatch' && !(inputs.INT_TEST_JAVA_RUNTIME_VERSION == '8' && inputs.INT_TEST_JAVA_RUNTIME_VENDOR == 'temurin') && !(inputs.INT_TEST_JAVA_RUNTIME_VENDOR == 'oracle' && (inputs.INT_TEST_JAVA_RUNTIME_VERSION == '8' || inputs.INT_TEST_JAVA_RUNTIME_VERSION == '11'))) && 'true' || 'false' }}" SET_UP_INT_TEST_RUNTIME_ORACLE_JDK_8_OR_11: "${{ ((inputs.INT_TEST_JAVA_RUNTIME_VERSION == '8' || inputs.INT_TEST_JAVA_RUNTIME_VERSION == '11') && inputs.INT_TEST_JAVA_RUNTIME_VENDOR == 'oracle') && 'true' || 'false' }}" @@ -1897,6 +1895,67 @@ + integration-test-for-blob: + name: Azure Blob Storage integration test (${{ matrix.mode.label }}) + runs-on: ubuntu-latest + + services: + azurite: + image: mcr.microsoft.com/azure-storage/azurite + env: + AZURITE_ACCOUNTS: "test:test" + ports: + - 10000:10000 + + steps: + - uses: actions/checkout@v5 + + - name: Set up JDK ${{ env.JAVA_VERSION }} (${{ env.JAVA_VENDOR }}) + uses: actions/setup-java@v5 + with: + java-version: ${{ env.JAVA_VERSION }} + distribution: ${{ env.JAVA_VENDOR }} + + - name: Set up JDK ${{ env.INT_TEST_JAVA_RUNTIME_VERSION }} (${{ env.INT_TEST_JAVA_RUNTIME_VENDOR }}) to run integration test + uses: actions/setup-java@v5 + if: ${{ env.SET_UP_INT_TEST_RUNTIME_NON_ORACLE_JDK == 'true'}} + with: + java-version: ${{ env.INT_TEST_JAVA_RUNTIME_VERSION }} + distribution: ${{ env.INT_TEST_JAVA_RUNTIME_VENDOR }} + + - name: Login to Oracle container registry + uses: docker/login-action@v3 + if: ${{ env.INT_TEST_JAVA_RUNTIME_VENDOR == 'oracle' }} + with: + registry: container-registry.oracle.com + username: ${{ secrets.OCR_USERNAME }} + password: ${{ secrets.OCR_TOKEN }} + + - name: Set up JDK ${{ env.INT_TEST_JAVA_RUNTIME_VERSION }} (oracle) to run the integration test + if: ${{ env.INT_TEST_JAVA_RUNTIME_VENDOR == 'oracle' }} + run: | + container_id=$(docker create "container-registry.oracle.com/java/jdk:${{ env.INT_TEST_JAVA_RUNTIME_VERSION }}") + docker cp -L "$container_id:/usr/java/default" /usr/lib/jvm/oracle-jdk && docker rm "$container_id" + + - name: Setup Gradle + uses: gradle/actions/setup-gradle@v5 + + - name: Create Blob Storage container + run: | + az storage container create \ + --name test-container \ + --connection-string "DefaultEndpointsProtocol=http;AccountName=test;AccountKey=test;BlobEndpoint=http://localhost:10000/test;" + + - name: Execute Gradle 'integrationTestObjectStorage' task + run: ./gradlew integrationTestObjectStorage ${{ matrix.mode.group_commit_enabled && env.INT_TEST_GRADLE_OPTIONS_FOR_GROUP_COMMIT || '' }} + + - name: Upload Gradle test reports + if: always() + uses: actions/upload-artifact@v4 + with: + name: blob_integration_test_reports_${{ matrix.mode.label }} + path: core/build/reports/tests/integrationTestObjectStorage + integration-test-for-multi-storage: name: Multi-storage integration test (${{ matrix.mode.label }}) runs-on: ubuntu-latest diff --git a/build.gradle b/build.gradle index 5eed9f0cb6..3438858b3a 100644 --- a/build.gradle +++ b/build.gradle @@ -28,7 +28,7 @@ subprojects { slf4jVersion = '1.7.36' cassandraDriverVersion = '3.11.5' azureCosmosVersion = '4.67.0' - azureBlobVersion = '12.28.1'
+ azureBlobVersion = '12.31.3' azureCosmosVersion = '4.74.0' jooqVersion = '3.14.16' awssdkVersion = '2.35.0' diff --git a/core/build.gradle b/core/build.gradle index 51467154f6..d5f3ca25ab 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -268,7 +268,7 @@ task integrationTestJdbc(type: Test) { } task integrationTestObjectStorage(type: Test) { - description = 'Runs the integration tests for object storages.' + description = 'Runs the integration tests for Object Storages.' group = 'verification' testClassesDirs = sourceSets.integrationTestObjectStorage.output.classesDirs classpath = sourceSets.integrationTestObjectStorage.runtimeClasspath diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java index e7500ea596..6c31e7a1ff 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java @@ -4,7 +4,6 @@ import com.scalar.db.util.AdminTestUtils; import java.util.Properties; import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; public class ConsensusCommitAdminIntegrationTestWithObjectStorage extends ConsensusCommitAdminIntegrationTestBase { @@ -18,48 +17,112 @@ protected AdminTestUtils getAdminTestUtils(String testName) { return new ObjectStorageAdminTestUtils(getProperties(testName)); } - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorrectly() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void createIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void createIndex_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void createIndex_ForAlreadyExistingIndex_ShouldThrowIllegalArgumentException() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void createIndex_IfNotExists_ForAlreadyExistingIndex_ShouldNotThrowAnyException() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void dropIndex_ForAllDataTypesWithExistingData_ShouldDropIndexCorrectly() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void dropIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage 
does not support index-related operations") public void dropIndex_ForNonExistingIndex_ShouldThrowIllegalArgumentException() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void dropIndex_IfExists_ForNonExistingIndex_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_DropColumnForEachExistingDataType_ShouldDropColumnsCorrectly() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForPrimaryKeyColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForIndexedColumn_ShouldDropColumnAndIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ShouldRenameColumnCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForPrimaryKeyColumn_ShouldRenameColumnCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForIndexKeyColumn_ShouldRenameColumnAndIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void + alterColumnType_AlterColumnTypeFromEachExistingDataTypeToText_ShouldAlterColumnTypesCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void alterColumnType_WideningConversion_ShouldAlterColumnTypesCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void alterColumnType_ForPrimaryKeyOrIndexKeyColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForExistingTable_ShouldRenameTableCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_IfNewTableNameAlreadyExists_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {} } diff --git 
a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java index 815a4c88c0..54d5c7d019 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java @@ -18,6 +18,6 @@ protected Properties getProps(String testName) { @Test @Override - @Disabled("Cross partition scan with ordering is not supported in object storages") + @Disabled("Cross partition scan with ordering is not supported in Object Storages") public void scan_CrossPartitionScanWithOrderingGivenForCommittedRecord_ShouldReturnRecords() {} } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java index a5419fcce9..abb84e4a5d 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java @@ -1,10 +1,8 @@ package com.scalar.db.storage.objectstorage; -import com.scalar.db.exception.transaction.TransactionException; import com.scalar.db.transaction.consensuscommit.ConsensusCommitIntegrationTestBase; import java.util.Properties; import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; public class ConsensusCommitIntegrationTestWithObjectStorage extends ConsensusCommitIntegrationTestBase { @@ -18,19 +16,16 @@ protected boolean isTimestampTypeSupported() { return false; } - @Test @Override - @Disabled("Index-related operations are not supported for object storages") - public void get_GetGivenForIndexColumn_ShouldReturnRecords() throws TransactionException {} + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexColumn_ShouldReturnRecords() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") - public void scan_ScanGivenForIndexColumn_ShouldReturnRecords() throws TransactionException {} + @Disabled("Object Storage does not support index-related operations") + public void scanOrGetScanner_ScanGivenForIndexColumn_ShouldReturnRecords(ScanType scanType) {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") - public void scan_ScanGivenForIndexColumnWithConjunctions_ShouldReturnRecords() - throws TransactionException {} + @Disabled("Object Storage does not support index-related operations") + public void scanOrGetScanner_ScanGivenForIndexColumnWithConjunctions_ShouldReturnRecords( + ScanType scanType) {} } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java index 1ee909e9df..25d5c9a174 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java +++ 
b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java @@ -1,7 +1,7 @@ package com.scalar.db.storage.objectstorage; -import com.scalar.db.common.ConsensusCommitTestUtils; -import com.scalar.db.transaction.consensuscommit.ConsensusCommitIntegrationTestUtils; +import com.scalar.db.transaction.consensuscommit.ConsensusCommitTestUtils; +import java.util.Map; import java.util.Properties; public class ConsensusCommitObjectStorageEnv { @@ -11,8 +11,12 @@ public static Properties getProperties(String testName) { Properties properties = ObjectStorageEnv.getProperties(testName); // Add testName as a coordinator schema suffix - ConsensusCommitIntegrationTestUtils.addSuffixToCoordinatorNamespace(properties, testName); + ConsensusCommitTestUtils.addSuffixToCoordinatorNamespace(properties, testName); return ConsensusCommitTestUtils.loadConsensusCommitProperties(properties); } + + public static Map getCreationOptions() { + return ObjectStorageEnv.getCreationOptions(); + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java index b7a7043f9d..20f961cd6a 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java @@ -1,9 +1,9 @@ package com.scalar.db.storage.objectstorage; import com.scalar.db.transaction.consensuscommit.ConsensusCommitSpecificIntegrationTestBase; +import com.scalar.db.transaction.consensuscommit.Isolation; import java.util.Properties; import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; public class ConsensusCommitSpecificIntegrationTestWithObjectStorage extends ConsensusCommitSpecificIntegrationTestBase { @@ -13,33 +13,118 @@ protected Properties getProperties(String testName) { return ConsensusCommitObjectStorageEnv.getProperties(testName); } - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void - scanWithIndex_PutWithOverlappedIndexKeyAndNonOverlappedConjunctionsGivenBefore_ShouldScan() {} + scanWithIndex_PutWithOverlappedIndexKeyAndNonOverlappedConjunctionsGivenBefore_ShouldScan( + Isolation isolation) {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void - scanWithIndex_OverlappingPutWithNonIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException() {} + scanWithIndex_OverlappingPutWithNonIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( + Isolation isolation) {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void - scanWithIndex_NonOverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException() {} + scanWithIndex_NonOverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( + Isolation isolation) {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related 
operations") public void - scanWithIndex_OverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException() {} + scanWithIndex_OverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( + Isolation isolation) {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void - scanWithIndex_OverlappingPutWithIndexedColumnAndConjunctionsGivenBefore_ShouldThrowIllegalArgumentException() {} + scanWithIndex_OverlappingPutWithIndexedColumnAndConjunctionsGivenBefore_ShouldThrowIllegalArgumentException( + Isolation isolation) {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scan_ScanWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scan_ScanWithIndexGiven_RecordUpdatedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scan_ScanWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scan_ScanWithIndexGiven_RecordDeletedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scan_ScanWithIndexGiven_RecordDeletedByMyself_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scan_ScanWithIndexWithLimitGiven_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_RecordUpdatedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_RecordDeletedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_RecordDeletedByMyself_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_NoRecordsInIndexRange_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_RecordInsertedIntoIndexRangeByMyself_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_RecordInsertedIntoIndexRangeByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} + + @Override + @Disabled("Object Storage does not support 
index-related operations") + public void + get_GetWithIndexGiven_NoRecordsInIndexRange_RecordInsertedIntoIndexRangeByMyself_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_NoRecordsInIndexRange_RecordInsertedIntoIndexRangeByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void getAndUpdate_GetWithIndexGiven_ShouldUpdate(Isolation isolation) {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scanAndUpdate_ScanWithIndexGiven_ShouldUpdate(Isolation isolation) {} } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java new file mode 100644 index 0000000000..43108de72f --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java @@ -0,0 +1,139 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageAdminCaseSensitivityIntegrationTestBase; +import com.scalar.db.util.AdminTestUtils; +import java.util.Map; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class ObjectStorageAdminCaseSensitivityIntegrationTest + extends DistributedStorageAdminCaseSensitivityIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected Map getCreationOptions() { + return ObjectStorageEnv.getCreationOptions(); + } + + @Override + protected AdminTestUtils getAdminTestUtils(String testName) { + return new ObjectStorageAdminTestUtils(getProperties(testName)); + } + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorrectly() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForAlreadyExistingIndex_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_IfNotExists_ForAlreadyExistingIndex_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void dropIndex_ForAllDataTypesWithExistingData_ShouldDropIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void dropIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void dropIndex_ForNonExistingIndex_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void 
dropIndex_IfExists_ForNonExistingIndex_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_DropColumnForEachExistingDataType_ShouldDropColumnsCorrectly() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForPrimaryKeyColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForIndexedColumn_ShouldDropColumnAndIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_IfExists_ForNonExistingColumn_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ShouldRenameColumnCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForPrimaryKeyColumn_ShouldRenameColumnCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForIndexKeyColumn_ShouldRenameColumnAndIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void + alterColumnType_AlterColumnTypeFromEachExistingDataTypeToText_ShouldAlterColumnTypesCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void alterColumnType_WideningConversion_ShouldAlterColumnTypesCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void alterColumnType_ForPrimaryKeyOrIndexKeyColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForExistingTable_ShouldRenameTableCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_IfNewTableNameAlreadyExists_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java index d348ebe778..2223ea477c 100644 --- 
a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java @@ -4,7 +4,6 @@ import com.scalar.db.util.AdminTestUtils; import java.util.Properties; import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; public class ObjectStorageAdminIntegrationTest extends DistributedStorageAdminIntegrationTestBase { @@ -23,48 +22,116 @@ protected AdminTestUtils getAdminTestUtils(String testName) { return new ObjectStorageAdminTestUtils(getProperties(testName)); } - @Test @Override - @Disabled("Index-related operations are not supported in object storages") + @Disabled("Object Storage does not support index-related operations") public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorrectly() {} - @Test @Override - @Disabled("Index-related operations are not supported in object storages") + @Disabled("Object Storage does not support index-related operations") public void createIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} - @Test @Override - @Disabled("Index-related operations are not supported in object storages") + @Disabled("Object Storage does not support index-related operations") public void createIndex_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} - @Test @Override - @Disabled("Index-related operations are not supported in object storages") + @Disabled("Object Storage does not support index-related operations") public void createIndex_ForAlreadyExistingIndex_ShouldThrowIllegalArgumentException() {} - @Test @Override - @Disabled("Index-related operations are not supported in object storages") + @Disabled("Object Storage does not support index-related operations") public void createIndex_IfNotExists_ForAlreadyExistingIndex_ShouldNotThrowAnyException() {} - @Test @Override - @Disabled("Index-related operations are not supported in object storages") + @Disabled("Object Storage does not support index-related operations") public void dropIndex_ForAllDataTypesWithExistingData_ShouldDropIndexCorrectly() {} - @Test @Override - @Disabled("Index-related operations are not supported in object storages") + @Disabled("Object Storage does not support index-related operations") public void dropIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} - @Test @Override - @Disabled("Index-related operations are not supported in object storages") + @Disabled("Object Storage does not support index-related operations") public void dropIndex_ForNonExistingIndex_ShouldThrowIllegalArgumentException() {} - @Test @Override - @Disabled("Index-related operations are not supported in object storages") + @Disabled("Object Storage does not support index-related operations") public void dropIndex_IfExists_ForNonExistingIndex_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_DropColumnForEachExistingDataType_ShouldDropColumnsCorrectly() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void 
dropColumnFromTable_ForPrimaryKeyColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForIndexedColumn_ShouldDropColumnAndIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_IfExists_ForNonExistingColumn_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ShouldRenameColumnCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForPrimaryKeyColumn_ShouldRenameColumnCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForIndexKeyColumn_ShouldRenameColumnAndIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void + alterColumnType_AlterColumnTypeFromEachExistingDataTypeToText_ShouldAlterColumnTypesCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void alterColumnType_WideningConversion_ShouldAlterColumnTypesCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void alterColumnType_ForPrimaryKeyOrIndexKeyColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForExistingTable_ShouldRenameTableCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_IfNewTableNameAlreadyExists_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {} } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminRepairIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminRepairIntegrationTest.java index 20f52600e4..791cd20e53 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminRepairIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminRepairIntegrationTest.java @@ -2,6 +2,7 @@ import com.scalar.db.api.DistributedStorageAdminRepairIntegrationTestBase; import java.util.Properties; +import org.junit.jupiter.api.Disabled; public class ObjectStorageAdminRepairIntegrationTest extends DistributedStorageAdminRepairIntegrationTestBase { @@ -15,4 +16,9 @@ protected void initialize(String testName) throws Exception { super.initialize(testName); adminTestUtils = new ObjectStorageAdminTestUtils(getProperties(testName)); } 
+ + @Override + @Disabled("Object Storage does not support index-related operations") + public void + repairTable_WhenTableAlreadyExistsWithoutIndexAndMetadataSpecifiesIndex_ShouldCreateIndex() {} } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java index 4c2ceab265..e597f1ab0d 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java @@ -1,114 +1,93 @@ package com.scalar.db.storage.objectstorage; -import com.azure.core.util.BinaryData; -import com.azure.storage.blob.BlobContainerClient; -import com.azure.storage.blob.BlobServiceClientBuilder; -import com.azure.storage.blob.models.BlobItem; -import com.azure.storage.blob.models.ListBlobsOptions; -import com.azure.storage.common.StorageSharedKeyCredential; +import com.fasterxml.jackson.core.type.TypeReference; import com.scalar.db.config.DatabaseConfig; import com.scalar.db.util.AdminTestUtils; +import java.util.Map; +import java.util.Optional; import java.util.Properties; -import java.util.stream.Collectors; public class ObjectStorageAdminTestUtils extends AdminTestUtils { - private final BlobContainerClient client; + private final ObjectStorageWrapper wrapper; private final String metadataNamespace; public ObjectStorageAdminTestUtils(Properties properties) { super(properties); - ObjectStorageConfig config = + ObjectStorageConfig objectStorageConfig = ObjectStorageUtils.getObjectStorageConfig(new DatabaseConfig(properties)); - client = - new BlobServiceClientBuilder() - .endpoint(config.getEndpoint()) - .credential(new StorageSharedKeyCredential(config.getUsername(), config.getPassword())) - .buildClient() - .getBlobContainerClient(config.getBucket()); - metadataNamespace = config.getMetadataNamespace(); + wrapper = ObjectStorageWrapperFactory.create(objectStorageConfig); + metadataNamespace = objectStorageConfig.getMetadataNamespace(); } @Override - public void dropNamespacesTable() throws Exception { - // Do nothing + public void dropNamespacesTable() { // Blob does not have a concept of table } @Override - public void dropMetadataTable() throws Exception { - // Do nothing + public void dropMetadataTable() { // Blob does not have a concept of table } @Override public void truncateNamespacesTable() throws Exception { - client - .listBlobs( - new ListBlobsOptions() - .setPrefix( - ObjectStorageUtils.getObjectKey( - metadataNamespace, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE, null)), - null) - .stream() - .map(BlobItem::getName) - .collect(Collectors.toList()) - .forEach( - key -> { - client.getBlobClient(key).delete(); - }); + wrapper.delete( + ObjectStorageUtils.getObjectKey( + metadataNamespace, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE)); } @Override public void truncateMetadataTable() throws Exception { - client - .listBlobs( - new ListBlobsOptions() - .setPrefix( - ObjectStorageUtils.getObjectKey( - metadataNamespace, ObjectStorageAdmin.TABLE_METADATA_TABLE, null)), - null) - .stream() - .map(BlobItem::getName) - .collect(Collectors.toList()) - .forEach( - key -> { - client.getBlobClient(key).delete(); - }); + wrapper.delete( + ObjectStorageUtils.getObjectKey( + metadataNamespace, ObjectStorageAdmin.TABLE_METADATA_TABLE)); } @Override public void corruptMetadata(String namespace, String table) throws Exception 
{ - client - .getBlobClient(ObjectStorageUtils.getObjectKey(metadataNamespace, table, null)) - .upload(BinaryData.fromString("corrupted metadata"), true); + String objectKey = + ObjectStorageUtils.getObjectKey(metadataNamespace, ObjectStorageAdmin.TABLE_METADATA_TABLE); + Optional<ObjectStorageWrapperResponse> response = wrapper.get(objectKey); + if (!response.isPresent()) { + throw new IllegalArgumentException("The specified table metadata does not exist"); + } + Map<String, ObjectStorageTableMetadata> metadataTable = + Serializer.deserialize( + response.get().getPayload(), + new TypeReference<Map<String, ObjectStorageTableMetadata>>() {}); + + String tableMetadataKey = + String.join( + String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), namespace, table); + metadataTable.put(tableMetadataKey, new ObjectStorageTableMetadata()); + + wrapper.update(objectKey, Serializer.serialize(metadataTable), response.get().getVersion()); } @Override - public void dropNamespace(String namespace) throws Exception { - // Do nothing + public void dropNamespace(String namespace) { // Blob does not have a concept of namespace } @Override - public boolean namespaceExists(String namespace) throws Exception { + public boolean namespaceExists(String namespace) { // Blob does not have a concept of namespace return true; } @Override - public boolean tableExists(String namespace, String table) throws Exception { + public boolean tableExists(String namespace, String table) { // Blob does not have a concept of table return true; } @Override - public void dropTable(String namespace, String table) throws Exception { - // Do nothing + public void dropTable(String namespace, String table) { // Blob does not have a concept of table } @Override - public void close() throws Exception { + public void close() { // Do nothing } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCaseSensitivityIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCaseSensitivityIntegrationTest.java new file mode 100644 index 0000000000..8515cbe204 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCaseSensitivityIntegrationTest.java @@ -0,0 +1,45 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageCaseSensitivityIntegrationTestBase; +import java.util.Map; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class ObjectStorageCaseSensitivityIntegrationTest + extends DistributedStorageCaseSensitivityIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected Map<String, String> getCreationOptions() { + return ObjectStorageEnv.getCreationOptions(); + } + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumn_ShouldGet() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumnWithMatchedConjunctions_ShouldGet() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumnWithUnmatchedConjunctions_ShouldReturnEmpty() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetGivenForIndexedColumnMatchingMultipleRecords_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void
scan_ScanGivenForIndexedColumn_ShouldScan() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scan_ScanGivenForNonIndexedColumn_ShouldThrowIllegalArgumentException() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java index d611e892e9..e3761048db 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java @@ -25,6 +25,6 @@ protected boolean isParallelDdlSupported() { @Test @Override - @Disabled("Cross partition scan with ordering is not supported in object storages") + @Disabled("Cross partition scan with ordering is not supported in Object Storages") public void scan_WithOrderingForNonPrimaryColumns_ShouldReturnProperResult() {} } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageEnv.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageEnv.java index d43055defb..f4bd3f0fed 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageEnv.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageEnv.java @@ -1,28 +1,30 @@ package com.scalar.db.storage.objectstorage; import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.storage.objectstorage.blob.BlobConfig; +import java.util.Collections; +import java.util.Map; import java.util.Properties; public class ObjectStorageEnv { private static final String PROP_OBJECT_STORAGE_ENDPOINT = "scalardb.object_storage.endpoint"; private static final String PROP_OBJECT_STORAGE_USERNAME = "scalardb.object_storage.username"; private static final String PROP_OBJECT_STORAGE_PASSWORD = "scalardb.object_storage.password"; - private static final String PROP_OBJECT_STORAGE_BUCKET = "scalardb.object_storage.storage_type"; - private static final String DEFAULT_BLOB_ENDPOINT = "http://localhost:10000/"; - private static final String DEFAULT_BLOB_USERNAME = "devstoreaccount1"; - private static final String DEFAULT_BLOB_PASSWORD = - "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="; - private static final String DEFAULT_BLOB_CONTAINER = "fake-container"; + private static final String DEFAULT_OBJECT_STORAGE_ENDPOINT = + "http://localhost:10000/test/test-container"; + private static final String DEFAULT_OBJECT_STORAGE_USERNAME = "test"; + private static final String DEFAULT_OBJECT_STORAGE_PASSWORD = "test"; private ObjectStorageEnv() {} public static Properties getProperties(String testName) { - String accountName = System.getProperty(PROP_OBJECT_STORAGE_USERNAME, DEFAULT_BLOB_USERNAME); - String accountKey = System.getProperty(PROP_OBJECT_STORAGE_PASSWORD, DEFAULT_BLOB_PASSWORD); + String accountName = + System.getProperty(PROP_OBJECT_STORAGE_USERNAME, DEFAULT_OBJECT_STORAGE_USERNAME); + String accountKey = + System.getProperty(PROP_OBJECT_STORAGE_PASSWORD, DEFAULT_OBJECT_STORAGE_PASSWORD); String endpoint = - System.getProperty(PROP_OBJECT_STORAGE_ENDPOINT, DEFAULT_BLOB_ENDPOINT) + accountName; - String bucket = System.getProperty(PROP_OBJECT_STORAGE_BUCKET, DEFAULT_BLOB_CONTAINER); + 
System.getProperty(PROP_OBJECT_STORAGE_ENDPOINT, DEFAULT_OBJECT_STORAGE_ENDPOINT); Properties properties = new Properties(); properties.setProperty(DatabaseConfig.CONTACT_POINTS, endpoint); @@ -32,7 +34,6 @@ public static Properties getProperties(String testName) { properties.setProperty(DatabaseConfig.CROSS_PARTITION_SCAN, "true"); properties.setProperty(DatabaseConfig.CROSS_PARTITION_SCAN_FILTERING, "true"); properties.setProperty(DatabaseConfig.CROSS_PARTITION_SCAN_ORDERING, "false"); - properties.setProperty(BlobConfig.BUCKET, bucket); // Add testName as a metadata namespace suffix properties.setProperty( @@ -41,4 +42,8 @@ public static Properties getProperties(String testName) { return properties; } + + public static Map getCreationOptions() { + return Collections.emptyMap(); + } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java index 4441d72f49..ced27160f3 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java @@ -1,9 +1,9 @@ package com.scalar.db.storage.objectstorage; import com.scalar.db.api.DistributedStorageIntegrationTestBase; +import java.util.Map; import java.util.Properties; import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; public class ObjectStorageIntegrationTest extends DistributedStorageIntegrationTestBase { @Override @@ -11,34 +11,33 @@ protected Properties getProperties(String testName) { return ObjectStorageEnv.getProperties(testName); } - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + protected Map getCreationOptions() { + return ObjectStorageEnv.getCreationOptions(); + } + + @Override + @Disabled("Object Storage does not support index-related operations") public void get_GetGivenForIndexedColumn_ShouldGet() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void get_GetGivenForIndexedColumnWithMatchedConjunctions_ShouldGet() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void get_GetGivenForIndexedColumnWithUnmatchedConjunctions_ShouldReturnEmpty() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void get_GetGivenForIndexedColumnMatchingMultipleRecords_ShouldThrowIllegalArgumentException() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void scan_ScanGivenForIndexedColumn_ShouldScan() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void scan_ScanGivenForNonIndexedColumn_ShouldThrowIllegalArgumentException() {} } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationAtomicityUnitIntegrationTest.java 
b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationAtomicityUnitIntegrationTest.java new file mode 100644 index 0000000000..98c4ea857f --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationAtomicityUnitIntegrationTest.java @@ -0,0 +1,19 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageMutationAtomicityUnitIntegrationTestBase; +import java.util.Map; +import java.util.Properties; + +public class ObjectStorageMutationAtomicityUnitIntegrationTest + extends DistributedStorageMutationAtomicityUnitIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected Map getCreationOptions() { + return ObjectStorageEnv.getCreationOptions(); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWithReservedKeywordIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWithReservedKeywordIntegrationTest.java new file mode 100644 index 0000000000..ce6a1ffc2e --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWithReservedKeywordIntegrationTest.java @@ -0,0 +1,45 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageWithReservedKeywordIntegrationTestBase; +import java.util.Map; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class ObjectStorageWithReservedKeywordIntegrationTest + extends DistributedStorageWithReservedKeywordIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected Map getCreationOptions() { + return ObjectStorageEnv.getCreationOptions(); + } + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumn_ShouldGet() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumnWithMatchedConjunctions_ShouldGet() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumnWithUnmatchedConjunctions_ShouldReturnEmpty() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetGivenForIndexedColumnMatchingMultipleRecords_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scan_ScanGivenForIndexedColumn_ShouldScan() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scan_ScanGivenForNonIndexedColumn_ShouldThrowIllegalArgumentException() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java index 3db4bae22b..beeae03f5b 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java +++ 
b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java @@ -3,7 +3,6 @@ import com.scalar.db.transaction.singlecrudoperation.SingleCrudOperationTransactionAdminIntegrationTestBase; import java.util.Properties; import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; public class SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage extends SingleCrudOperationTransactionAdminIntegrationTestBase { @@ -13,48 +12,112 @@ protected Properties getProps(String testName) { return ObjectStorageEnv.getProperties(testName); } - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorrectly() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void createIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void createIndex_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void createIndex_ForAlreadyExistingIndex_ShouldThrowIllegalArgumentException() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void createIndex_IfNotExists_ForAlreadyExistingIndex_ShouldNotThrowAnyException() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void dropIndex_ForAllDataTypesWithExistingData_ShouldDropIndexCorrectly() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void dropIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void dropIndex_ForNonExistingIndex_ShouldThrowIllegalArgumentException() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void dropIndex_IfExists_ForNonExistingIndex_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_DropColumnForEachExistingDataType_ShouldDropColumnsCorrectly() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void 
dropColumnFromTable_ForPrimaryKeyColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForIndexedColumn_ShouldDropColumnAndIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ShouldRenameColumnCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForPrimaryKeyColumn_ShouldRenameColumnCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForIndexKeyColumn_ShouldRenameColumnAndIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void + alterColumnType_AlterColumnTypeFromEachExistingDataTypeToText_ShouldAlterColumnTypesCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void alterColumnType_WideningConversion_ShouldAlterColumnTypesCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void alterColumnType_ForPrimaryKeyOrIndexKeyColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForExistingTable_ShouldRenameTableCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_IfNewTableNameAlreadyExists_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {} } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java index 3c666d908d..d5ebdb1a82 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java @@ -18,6 +18,6 @@ protected Properties getProps1(String testName) { @Test @Override - @Disabled("Cross partition scan with ordering is not supported in object storages") + @Disabled("Cross partition scan with ordering is not supported in Object Storages") public void scan_ScanWithOrderingGivenForCommittedRecord_ShouldReturnRecords() {} } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java 
b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java index 1e3a43ae68..1d278f8f25 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java @@ -3,7 +3,6 @@ import com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommitIntegrationTestBase; import java.util.Properties; import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; public class TwoPhaseConsensusCommitIntegrationTestWithObjectStorage extends TwoPhaseConsensusCommitIntegrationTestBase { @@ -13,13 +12,11 @@ protected Properties getProps1(String testName) { return ConsensusCommitObjectStorageEnv.getProperties(testName); } - @Test @Override - @Disabled("Index-related operations are not supported for object storages") + @Disabled("Object Storage does not support index-related operations") public void get_GetGivenForIndexColumn_ShouldReturnRecords() {} - @Test @Override - @Disabled("Index-related operations are not supported for object storages") - public void scan_ScanGivenForIndexColumn_ShouldReturnRecords() {} + @Disabled("Object Storage does not support index-related operations") + public void scanOrGetScanner_ScanGivenForIndexColumn_ShouldReturnRecords(ScanType scanType) {} } diff --git a/core/src/main/java/com/scalar/db/common/CoreError.java b/core/src/main/java/com/scalar/db/common/CoreError.java index afb9d5e938..439e47b2d0 100644 --- a/core/src/main/java/com/scalar/db/common/CoreError.java +++ b/core/src/main/java/com/scalar/db/common/CoreError.java @@ -852,6 +852,12 @@ public enum CoreError implements ScalarDbError { "Object Storage does not support the altering column type feature", "", ""), + OBJECT_STORAGE_CONDITION_OPERATION_NOT_SUPPORTED_FOR_BOOLEAN_TYPE( + Category.USER_ERROR, + "0249", + "Object Storage supports only EQ, NE, IS_NULL, and IS_NOT_NULL operations for the BOOLEAN type in conditions. Mutation: %s", + "", + ""), // // Errors for the concurrency error category diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java index 7068e8ef43..3a6df85542 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java @@ -1,5 +1,6 @@ package com.scalar.db.storage.objectstorage; +import com.google.common.collect.Ordering; import com.scalar.db.api.Scan; import com.scalar.db.api.TableMetadata; import com.scalar.db.io.Column; @@ -25,8 +26,7 @@ public int compare(Map clusteringKey1, Map clust ColumnValueMapper.convert( clusteringKey2.get(columnName), columnName, metadata.getColumnDataType(columnName)); - int cmp = - new ColumnComparator(metadata.getColumnDataType(columnName)).compare(column1, column2); + int cmp = Ordering.natural().compare(column1, column2); if (cmp != 0) { return order == Scan.Ordering.Order.ASC ? 
cmp : -cmp; } diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ColumnComparator.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ColumnComparator.java deleted file mode 100644 index bdb6ec25f3..0000000000 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ColumnComparator.java +++ /dev/null @@ -1,70 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.io.BigIntColumn; -import com.scalar.db.io.BlobColumn; -import com.scalar.db.io.BooleanColumn; -import com.scalar.db.io.Column; -import com.scalar.db.io.DataType; -import com.scalar.db.io.DateColumn; -import com.scalar.db.io.DoubleColumn; -import com.scalar.db.io.FloatColumn; -import com.scalar.db.io.IntColumn; -import com.scalar.db.io.TextColumn; -import com.scalar.db.io.TimeColumn; -import com.scalar.db.io.TimestampColumn; -import com.scalar.db.io.TimestampTZColumn; -import java.util.Comparator; - -public class ColumnComparator implements Comparator> { - private final DataType dataType; - - public ColumnComparator(DataType dataType) { - this.dataType = dataType; - } - - @Override - public int compare(Column o1, Column o2) { - if (o1.getDataType() != dataType || o2.getDataType() != dataType) { - throw new IllegalArgumentException("The columns are not of the specified data type."); - } - int cmp; - switch (dataType) { - case BOOLEAN: - cmp = ((BooleanColumn) o1).compareTo((BooleanColumn) o2); - break; - case INT: - cmp = ((IntColumn) o1).compareTo((IntColumn) o2); - break; - case BIGINT: - cmp = ((BigIntColumn) o1).compareTo((BigIntColumn) o2); - break; - case FLOAT: - cmp = ((FloatColumn) o1).compareTo((FloatColumn) o2); - break; - case DOUBLE: - cmp = ((DoubleColumn) o1).compareTo((DoubleColumn) o2); - break; - case TEXT: - cmp = ((TextColumn) o1).compareTo((TextColumn) o2); - break; - case BLOB: - cmp = ((BlobColumn) o1).compareTo((BlobColumn) o2); - break; - case DATE: - cmp = ((DateColumn) o1).compareTo((DateColumn) o2); - break; - case TIME: - cmp = ((TimeColumn) o1).compareTo((TimeColumn) o2); - break; - case TIMESTAMP: - cmp = ((TimestampColumn) o1).compareTo((TimestampColumn) o2); - break; - case TIMESTAMPTZ: - cmp = ((TimestampTZColumn) o1).compareTo((TimestampTZColumn) o2); - break; - default: - throw new AssertionError(); - } - return cmp; - } -} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java b/core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java index 71f93868ce..e1497c6617 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java @@ -20,6 +20,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; public class MutateStatementHandler extends StatementHandler { public MutateStatementHandler( @@ -61,7 +62,7 @@ public void handle(List mutations) throws ExecutionException } } - public void mutate( + private void mutate( String namespaceName, String tableName, String partitionKey, List mutations) throws ExecutionException { Map readVersionMap = new HashMap<>(); @@ -83,42 +84,42 @@ private void putInternal(Map partition, Put put) TableMetadata tableMetadata = metadataManager.getTableMetadata(put); ObjectStorageMutation mutation = new ObjectStorageMutation(put, tableMetadata); if (!put.getCondition().isPresent()) { - ObjectStorageRecord existingRecord = partition.get(mutation.getConcatenatedKey()); + ObjectStorageRecord 
existingRecord = partition.get(mutation.getRecordId()); if (existingRecord == null) { - partition.put(mutation.getConcatenatedKey(), mutation.makeRecord()); + partition.put(mutation.getRecordId(), mutation.makeRecord()); } else { - partition.put(mutation.getConcatenatedKey(), mutation.makeRecord(existingRecord)); + partition.put(mutation.getRecordId(), mutation.makeRecord(existingRecord)); } } else if (put.getCondition().get() instanceof PutIfNotExists) { - if (partition.containsKey(mutation.getConcatenatedKey())) { + if (partition.containsKey(mutation.getRecordId())) { throw new NoMutationException( CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(put)); } - partition.put(mutation.getConcatenatedKey(), mutation.makeRecord()); + partition.put(mutation.getRecordId(), mutation.makeRecord()); } else if (put.getCondition().get() instanceof PutIfExists) { - ObjectStorageRecord existingRecord = partition.get(mutation.getConcatenatedKey()); + ObjectStorageRecord existingRecord = partition.get(mutation.getRecordId()); if (existingRecord == null) { throw new NoMutationException( CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(put)); } - partition.put(mutation.getConcatenatedKey(), mutation.makeRecord(existingRecord)); + partition.put(mutation.getRecordId(), mutation.makeRecord(existingRecord)); } else { assert put.getCondition().get() instanceof PutIf; - ObjectStorageRecord existingRecord = partition.get(mutation.getConcatenatedKey()); + ObjectStorageRecord existingRecord = partition.get(mutation.getRecordId()); if (existingRecord == null) { throw new NoMutationException( CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(put)); } try { validateConditions( - partition.get(mutation.getConcatenatedKey()), + partition.get(mutation.getRecordId()), put.getCondition().get().getExpressions(), metadataManager.getTableMetadata(mutation.getOperation())); } catch (ExecutionException e) { throw new NoMutationException( CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(put), e); } - partition.put(mutation.getConcatenatedKey(), mutation.makeRecord(existingRecord)); + partition.put(mutation.getRecordId(), mutation.makeRecord(existingRecord)); } } @@ -127,32 +128,42 @@ private void deleteInternal(Map partition, Delete d TableMetadata tableMetadata = metadataManager.getTableMetadata(delete); ObjectStorageMutation mutation = new ObjectStorageMutation(delete, tableMetadata); if (!delete.getCondition().isPresent()) { - partition.remove(mutation.getConcatenatedKey()); + partition.remove(mutation.getRecordId()); } else if (delete.getCondition().get() instanceof DeleteIfExists) { - if (!partition.containsKey(mutation.getConcatenatedKey())) { + if (!partition.containsKey(mutation.getRecordId())) { throw new NoMutationException( CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(delete)); } - partition.remove(mutation.getConcatenatedKey()); + partition.remove(mutation.getRecordId()); } else { assert delete.getCondition().get() instanceof DeleteIf; - if (!partition.containsKey(mutation.getConcatenatedKey())) { + if (!partition.containsKey(mutation.getRecordId())) { throw new NoMutationException( CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(delete)); } try { validateConditions( - partition.get(mutation.getConcatenatedKey()), + partition.get(mutation.getRecordId()), delete.getCondition().get().getExpressions(), metadataManager.getTableMetadata(mutation.getOperation())); } catch 
(ExecutionException e) { throw new NoMutationException( CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(delete), e); } - partition.remove(mutation.getConcatenatedKey()); + partition.remove(mutation.getRecordId()); } } + /** + * Applies the partition write. + * + * @param namespaceName the namespace name + * @param tableName the table name + * @param partitionKey the partition key + * @param partition the partition to be written + * @param readVersionMap the map of read versions + * @throws ExecutionException if a failure occurs during the operation + */ private void applyPartitionWrite( String namespaceName, String tableName, @@ -176,6 +187,16 @@ private void applyPartitionWrite( } } + /** + * Gets a partition from the object storage. + * + * @param namespaceName the namespace name + * @param tableName the table name + * @param partitionKey the partition key + * @param readVersionMap the map to store the read version + * @return the partition + * @throws ExecutionException if a failure occurs during the operation + */ private Map getPartition( String namespaceName, String tableName, @@ -184,23 +205,31 @@ private Map getPartition( throws ExecutionException { String objectKey = ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey); try { - ObjectStorageWrapperResponse response = wrapper.get(objectKey); - readVersionMap.put( - PartitionIdentifier.of(namespaceName, tableName, partitionKey), response.getVersion()); - return JsonConvertor.deserialize( - response.getPayload(), new TypeReference>() {}); - } catch (ObjectStorageWrapperException e) { - if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.NOT_FOUND) { + Optional response = wrapper.get(objectKey); + if (!response.isPresent()) { return new HashMap<>(); } + readVersionMap.put( + PartitionIdentifier.of(namespaceName, tableName, partitionKey), + response.get().getVersion()); + return Serializer.deserialize( + response.get().getPayload(), new TypeReference>() {}); + } catch (ObjectStorageWrapperException e) { throw new ExecutionException( - String.format( - "Failed to get partition: namespace='%s', table='%s', partition='%s'", - namespaceName, tableName, partitionKey), - e); + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); } } + /** + * Inserts a partition into the object storage. This method is called after confirming that the + * partition does not exist. 
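+ * A concurrent creation of the same partition is reported by the wrapper as a PreconditionFailedException and rethrown here as a retriable conflict.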
+ * + * @param namespaceName the namespace name + * @param tableName the table name + * @param partitionKey the partition key + * @param partition the partition to be inserted + * @throws ExecutionException if a failure occurs during the operation + */ private void insertPartition( String namespaceName, String tableName, @@ -210,25 +239,27 @@ private void insertPartition( try { wrapper.insert( ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey), - JsonConvertor.serialize(partition)); + Serializer.serialize(partition)); + } catch (PreconditionFailedException e) { + throw new RetriableExecutionException( + CoreError.OBJECT_STORAGE_CONFLICT_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); } catch (ObjectStorageWrapperException e) { - if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.ALREADY_EXISTS) { - throw new RetriableExecutionException( - CoreError.OBJECT_STORAGE_CONFLICT_OCCURRED_IN_MUTATION.buildMessage( - String.format( - "Conflict occurred while inserting partition: namespace='%s', table='%s', partition='%s'", - namespaceName, tableName, partitionKey)), - e); - } throw new ExecutionException( - CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage( - String.format( - "Failed to insert partition: namespace='%s', table='%s', partition='%s'", - namespaceName, tableName, partitionKey)), - e); + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); } } + /** + * Updates a partition in the object storage. This method is called after confirming that the + * partition exists. + * + * @param namespaceName the namespace name + * @param tableName the table name + * @param partitionKey the partition key + * @param partition the partition to be updated + * @param readVersion the read version + * @throws ExecutionException if a failure occurs during the operation + */ private void updatePartition( String namespaceName, String tableName, @@ -239,49 +270,39 @@ private void updatePartition( try { wrapper.update( ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey), - JsonConvertor.serialize(partition), + Serializer.serialize(partition), readVersion); + } catch (PreconditionFailedException e) { + throw new RetriableExecutionException( + CoreError.OBJECT_STORAGE_CONFLICT_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); } catch (ObjectStorageWrapperException e) { - if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.NOT_FOUND - || e.getStatusCode() == ObjectStorageWrapperException.StatusCode.VERSION_MISMATCH) { - throw new RetriableExecutionException( - CoreError.OBJECT_STORAGE_CONFLICT_OCCURRED_IN_MUTATION.buildMessage( - String.format( - "Conflict occurred while updating partition: namespace='%s', table='%s', partition='%s'", - namespaceName, tableName, partitionKey)), - e); - } throw new ExecutionException( - CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage( - String.format( - "Failed to update partition: namespace='%s', table='%s', partition='%s'", - namespaceName, tableName, partitionKey)), - e); + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); } } + /** + * Deletes a partition from the object storage. This method is called after confirming that the + * partition exists. 
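+ * The delete is conditional on the given read version, so a concurrent change to the partition is reported as a PreconditionFailedException and rethrown here as a retriable conflict.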
+ * + * @param namespaceName the namespace name + * @param tableName the table name + * @param partitionKey the partition key + * @param readVersion the read version + * @throws ExecutionException if a failure occurs during the operation + */ private void deletePartition( String namespaceName, String tableName, String partitionKey, String readVersion) throws ExecutionException { try { wrapper.delete( ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey), readVersion); + } catch (PreconditionFailedException e) { + throw new RetriableExecutionException( + CoreError.OBJECT_STORAGE_CONFLICT_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); } catch (ObjectStorageWrapperException e) { - if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.NOT_FOUND - || e.getStatusCode() == ObjectStorageWrapperException.StatusCode.VERSION_MISMATCH) { - throw new RetriableExecutionException( - CoreError.OBJECT_STORAGE_CONFLICT_OCCURRED_IN_MUTATION.buildMessage( - String.format( - "Conflict occurred while deleting partition: namespace='%s', table='%s', partition='%s'", - namespaceName, tableName, partitionKey)), - e); - } throw new ExecutionException( - CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage( - String.format( - "Failed to delete partition: namespace='%s', table='%s', partition='%s'", - namespaceName, tableName, partitionKey)), - e); + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); } } } diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java index 735c7070e6..4cb7e28654 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java @@ -39,7 +39,7 @@ public ObjectStorage(DatabaseConfig databaseConfig) { } ObjectStorageConfig objectStorageConfig = ObjectStorageUtils.getObjectStorageConfig(databaseConfig); - wrapper = ObjectStorageUtils.getObjectStorageWrapper(objectStorageConfig); + wrapper = ObjectStorageWrapperFactory.create(objectStorageConfig); ObjectStorageAdmin admin = new ObjectStorageAdmin(wrapper, objectStorageConfig); TableMetadataManager metadataManager = new TableMetadataManager(admin, databaseConfig.getMetadataCacheExpirationTimeSecs()); diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java index 62fc79f13f..b7e56d460c 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java @@ -16,6 +16,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; import javax.annotation.Nullable; @@ -38,7 +39,7 @@ public class ObjectStorageAdmin implements DistributedStorageAdmin { public ObjectStorageAdmin(DatabaseConfig databaseConfig) { ObjectStorageConfig objectStorageConfig = ObjectStorageUtils.getObjectStorageConfig(databaseConfig); - wrapper = ObjectStorageUtils.getObjectStorageWrapper(objectStorageConfig); + wrapper = ObjectStorageWrapperFactory.create(objectStorageConfig); metadataNamespace = objectStorageConfig.getMetadataNamespace(); } @@ -443,16 +444,15 @@ private void deleteTableMetadata(String namespace, String table) throws Executio private Map 
getNamespaceMetadataTable() throws ExecutionException { try { - ObjectStorageWrapperResponse response = - wrapper.get( - ObjectStorageUtils.getObjectKey(metadataNamespace, NAMESPACE_METADATA_TABLE, null)); - return JsonConvertor.deserialize( - response.getPayload(), - new TypeReference>() {}); - } catch (ObjectStorageWrapperException e) { - if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.NOT_FOUND) { + Optional response = + wrapper.get(ObjectStorageUtils.getObjectKey(metadataNamespace, NAMESPACE_METADATA_TABLE)); + if (!response.isPresent()) { return Collections.emptyMap(); } + return Serializer.deserialize( + response.get().getPayload(), + new TypeReference>() {}); + } catch (ObjectStorageWrapperException e) { throw new ExecutionException("Failed to get the metadata table.", e); } } @@ -460,17 +460,16 @@ private Map getNamespaceMetadataTable() private Map getNamespaceMetadataTable( Map readVersionMap) throws ExecutionException { try { - ObjectStorageWrapperResponse response = - wrapper.get( - ObjectStorageUtils.getObjectKey(metadataNamespace, NAMESPACE_METADATA_TABLE, null)); - readVersionMap.put(NAMESPACE_METADATA_TABLE, response.getVersion()); - return JsonConvertor.deserialize( - response.getPayload(), - new TypeReference>() {}); - } catch (ObjectStorageWrapperException e) { - if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.NOT_FOUND) { + Optional response = + wrapper.get(ObjectStorageUtils.getObjectKey(metadataNamespace, NAMESPACE_METADATA_TABLE)); + if (!response.isPresent()) { return Collections.emptyMap(); } + readVersionMap.put(NAMESPACE_METADATA_TABLE, response.get().getVersion()); + return Serializer.deserialize( + response.get().getPayload(), + new TypeReference>() {}); + } catch (ObjectStorageWrapperException e) { throw new ExecutionException("Failed to get the metadata table.", e); } } @@ -478,15 +477,15 @@ private Map getNamespaceMetadataTable( private Map getTableMetadataTable() throws ExecutionException { try { - ObjectStorageWrapperResponse response = - wrapper.get( - ObjectStorageUtils.getObjectKey(metadataNamespace, TABLE_METADATA_TABLE, null)); - return JsonConvertor.deserialize( - response.getPayload(), new TypeReference>() {}); - } catch (ObjectStorageWrapperException e) { - if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.NOT_FOUND) { + Optional response = + wrapper.get(ObjectStorageUtils.getObjectKey(metadataNamespace, TABLE_METADATA_TABLE)); + if (!response.isPresent()) { return Collections.emptyMap(); } + return Serializer.deserialize( + response.get().getPayload(), + new TypeReference>() {}); + } catch (ObjectStorageWrapperException e) { throw new ExecutionException("Failed to get the metadata table.", e); } } @@ -494,16 +493,16 @@ private Map getTableMetadataTable() private Map getTableMetadataTable( Map readVersionMap) throws ExecutionException { try { - ObjectStorageWrapperResponse response = - wrapper.get( - ObjectStorageUtils.getObjectKey(metadataNamespace, TABLE_METADATA_TABLE, null)); - readVersionMap.put(TABLE_METADATA_TABLE, response.getVersion()); - return JsonConvertor.deserialize( - response.getPayload(), new TypeReference>() {}); - } catch (ObjectStorageWrapperException e) { - if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.NOT_FOUND) { + Optional response = + wrapper.get(ObjectStorageUtils.getObjectKey(metadataNamespace, TABLE_METADATA_TABLE)); + if (!response.isPresent()) { return Collections.emptyMap(); } + readVersionMap.put(TABLE_METADATA_TABLE, response.get().getVersion()); 
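+ // The version recorded above (the object's ETag in the Blob implementation) lets the caller make subsequent updates or deletes of the metadata table conditional on it.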
+ return Serializer.deserialize( + response.get().getPayload(), + new TypeReference>() {}); + } catch (ObjectStorageWrapperException e) { throw new ExecutionException("Failed to get the metadata table.", e); } } @@ -512,8 +511,8 @@ private void insertMetadataTable(String table, Map metadataTable) throws ExecutionException { try { wrapper.insert( - ObjectStorageUtils.getObjectKey(metadataNamespace, table, null), - JsonConvertor.serialize(metadataTable)); + ObjectStorageUtils.getObjectKey(metadataNamespace, table), + Serializer.serialize(metadataTable)); } catch (ObjectStorageWrapperException e) { throw new ExecutionException("Failed to insert the metadata table.", e); } @@ -523,8 +522,8 @@ private void updateMetadataTable( String table, Map metadataTable, String readVersion) throws ExecutionException { try { wrapper.update( - ObjectStorageUtils.getObjectKey(metadataNamespace, table, null), - JsonConvertor.serialize(metadataTable), + ObjectStorageUtils.getObjectKey(metadataNamespace, table), + Serializer.serialize(metadataTable), readVersion); } catch (Exception e) { throw new ExecutionException("Failed to update the metadata table.", e); @@ -533,7 +532,7 @@ private void updateMetadataTable( private void deleteMetadataTable(String table, String readVersion) throws ExecutionException { try { - wrapper.delete(ObjectStorageUtils.getObjectKey(metadataNamespace, table, null), readVersion); + wrapper.delete(ObjectStorageUtils.getObjectKey(metadataNamespace, table), readVersion); } catch (Exception e) { throw new ExecutionException("Failed to delete the metadata table.", e); } @@ -541,7 +540,7 @@ private void deleteMetadataTable(String table, String readVersion) throws Execut private void deleteTableData(String namespace, String table) throws ExecutionException { try { - wrapper.deleteByPrefix(ObjectStorageUtils.getObjectKey(namespace, table, null)); + wrapper.deleteByPrefix(ObjectStorageUtils.getObjectKey(namespace, table)); } catch (Exception e) { throw new ExecutionException( String.format( diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java index 9ca4df6b75..d64355fb80 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java @@ -27,7 +27,7 @@ public ObjectStorageRecord makeRecord() { Put put = (Put) mutation; return new ObjectStorageRecord( - getConcatenatedKey(), + getRecordId(), toMap(put.getPartitionKey().getColumns()), put.getClusteringKey().map(k -> toMap(k.getColumns())).orElse(Collections.emptyMap()), toMapForPut(put)); diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java index e349bb51c9..d632009ae0 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java @@ -49,7 +49,7 @@ public String getConcatenatedClusteringKey() { } @Nonnull - public String getConcatenatedKey() { + public String getRecordId() { if (operation.getClusteringKey().isPresent()) { return String.join( String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java 
b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java index 38322b88c4..c97b410f6a 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java @@ -1,10 +1,13 @@ package com.scalar.db.storage.objectstorage; +import com.scalar.db.api.ConditionalExpression; import com.scalar.db.api.Delete; import com.scalar.db.api.Get; +import com.scalar.db.api.Mutation; import com.scalar.db.api.Operation; import com.scalar.db.api.Put; import com.scalar.db.api.Scan; +import com.scalar.db.api.TableMetadata; import com.scalar.db.common.CoreError; import com.scalar.db.common.StorageInfoProvider; import com.scalar.db.common.TableMetadataManager; @@ -15,6 +18,7 @@ import com.scalar.db.io.BlobColumn; import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.ColumnVisitor; +import com.scalar.db.io.DataType; import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; @@ -105,12 +109,18 @@ public void check(Scan scan) throws ExecutionException { public void check(Put put) throws ExecutionException { super.check(put); checkPrimaryKey(put); + + TableMetadata metadata = getTableMetadata(put); + checkCondition(put, metadata); } @Override public void check(Delete delete) throws ExecutionException { super.check(delete); checkPrimaryKey(delete); + + TableMetadata metadata = getTableMetadata(delete); + checkCondition(delete, metadata); } private void checkPrimaryKey(Operation operation) { @@ -123,4 +133,22 @@ private void checkPrimaryKey(Operation operation) { .ifPresent( c -> c.getColumns().forEach(column -> column.accept(PRIMARY_KEY_COLUMN_CHECKER))); } + + private void checkCondition(Mutation mutation, TableMetadata metadata) { + if (!mutation.getCondition().isPresent()) { + return; + } + for (ConditionalExpression expression : mutation.getCondition().get().getExpressions()) { + if (metadata.getColumnDataType(expression.getColumn().getName()) == DataType.BOOLEAN) { + if (expression.getOperator() != ConditionalExpression.Operator.EQ + && expression.getOperator() != ConditionalExpression.Operator.NE + && expression.getOperator() != ConditionalExpression.Operator.IS_NULL + && expression.getOperator() != ConditionalExpression.Operator.IS_NOT_NULL) { + throw new IllegalArgumentException( + CoreError.OBJECT_STORAGE_CONDITION_OPERATION_NOT_SUPPORTED_FOR_BOOLEAN_TYPE + .buildMessage(mutation)); + } + } + } + } } diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java index e3d473ff5e..4caad8906a 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java @@ -8,7 +8,7 @@ @Immutable public class ObjectStorageRecord { - private final String concatenatedKey; + private final String id; private final Map partitionKey; private final Map clusteringKey; private final Map values; @@ -19,26 +19,22 @@ public ObjectStorageRecord() { } public ObjectStorageRecord( - @Nullable String concatenatedKey, + @Nullable String id, @Nullable Map partitionKey, @Nullable Map clusteringKey, @Nullable Map values) { - this.concatenatedKey = concatenatedKey != null ? concatenatedKey : ""; + this.id = id != null ? id : ""; this.partitionKey = partitionKey != null ? 
partitionKey : Collections.emptyMap(); this.clusteringKey = clusteringKey != null ? clusteringKey : Collections.emptyMap(); this.values = values != null ? values : Collections.emptyMap(); } public ObjectStorageRecord(ObjectStorageRecord record) { - this( - record.getConcatenatedKey(), - record.getPartitionKey(), - record.getClusteringKey(), - record.getValues()); + this(record.getId(), record.getPartitionKey(), record.getClusteringKey(), record.getValues()); } - public String getConcatenatedKey() { - return concatenatedKey; + public String getId() { + return id; } public Map getPartitionKey() { @@ -62,7 +58,7 @@ public boolean equals(Object o) { return false; } ObjectStorageRecord other = (ObjectStorageRecord) o; - if (!other.getConcatenatedKey().equals(concatenatedKey)) { + if (!other.getId().equals(id)) { return false; } if (!other.getPartitionKey().equals(partitionKey)) { @@ -76,41 +72,6 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(concatenatedKey, partitionKey, clusteringKey, values); - } - - // Builder - - public static class Builder { - private String concatenatedPartitionKey; - private Map partitionKey; - private Map clusteringKey; - private Map values; - - public Builder() {} - - public Builder concatenatedPartitionKey(String concatenatedPartitionKey) { - this.concatenatedPartitionKey = concatenatedPartitionKey; - return this; - } - - public Builder partitionKey(Map partitionKey) { - this.partitionKey = partitionKey; - return this; - } - - public Builder clusteringKey(Map clusteringKey) { - this.clusteringKey = clusteringKey; - return this; - } - - public Builder values(Map values) { - this.values = values; - return this; - } - - public ObjectStorageRecord build() { - return new ObjectStorageRecord(concatenatedPartitionKey, partitionKey, clusteringKey, values); - } + return Objects.hash(id, partitionKey, clusteringKey, values); } } diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java index 1443a150a2..cfb62dd444 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java @@ -1,22 +1,19 @@ package com.scalar.db.storage.objectstorage; -import com.azure.storage.blob.BlobContainerClient; -import com.azure.storage.blob.BlobServiceClientBuilder; -import com.azure.storage.common.StorageSharedKeyCredential; import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.storage.objectstorage.blob.BlobConfig; import java.util.Objects; -import javax.annotation.Nullable; public class ObjectStorageUtils { public static final char OBJECT_KEY_DELIMITER = '/'; - public static final char CONCATENATED_KEY_DELIMITER = '*'; + public static final char CONCATENATED_KEY_DELIMITER = '!'; - public static String getObjectKey(String namespace, String table, @Nullable String partition) { - if (partition == null) { - return String.join(String.valueOf(OBJECT_KEY_DELIMITER), namespace, table); - } else { - return String.join(String.valueOf(OBJECT_KEY_DELIMITER), namespace, table, partition); - } + public static String getObjectKey(String namespace, String table, String partition) { + return String.join(String.valueOf(OBJECT_KEY_DELIMITER), namespace, table, partition); + } + + public static String getObjectKey(String namespace, String table) { + return String.join(String.valueOf(OBJECT_KEY_DELIMITER), namespace, table); } public 
static ObjectStorageConfig getObjectStorageConfig(DatabaseConfig databaseConfig) { @@ -24,30 +21,7 @@ public static ObjectStorageConfig getObjectStorageConfig(DatabaseConfig database return new BlobConfig(databaseConfig); } else { throw new IllegalArgumentException( - "Unsupported object storage: " + databaseConfig.getStorage()); + "Unsupported Object Storage: " + databaseConfig.getStorage()); } } - - public static ObjectStorageWrapper getObjectStorageWrapper( - ObjectStorageConfig objectStorageConfig) { - if (Objects.equals(objectStorageConfig.getStorageName(), BlobConfig.STORAGE_NAME)) { - assert objectStorageConfig instanceof BlobConfig; - return new BlobWrapper( - buildBlobContainerClient(objectStorageConfig), (BlobConfig) objectStorageConfig); - } else { - throw new IllegalArgumentException( - "Unsupported object storage: " + objectStorageConfig.getStorageName()); - } - } - - private static BlobContainerClient buildBlobContainerClient( - ObjectStorageConfig objectStorageConfig) { - return new BlobServiceClientBuilder() - .endpoint(objectStorageConfig.getEndpoint()) - .credential( - new StorageSharedKeyCredential( - objectStorageConfig.getUsername(), objectStorageConfig.getPassword())) - .buildClient() - .getBlobContainerClient(objectStorageConfig.getBucket()); - } } diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapper.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapper.java index 70110899d3..cf28f16574 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapper.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapper.java @@ -1,5 +1,6 @@ package com.scalar.db.storage.objectstorage; +import java.util.Optional; import java.util.Set; public interface ObjectStorageWrapper { @@ -8,25 +9,27 @@ public interface ObjectStorageWrapper { * Get the object from the storage. * * @param key the key of the object - * @throws ObjectStorageWrapperException if the object does not exist - * @return the object and its version + * @throws ObjectStorageWrapperException if an error occurs + * @return the object and its version wrapped in an Optional if found, otherwise an empty Optional */ - ObjectStorageWrapperResponse get(String key) throws ObjectStorageWrapperException; + Optional get(String key) throws ObjectStorageWrapperException; /** * Get object keys with the specified prefix. * * @param prefix the prefix of the keys + * @throws ObjectStorageWrapperException if an error occurs * @return the set of keys with the specified prefix */ - Set getKeys(String prefix); + Set getKeys(String prefix) throws ObjectStorageWrapperException; /** * Insert the object into the storage. 
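+ * Implementations are expected to make the existence check and the write atomic, for example by using a conditional request.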
* * @param key the key of the object * @param object the object to insert - * @throws ObjectStorageWrapperException if the object already exists or a conflict occurs + * @throws PreconditionFailedException if the object already exists + * @throws ObjectStorageWrapperException if an error occurs */ void insert(String key, String object) throws ObjectStorageWrapperException; @@ -36,8 +39,8 @@ public interface ObjectStorageWrapper { * @param key the key of the object * @param object the updated object * @param version the expected version of the object - * @throws ObjectStorageWrapperException if the object does not exist or the version does not - * match + * @throws PreconditionFailedException if the version does not match or the object does not exist + * @throws ObjectStorageWrapperException if an error occurs */ void update(String key, String object, String version) throws ObjectStorageWrapperException; @@ -45,7 +48,8 @@ public interface ObjectStorageWrapper { * Delete the object from the storage. * * @param key the key of the object - * @throws ObjectStorageWrapperException if the object does not exist or a conflict occurs + * @throws PreconditionFailedException if the object does not exist + * @throws ObjectStorageWrapperException if an error occurs */ void delete(String key) throws ObjectStorageWrapperException; @@ -54,8 +58,8 @@ public interface ObjectStorageWrapper { * * @param key the key of the object * @param version the expected version of the object - * @throws ObjectStorageWrapperException if the object does not exist or the version does not - * match + * @throws PreconditionFailedException if the version does not match or the object does not exist + * @throws ObjectStorageWrapperException if an error occurs */ void delete(String key, String version) throws ObjectStorageWrapperException; @@ -63,8 +67,9 @@ public interface ObjectStorageWrapper { * Delete objects with the specified prefix from the storage. * * @param prefix the prefix of the objects to delete + * @throws ObjectStorageWrapperException if an error occurs */ - void deleteByPrefix(String prefix); + void deleteByPrefix(String prefix) throws ObjectStorageWrapperException; /** Close the storage wrapper. 
*/ void close(); diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperException.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperException.java index 875590139f..2dc9546264 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperException.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperException.java @@ -1,20 +1,12 @@ package com.scalar.db.storage.objectstorage; public class ObjectStorageWrapperException extends Exception { - private final StatusCode code; - public ObjectStorageWrapperException(StatusCode code, Throwable cause) { - super(cause); - this.code = code; + public ObjectStorageWrapperException(String message) { + super(message); } - public StatusCode getStatusCode() { - return code; - } - - public enum StatusCode { - NOT_FOUND, - ALREADY_EXISTS, - VERSION_MISMATCH, + public ObjectStorageWrapperException(String message, Throwable cause) { + super(message, cause); } } diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperFactory.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperFactory.java new file mode 100644 index 0000000000..0160f27cf8 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperFactory.java @@ -0,0 +1,32 @@ +package com.scalar.db.storage.objectstorage; + +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobServiceClientBuilder; +import com.azure.storage.common.StorageSharedKeyCredential; +import com.scalar.db.storage.objectstorage.blob.BlobConfig; +import com.scalar.db.storage.objectstorage.blob.BlobWrapper; +import java.util.Objects; + +public class ObjectStorageWrapperFactory { + public static ObjectStorageWrapper create(ObjectStorageConfig objectStorageConfig) { + if (Objects.equals(objectStorageConfig.getStorageName(), BlobConfig.STORAGE_NAME)) { + assert objectStorageConfig instanceof BlobConfig; + return new BlobWrapper( + buildBlobContainerClient(objectStorageConfig), (BlobConfig) objectStorageConfig); + } else { + throw new IllegalArgumentException( + "Unsupported Object Storage: " + objectStorageConfig.getStorageName()); + } + } + + private static BlobContainerClient buildBlobContainerClient( + ObjectStorageConfig objectStorageConfig) { + return new BlobServiceClientBuilder() + .endpoint(objectStorageConfig.getEndpoint()) + .credential( + new StorageSharedKeyCredential( + objectStorageConfig.getUsername(), objectStorageConfig.getPassword())) + .buildClient() + .getBlobContainerClient(objectStorageConfig.getBucket()); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/PreconditionFailedException.java b/core/src/main/java/com/scalar/db/storage/objectstorage/PreconditionFailedException.java new file mode 100644 index 0000000000..1b13c5d722 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/PreconditionFailedException.java @@ -0,0 +1,12 @@ +package com.scalar.db.storage.objectstorage; + +public class PreconditionFailedException extends ObjectStorageWrapperException { + + public PreconditionFailedException(String message) { + super(message); + } + + public PreconditionFailedException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java b/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java index 
acab6739da..8c14a278e4 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java @@ -1,6 +1,7 @@ package com.scalar.db.storage.objectstorage; import com.fasterxml.jackson.core.type.TypeReference; +import com.google.common.collect.Ordering; import com.scalar.db.api.Get; import com.scalar.db.api.Scan; import com.scalar.db.api.ScanAll; @@ -63,7 +64,7 @@ private Scanner executeGet(Get get, TableMetadata metadata) throws ExecutionExce getNamespace(get), getTable(get), operation.getConcatenatedPartitionKey(), - operation.getConcatenatedKey()); + operation.getRecordId()); if (!record.isPresent()) { return new EmptyScanner(); } @@ -135,72 +136,64 @@ private Scanner executeScanAll(ScanAll scan, TableMetadata metadata) throws Exec scan.getLimit()); } + private Map getPartition( + String namespace, String table, String partition) throws ObjectStorageWrapperException { + Optional response = + wrapper.get(ObjectStorageUtils.getObjectKey(namespace, table, partition)); + if (!response.isPresent()) { + return Collections.emptyMap(); + } + return Serializer.deserialize( + response.get().getPayload(), new TypeReference>() {}); + } + private Optional getRecord( - String namespace, String table, String partition, String concatenatedKey) - throws ExecutionException { + String namespace, String table, String partition, String recordId) throws ExecutionException { try { - ObjectStorageWrapperResponse response = - wrapper.get(ObjectStorageUtils.getObjectKey(namespace, table, partition)); Map recordsInPartition = - JsonConvertor.deserialize( - response.getPayload(), new TypeReference>() {}); - return Optional.ofNullable(recordsInPartition.get(concatenatedKey)); - } catch (ObjectStorageWrapperException e) { - if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.NOT_FOUND) { - // the specified partition does not exist - return Optional.empty(); + getPartition(namespace, table, partition); + if (recordsInPartition.containsKey(recordId)) { + return Optional.of(recordsInPartition.get(recordId)); } else { - throw new ExecutionException( - CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(), e); + return Optional.empty(); } } catch (Exception e) { throw new ExecutionException( - CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(), e); + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(e.getMessage()), e); } } private Set getRecordsInPartition( String namespace, String table, String partition) throws ExecutionException { try { - ObjectStorageWrapperResponse response = - wrapper.get(ObjectStorageUtils.getObjectKey(namespace, table, partition)); Map recordsInPartition = - JsonConvertor.deserialize( - response.getPayload(), new TypeReference>() {}); + getPartition(namespace, table, partition); return new HashSet<>(recordsInPartition.values()); - } catch (ObjectStorageWrapperException e) { - if (e.getStatusCode() == ObjectStorageWrapperException.StatusCode.NOT_FOUND) { - // the specified partition does not exist - return Collections.emptySet(); - } else { - throw new ExecutionException( - CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(), e); - } } catch (Exception e) { throw new ExecutionException( - CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(), e); + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(e.getMessage()), e); } } private Set getRecordsInTable(String namespace, String 
table) throws ExecutionException { - Set partitionNames = - wrapper.getKeys(ObjectStorageUtils.getObjectKey(namespace, table, "")).stream() - .map(key -> key.substring(key.lastIndexOf(ObjectStorageUtils.OBJECT_KEY_DELIMITER) + 1)) - .filter(partition -> !partition.isEmpty()) - .collect(Collectors.toSet()); - Set records = new HashSet<>(); - for (String key : partitionNames) { - try { + try { + Set partitionNames = + wrapper.getKeys(ObjectStorageUtils.getObjectKey(namespace, table, "")).stream() + .map( + key -> + key.substring(key.lastIndexOf(ObjectStorageUtils.OBJECT_KEY_DELIMITER) + 1)) + .filter(partition -> !partition.isEmpty()) + .collect(Collectors.toSet()); + Set records = new HashSet<>(); + for (String key : partitionNames) { records.addAll(getRecordsInPartition(namespace, table, key)); - } catch (ExecutionException e) { - throw e; - } catch (Exception e) { - throw new ExecutionException( - CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(), e); } + return records; + } catch (Exception e) { + throw new ExecutionException( + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(e.getMessage()), e); } - return records; } private boolean isReverseOrder(Scan scan, TableMetadata metadata) { @@ -212,7 +205,6 @@ private boolean isReverseOrder(Scan scan, TableMetadata metadata) { throw new IllegalArgumentException( CoreError.OPERATION_CHECK_ERROR_ORDERING_NOT_PROPERLY_SPECIFIED.buildMessage(scan)); } - boolean rightOrder = ordering.getOrder() != metadata.getClusteringOrder(ordering.getColumnName()); if (reverse == null) { @@ -260,8 +252,7 @@ record -> { record.getClusteringKey().get(column.getName()), column.getName(), column.getDataType()); - int cmp = - new ColumnComparator(column.getDataType()).compare(recordColumn, column); + int cmp = Ordering.natural().compare(recordColumn, column); cmp = order == Scan.Ordering.Order.ASC ? 
cmp : -cmp; if (isStart) { if (isInclusive) { @@ -287,7 +278,7 @@ record -> { record.getClusteringKey().get(column.getName()), column.getName(), column.getDataType()); - int cmp = new ColumnComparator(column.getDataType()).compare(recordColumn, column); + int cmp = Ordering.natural().compare(recordColumn, column); if (cmp == 0) { tmpRecords.add(record); } diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/JsonConvertor.java b/core/src/main/java/com/scalar/db/storage/objectstorage/Serializer.java similarity index 97% rename from core/src/main/java/com/scalar/db/storage/objectstorage/JsonConvertor.java rename to core/src/main/java/com/scalar/db/storage/objectstorage/Serializer.java index 2530bb8bd8..93c41822d5 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/JsonConvertor.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/Serializer.java @@ -6,7 +6,7 @@ import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; -public class JsonConvertor { +public class Serializer { private static final ObjectMapper mapper = new ObjectMapper(); static { diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java b/core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java index 259f75cd83..02739d4ec1 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java @@ -1,12 +1,12 @@ package com.scalar.db.storage.objectstorage; +import com.google.common.collect.Ordering; import com.scalar.db.api.ConditionalExpression; import com.scalar.db.api.Operation; import com.scalar.db.api.TableMetadata; import com.scalar.db.common.TableMetadataManager; import com.scalar.db.exception.storage.ExecutionException; import com.scalar.db.io.Column; -import com.scalar.db.io.DataType; import java.util.List; import javax.annotation.Nonnull; @@ -41,7 +41,6 @@ protected void validateConditions( record.getValues().get(expectedColumn.getName()), expectedColumn.getName(), metadata.getColumnDataType(expectedColumn.getName())); - DataType dataType = metadata.getColumnDataType(expectedColumn.getName()); boolean validationFailed = false; switch (expression.getOperator()) { case EQ: @@ -49,7 +48,7 @@ protected void validateConditions( validationFailed = true; break; } - if (new ColumnComparator(dataType).compare(actualColumn, expectedColumn) != 0) { + if (Ordering.natural().compare(actualColumn, expectedColumn) != 0) { validationFailed = true; break; } @@ -59,7 +58,7 @@ protected void validateConditions( validationFailed = true; break; } - if (new ColumnComparator(dataType).compare(actualColumn, expectedColumn) == 0) { + if (Ordering.natural().compare(actualColumn, expectedColumn) == 0) { validationFailed = true; break; } @@ -69,7 +68,7 @@ protected void validateConditions( validationFailed = true; break; } - if (new ColumnComparator(dataType).compare(actualColumn, expectedColumn) <= 0) { + if (Ordering.natural().compare(actualColumn, expectedColumn) <= 0) { validationFailed = true; break; } @@ -79,7 +78,7 @@ protected void validateConditions( validationFailed = true; break; } - if (new ColumnComparator(dataType).compare(actualColumn, expectedColumn) < 0) { + if (Ordering.natural().compare(actualColumn, expectedColumn) < 0) { validationFailed = true; break; } @@ -89,7 +88,7 @@ protected void validateConditions( validationFailed = true; break; } - if (new 
ColumnComparator(dataType).compare(actualColumn, expectedColumn) >= 0) { + if (Ordering.natural().compare(actualColumn, expectedColumn) >= 0) { validationFailed = true; break; } @@ -99,7 +98,7 @@ protected void validateConditions( validationFailed = true; break; } - if (new ColumnComparator(dataType).compare(actualColumn, expectedColumn) > 0) { + if (Ordering.natural().compare(actualColumn, expectedColumn) > 0) { validationFailed = true; break; } diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/BlobConfig.java b/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobConfig.java similarity index 67% rename from core/src/main/java/com/scalar/db/storage/objectstorage/BlobConfig.java rename to core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobConfig.java index 858df378c5..6ba78e24ca 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/BlobConfig.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobConfig.java @@ -1,4 +1,4 @@ -package com.scalar.db.storage.objectstorage; +package com.scalar.db.storage.objectstorage.blob; import static com.scalar.db.config.ConfigUtils.getInt; import static com.scalar.db.config.ConfigUtils.getLong; @@ -6,13 +6,13 @@ import com.scalar.db.common.CoreError; import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.storage.objectstorage.ObjectStorageConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class BlobConfig implements ObjectStorageConfig { public static final String STORAGE_NAME = "blob"; public static final String PREFIX = DatabaseConfig.PREFIX + STORAGE_NAME + "."; - public static final String BUCKET = PREFIX + "bucket"; public static final String PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES = PREFIX + "parallel_upload_block_size_in_bytes"; @@ -26,6 +26,11 @@ public class BlobConfig implements ObjectStorageConfig { @Deprecated public static final String TABLE_METADATA_NAMESPACE = PREFIX + "table_metadata.namespace"; + public static final long DEFAULT_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES = 4 * 1024 * 1024; // 4MB + public static final int DEFAULT_PARALLEL_UPLOAD_MAX_PARALLELISM = 4; + public static final long DEFAULT_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES = 4 * 1024 * 1024; // 4MB + public static final int DEFAULT_REQUEST_TIMEOUT_IN_SECONDS = 15; + private static final Logger logger = LoggerFactory.getLogger(BlobConfig.class); private final String endpoint; private final String username; @@ -47,13 +52,17 @@ public BlobConfig(DatabaseConfig databaseConfig) { if (databaseConfig.getContactPoints().isEmpty()) { throw new IllegalArgumentException(CoreError.INVALID_CONTACT_POINTS.buildMessage()); } - endpoint = databaseConfig.getContactPoints().get(0); + String fullEndpoint = databaseConfig.getContactPoints().get(0); + int lastSlashIndex = fullEndpoint.lastIndexOf('/'); + if (lastSlashIndex != -1 && lastSlashIndex < fullEndpoint.length() - 1) { + endpoint = fullEndpoint.substring(0, lastSlashIndex); + bucket = fullEndpoint.substring(lastSlashIndex + 1); + } else { + throw new IllegalArgumentException( + "Invalid contact points format. 
Expected: BLOB_URI/BUCKET_NAME"); + } username = databaseConfig.getUsername().orElse(null); password = databaseConfig.getPassword().orElse(null); - if (!databaseConfig.getProperties().containsKey(BUCKET)) { - throw new IllegalArgumentException("Bucket name is not specified."); - } - bucket = databaseConfig.getProperties().getProperty(BUCKET); if (databaseConfig.getProperties().containsKey(TABLE_METADATA_NAMESPACE)) { logger.warn( @@ -69,16 +78,33 @@ public BlobConfig(DatabaseConfig databaseConfig) { metadataNamespace = databaseConfig.getSystemNamespaceName(); } + if (databaseConfig.getScanFetchSize() != DatabaseConfig.DEFAULT_SCAN_FETCH_SIZE) { + logger.warn( + "The configuration property \"" + + DatabaseConfig.SCAN_FETCH_SIZE + + "\" is not applicable to Blob storage and will be ignored."); + } + parallelUploadBlockSizeInBytes = getLong( - databaseConfig.getProperties(), PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES, 50 * 1024 * 1024); + databaseConfig.getProperties(), + PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES, + DEFAULT_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES); parallelUploadMaxParallelism = - getInt(databaseConfig.getProperties(), PARALLEL_UPLOAD_MAX_PARALLELISM, 4); + getInt( + databaseConfig.getProperties(), + PARALLEL_UPLOAD_MAX_PARALLELISM, + DEFAULT_PARALLEL_UPLOAD_MAX_PARALLELISM); parallelUploadThresholdInBytes = getLong( - databaseConfig.getProperties(), PARALLEL_UPLOAD_THRESHOLD_IN_BYTES, 100 * 1024 * 1024); + databaseConfig.getProperties(), + PARALLEL_UPLOAD_THRESHOLD_IN_BYTES, + DEFAULT_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES); requestTimeoutInSeconds = - getInt(databaseConfig.getProperties(), REQUEST_TIMEOUT_IN_SECONDS, 15); + getInt( + databaseConfig.getProperties(), + REQUEST_TIMEOUT_IN_SECONDS, + DEFAULT_REQUEST_TIMEOUT_IN_SECONDS); } @Override diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/BlobProvider.java b/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobProvider.java similarity index 55% rename from core/src/main/java/com/scalar/db/storage/objectstorage/BlobProvider.java rename to core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobProvider.java index 9080036964..ab1ffc4f35 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/BlobProvider.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobProvider.java @@ -1,4 +1,6 @@ -package com.scalar.db.storage.objectstorage; +package com.scalar.db.storage.objectstorage.blob; + +import com.scalar.db.storage.objectstorage.ObjectStorageProvider; public class BlobProvider implements ObjectStorageProvider { @Override diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/BlobWrapper.java b/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobWrapper.java similarity index 52% rename from core/src/main/java/com/scalar/db/storage/objectstorage/BlobWrapper.java rename to core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobWrapper.java index 33f37baeac..0dabf3afc9 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/BlobWrapper.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobWrapper.java @@ -1,4 +1,4 @@ -package com.scalar.db.storage.objectstorage; +package com.scalar.db.storage.objectstorage.blob; import com.azure.core.http.HttpHeaderName; import com.azure.core.util.BinaryData; @@ -12,7 +12,12 @@ import com.azure.storage.blob.models.ListBlobsOptions; import com.azure.storage.blob.models.ParallelTransferOptions; import com.azure.storage.blob.options.BlobParallelUploadOptions; +import 
com.scalar.db.storage.objectstorage.ObjectStorageWrapper; +import com.scalar.db.storage.objectstorage.ObjectStorageWrapperException; +import com.scalar.db.storage.objectstorage.ObjectStorageWrapperResponse; +import com.scalar.db.storage.objectstorage.PreconditionFailedException; import java.time.Duration; +import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; @@ -34,29 +39,38 @@ public BlobWrapper(BlobContainerClient client, BlobConfig config) { } @Override - public ObjectStorageWrapperResponse get(String key) throws ObjectStorageWrapperException { + public Optional get(String key) + throws ObjectStorageWrapperException { try { BlobClient blobClient = client.getBlobClient(key); BlobDownloadContentResponse response = blobClient.downloadContentWithResponse(null, null, requestTimeoutInSeconds, null); String data = response.getValue().toString(); String eTag = response.getHeaders().getValue(HttpHeaderName.ETAG); - return new ObjectStorageWrapperResponse(data, eTag); + return Optional.of(new ObjectStorageWrapperResponse(data, eTag)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { - throw new ObjectStorageWrapperException( - ObjectStorageWrapperException.StatusCode.NOT_FOUND, e); + return Optional.empty(); } - throw e; + throw new ObjectStorageWrapperException( + String.format("Failed to get the object with key '%s'", key), e); + } catch (Exception e) { + throw new ObjectStorageWrapperException( + String.format("Failed to get the object with key '%s'", key), e); } } @Override - public Set getKeys(String prefix) { - return client.listBlobs(new ListBlobsOptions().setPrefix(prefix), requestTimeoutInSeconds) - .stream() - .map(BlobItem::getName) - .collect(Collectors.toSet()); + public Set getKeys(String prefix) throws ObjectStorageWrapperException { + try { + return client.listBlobs(new ListBlobsOptions().setPrefix(prefix), requestTimeoutInSeconds) + .stream() + .map(BlobItem::getName) + .collect(Collectors.toSet()); + } catch (Exception e) { + throw new ObjectStorageWrapperException( + String.format("Failed to get the object keys with prefix '%s'", prefix), e); + } } @Override @@ -70,10 +84,16 @@ public void insert(String key, String object) throws ObjectStorageWrapperExcepti blobClient.uploadWithResponse(options, requestTimeoutInSeconds, null); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { - throw new ObjectStorageWrapperException( - ObjectStorageWrapperException.StatusCode.ALREADY_EXISTS, e); + throw new PreconditionFailedException( + String.format( + "Failed to insert the object with key '%s' due to precondition failure", key), + e); } - throw e; + throw new ObjectStorageWrapperException( + String.format("Failed to insert the object with key '%s'", key), e); + } catch (Exception e) { + throw new ObjectStorageWrapperException( + String.format("Failed to insert the object with key '%s'", key), e); } } @@ -88,15 +108,19 @@ public void update(String key, String object, String version) .setParallelTransferOptions(parallelTransferOptions); blobClient.uploadWithResponse(options, requestTimeoutInSeconds, null); } catch (BlobStorageException e) { - if (e.getErrorCode().equals(BlobErrorCode.CONDITION_NOT_MET)) { - throw new ObjectStorageWrapperException( - ObjectStorageWrapperException.StatusCode.VERSION_MISMATCH, e); - } - if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { - throw new ObjectStorageWrapperException( - 
ObjectStorageWrapperException.StatusCode.NOT_FOUND, e); + if (e.getErrorCode().equals(BlobErrorCode.CONDITION_NOT_MET) + || e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { + throw new PreconditionFailedException( + String.format( + "Failed to update the object with key '%s' due to precondition failure", + key), + e); } - throw e; + throw new ObjectStorageWrapperException( + String.format("Failed to update the object with key '%s'", key), e); + } catch (Exception e) { + throw new ObjectStorageWrapperException( + String.format("Failed to update the object with key '%s'", key), e); } } @@ -107,10 +131,16 @@ public void delete(String key) throws ObjectStorageWrapperException { blobClient.delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { - throw new ObjectStorageWrapperException( - ObjectStorageWrapperException.StatusCode.NOT_FOUND, e); + throw new PreconditionFailedException( + String.format( + "Failed to delete the object with key '%s' due to precondition failure", key), + e); } - throw e; + throw new ObjectStorageWrapperException( + String.format("Failed to delete the object with key '%s'", key), e); + } catch (Exception e) { + throw new ObjectStorageWrapperException( + String.format("Failed to delete the object with key '%s'", key), e); } } @@ -121,23 +151,31 @@ public void delete(String key, String version) throws ObjectStorageWrapperExcept blobClient.deleteWithResponse( null, new BlobRequestConditions().setIfMatch(version), requestTimeoutInSeconds, null); } catch (BlobStorageException e) { - if (e.getErrorCode().equals(BlobErrorCode.CONDITION_NOT_MET)) { - throw new ObjectStorageWrapperException( - ObjectStorageWrapperException.StatusCode.VERSION_MISMATCH, e); + if (e.getErrorCode().equals(BlobErrorCode.CONDITION_NOT_MET) + || e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { + throw new PreconditionFailedException( + String.format( + "Failed to delete the object with key '%s' due to precondition failure", key), + e); } - if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { - throw new ObjectStorageWrapperException( - ObjectStorageWrapperException.StatusCode.NOT_FOUND, e); - } - throw e; + throw new ObjectStorageWrapperException( + String.format("Failed to delete the object with key '%s'", key), e); + } catch (Exception e) { + throw new ObjectStorageWrapperException( + String.format("Failed to delete the object with key '%s'", key), e); } } @Override - public void deleteByPrefix(String prefix) { - client - .listBlobs(new ListBlobsOptions().setPrefix(prefix), requestTimeoutInSeconds) - .forEach(blobItem -> client.getBlobClient(blobItem.getName()).delete()); + public void deleteByPrefix(String prefix) throws ObjectStorageWrapperException { + try { + client + .listBlobs(new ListBlobsOptions().setPrefix(prefix), requestTimeoutInSeconds) + .forEach(blobItem -> client.getBlobClient(blobItem.getName()).delete()); + } catch (Exception e) { + throw new ObjectStorageWrapperException( + String.format("Failed to delete the objects with prefix '%s'", prefix), e); + } } @Override diff --git a/core/src/main/resources/META-INF/services/com.scalar.db.api.DistributedStorageProvider b/core/src/main/resources/META-INF/services/com.scalar.db.api.DistributedStorageProvider index 6cd5e50817..1995926861 100644 --- a/core/src/main/resources/META-INF/services/com.scalar.db.api.DistributedStorageProvider +++ b/core/src/main/resources/META-INF/services/com.scalar.db.api.DistributedStorageProvider @@ -2,5 +2,5 @@
com.scalar.db.storage.cassandra.CassandraProvider com.scalar.db.storage.cosmos.CosmosProvider com.scalar.db.storage.dynamo.DynamoProvider com.scalar.db.storage.jdbc.JdbcProvider -com.scalar.db.storage.objectstorage.BlobProvider +com.scalar.db.storage.objectstorage.blob.BlobProvider com.scalar.db.storage.multistorage.MultiStorageProvider diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/BlobConfigTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/BlobConfigTest.java new file mode 100644 index 0000000000..c9c7dcbb59 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/BlobConfigTest.java @@ -0,0 +1,143 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.storage.objectstorage.blob.BlobConfig; +import java.util.Properties; +import org.junit.jupiter.api.Test; + +public class BlobConfigTest { + private static final String ANY_USERNAME = "any_user"; + private static final String ANY_PASSWORD = "any_password"; + private static final String ANY_BUCKET = "bucket"; + private static final String ANY_ENDPOINT = "http://localhost:10000/" + ANY_USERNAME; + private static final String ANY_CONTACT_POINT = ANY_ENDPOINT + "/" + ANY_BUCKET; + private static final String BLOB_STORAGE = "blob"; + private static final String ANY_TABLE_METADATA_NAMESPACE = "any_namespace"; + private static final String ANY_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES = "5242880"; // 5MB + private static final String ANY_PARALLEL_UPLOAD_MAX_PARALLELISM = "4"; + private static final String ANY_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES = "10485760"; // 10MB + private static final String ANY_REQUEST_TIMEOUT_IN_SECONDS = "30"; + + @Test + public void constructor_AllPropertiesGiven_ShouldLoadProperly() { + // Arrange + Properties props = new Properties(); + props.setProperty(DatabaseConfig.CONTACT_POINTS, ANY_CONTACT_POINT); + props.setProperty(DatabaseConfig.USERNAME, ANY_USERNAME); + props.setProperty(DatabaseConfig.PASSWORD, ANY_PASSWORD); + props.setProperty(DatabaseConfig.STORAGE, BLOB_STORAGE); + props.setProperty(DatabaseConfig.SYSTEM_NAMESPACE_NAME, ANY_TABLE_METADATA_NAMESPACE); + props.setProperty( + BlobConfig.PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES, ANY_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES); + props.setProperty( + BlobConfig.PARALLEL_UPLOAD_MAX_PARALLELISM, ANY_PARALLEL_UPLOAD_MAX_PARALLELISM); + props.setProperty( + BlobConfig.PARALLEL_UPLOAD_THRESHOLD_IN_BYTES, ANY_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES); + props.setProperty(BlobConfig.REQUEST_TIMEOUT_IN_SECONDS, ANY_REQUEST_TIMEOUT_IN_SECONDS); + + // Act + BlobConfig config = new BlobConfig(new DatabaseConfig(props)); + + // Assert + assertThat(config.getEndpoint()).isEqualTo(ANY_ENDPOINT); + assertThat(config.getUsername()).isEqualTo(ANY_USERNAME); + assertThat(config.getPassword()).isEqualTo(ANY_PASSWORD); + assertThat(config.getBucket()).isEqualTo(ANY_BUCKET); + assertThat(config.getMetadataNamespace()).isEqualTo(ANY_TABLE_METADATA_NAMESPACE); + assertThat(config.getParallelUploadBlockSizeInBytes()) + .isEqualTo(Long.parseLong(ANY_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES)); + assertThat(config.getParallelUploadMaxParallelism()) + .isEqualTo(Integer.parseInt(ANY_PARALLEL_UPLOAD_MAX_PARALLELISM)); + assertThat(config.getParallelUploadThresholdInBytes()) + .isEqualTo(Long.parseLong(ANY_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES)); + 
assertThat(config.getRequestTimeoutInSeconds()) + .isEqualTo(Integer.parseInt(ANY_REQUEST_TIMEOUT_IN_SECONDS)); + } + + @Test + public void constructor_PropertiesWithoutOptimizationOptionsGiven_ShouldLoadProperly() { + // Arrange + Properties props = new Properties(); + props.setProperty(DatabaseConfig.CONTACT_POINTS, ANY_CONTACT_POINT); + props.setProperty(DatabaseConfig.USERNAME, ANY_USERNAME); + props.setProperty(DatabaseConfig.PASSWORD, ANY_PASSWORD); + props.setProperty(DatabaseConfig.STORAGE, BLOB_STORAGE); + props.setProperty(DatabaseConfig.SYSTEM_NAMESPACE_NAME, ANY_TABLE_METADATA_NAMESPACE); + + // Act + BlobConfig config = new BlobConfig(new DatabaseConfig(props)); + + // Assert + assertThat(config.getEndpoint()).isEqualTo(ANY_ENDPOINT); + assertThat(config.getUsername()).isEqualTo(ANY_USERNAME); + assertThat(config.getPassword()).isEqualTo(ANY_PASSWORD); + assertThat(config.getBucket()).isEqualTo(ANY_BUCKET); + assertThat(config.getMetadataNamespace()).isEqualTo(ANY_TABLE_METADATA_NAMESPACE); + assertThat(config.getParallelUploadBlockSizeInBytes()) + .isEqualTo(BlobConfig.DEFAULT_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES); + assertThat(config.getParallelUploadMaxParallelism()) + .isEqualTo(BlobConfig.DEFAULT_PARALLEL_UPLOAD_MAX_PARALLELISM); + assertThat(config.getParallelUploadThresholdInBytes()) + .isEqualTo(BlobConfig.DEFAULT_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES); + assertThat(config.getRequestTimeoutInSeconds()) + .isEqualTo(BlobConfig.DEFAULT_REQUEST_TIMEOUT_IN_SECONDS); + } + + @Test + public void constructor_WithoutStorage_ShouldThrowIllegalArgumentException() { + // Arrange + Properties props = new Properties(); + props.setProperty(DatabaseConfig.CONTACT_POINTS, ANY_CONTACT_POINT); + props.setProperty(DatabaseConfig.USERNAME, ANY_USERNAME); + props.setProperty(DatabaseConfig.PASSWORD, ANY_PASSWORD); + + // Act Assert + assertThatThrownBy(() -> new BlobConfig(new DatabaseConfig(props))) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void constructor_WithoutSystemNamespaceName_ShouldLoadProperly() { + // Arrange + Properties props = new Properties(); + props.setProperty(DatabaseConfig.CONTACT_POINTS, ANY_CONTACT_POINT); + props.setProperty(DatabaseConfig.USERNAME, ANY_USERNAME); + props.setProperty(DatabaseConfig.PASSWORD, ANY_PASSWORD); + props.setProperty(DatabaseConfig.STORAGE, BLOB_STORAGE); + + // Act + BlobConfig config = new BlobConfig(new DatabaseConfig(props)); + + // Assert + assertThat(config.getEndpoint()).isEqualTo(ANY_ENDPOINT); + assertThat(config.getUsername()).isEqualTo(ANY_USERNAME); + assertThat(config.getPassword()).isEqualTo(ANY_PASSWORD); + assertThat(config.getBucket()).isEqualTo(ANY_BUCKET); + assertThat(config.getMetadataNamespace()) + .isEqualTo(DatabaseConfig.DEFAULT_SYSTEM_NAMESPACE_NAME); + assertThat(config.getParallelUploadBlockSizeInBytes()) + .isEqualTo(BlobConfig.DEFAULT_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES); + assertThat(config.getParallelUploadMaxParallelism()) + .isEqualTo(BlobConfig.DEFAULT_PARALLEL_UPLOAD_MAX_PARALLELISM); + assertThat(config.getParallelUploadThresholdInBytes()) + .isEqualTo(BlobConfig.DEFAULT_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES); + assertThat(config.getRequestTimeoutInSeconds()) + .isEqualTo(BlobConfig.DEFAULT_REQUEST_TIMEOUT_IN_SECONDS); + } + + @Test + public void + constructor_PropertiesWithEmptyContactPointsGiven_ShouldThrowIllegalArgumentException() { + // Arrange + Properties props = new Properties(); + props.setProperty(DatabaseConfig.PASSWORD, ANY_PASSWORD); + 
props.setProperty(DatabaseConfig.STORAGE, BLOB_STORAGE); + + // Act + assertThatThrownBy(() -> new BlobConfig(new DatabaseConfig(props))) + .isInstanceOf(IllegalArgumentException.class); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ConcatenationVisitorTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ConcatenationVisitorTest.java new file mode 100644 index 0000000000..d8416a84ef --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ConcatenationVisitorTest.java @@ -0,0 +1,192 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; +import com.scalar.db.io.IntColumn; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class ConcatenationVisitorTest { + private static final boolean ANY_BOOLEAN = false; + private static final BooleanColumn ANY_BOOLEAN_COLUMN = + BooleanColumn.of("any_boolean", ANY_BOOLEAN); + private static final int ANY_INT = Integer.MIN_VALUE; + private static final IntColumn ANY_INT_COLUMN = IntColumn.of("any_int", ANY_INT); + private static final long ANY_BIGINT = BigIntColumn.MAX_VALUE; + private static final BigIntColumn ANY_BIGINT_COLUMN = BigIntColumn.of("any_bigint", ANY_BIGINT); + private static final float ANY_FLOAT = Float.MIN_NORMAL; + private static final FloatColumn ANY_FLOAT_COLUMN = FloatColumn.of("any_float", ANY_FLOAT); + private static final double ANY_DOUBLE = Double.MIN_NORMAL; + private static final DoubleColumn ANY_DOUBLE_COLUMN = DoubleColumn.of("any_double", ANY_DOUBLE); + private static final String ANY_TEXT = "test"; + private static final TextColumn ANY_TEXT_COLUMN = TextColumn.of("any_text", ANY_TEXT); + private static final byte[] ANY_BLOB = "scalar".getBytes(StandardCharsets.UTF_8); + private static final BlobColumn ANY_BLOB_COLUMN = BlobColumn.of("any_blob", ANY_BLOB); + private static final DateColumn ANY_DATE_COLUMN = DateColumn.of("any_date", DateColumn.MAX_VALUE); + private static final TimeColumn ANY_TIME_COLUMN = TimeColumn.of("any_time", TimeColumn.MAX_VALUE); + private static final TimestampColumn ANY_TIMESTAMP_COLUMN = + TimestampColumn.of("any_timestamp", TimestampColumn.MAX_VALUE); + private static final TimestampTZColumn ANY_TIMESTAMPTZ_COLUMN = + TimestampTZColumn.of("any_timestamp_tz", TimestampTZColumn.MAX_VALUE); + private ConcatenationVisitor visitor; + + @BeforeEach + public void setUp() { + visitor = new ConcatenationVisitor(); + } + + @Test + public void build_AllTypesGiven_ShouldBuildString() { + // Act + visitor.visit(ANY_BOOLEAN_COLUMN); + visitor.visit(ANY_INT_COLUMN); + visitor.visit(ANY_BIGINT_COLUMN); + visitor.visit(ANY_FLOAT_COLUMN); + visitor.visit(ANY_DOUBLE_COLUMN); + visitor.visit(ANY_TEXT_COLUMN); + visitor.visit(ANY_BLOB_COLUMN); + visitor.visit(ANY_DATE_COLUMN); + visitor.visit(ANY_TIME_COLUMN); + visitor.visit(ANY_TIMESTAMP_COLUMN); + visitor.visit(ANY_TIMESTAMPTZ_COLUMN); + String actual = visitor.build(); + + // Assert + String[] values = + 
actual.split(String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), -1); + assertThat(values.length).isEqualTo(11); + assertThat(values[0]).isEqualTo(String.valueOf(ANY_BOOLEAN)); + assertThat(values[1]).isEqualTo(String.valueOf(ANY_INT)); + assertThat(values[2]).isEqualTo(String.valueOf(ANY_BIGINT)); + assertThat(values[3]).isEqualTo(String.valueOf(ANY_FLOAT)); + assertThat(values[4]).isEqualTo(String.valueOf(ANY_DOUBLE)); + assertThat(values[5]).isEqualTo(ANY_TEXT); + assertThat(values[6]) + .isEqualTo(Base64.getUrlEncoder().withoutPadding().encodeToString(ANY_BLOB)); + assertThat(values[7]) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_DATE_COLUMN))); + assertThat(values[8]) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_TIME_COLUMN))); + assertThat(values[9]) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_TIMESTAMP_COLUMN))); + assertThat(values[10]) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_TIMESTAMPTZ_COLUMN))); + } + + @Test + public void visit_BooleanColumnAcceptCalled_ShouldBuildBooleanAsString() { + // Act + ANY_BOOLEAN_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()).isEqualTo(String.valueOf(ANY_BOOLEAN)); + } + + @Test + public void visit_IntColumnAcceptCalled_ShouldBuildIntAsString() { + // Act + ANY_INT_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()).isEqualTo(String.valueOf(ANY_INT)); + } + + @Test + public void visit_BigIntColumnAcceptCalled_ShouldBuildBigIntAsString() { + // Act + ANY_BIGINT_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()).isEqualTo(String.valueOf(ANY_BIGINT)); + } + + @Test + public void visit_FloatColumnAcceptCalled_ShouldBuildFloatAsString() { + // Act + ANY_FLOAT_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()).isEqualTo(String.valueOf(ANY_FLOAT)); + } + + @Test + public void visit_DoubleColumnAcceptCalled_ShouldBuildDoubleAsString() { + // Act + ANY_DOUBLE_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()).isEqualTo(String.valueOf(ANY_DOUBLE)); + } + + @Test + public void visit_TextColumnAcceptCalled_ShouldBuildText() { + // Act + ANY_TEXT_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()).isEqualTo(ANY_TEXT); + } + + @Test + public void visit_BlobColumnAcceptCalled_ShouldBuildBlobAsString() { + // Act + ANY_BLOB_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()) + .isEqualTo(Base64.getUrlEncoder().withoutPadding().encodeToString(ANY_BLOB)); + } + + @Test + public void visit_DateColumnAcceptCalled_ShouldBuildDateAsString() { + // Act + ANY_DATE_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_DATE_COLUMN))); + } + + @Test + public void visit_TimeColumnAcceptCalled_ShouldBuildTimeAsString() { + // Act + ANY_TIME_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_TIME_COLUMN))); + } + + @Test + public void visit_TimestampColumnAcceptCalled_ShouldBuildTimestampAsString() { + // Act + ANY_TIMESTAMP_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_TIMESTAMP_COLUMN))); + } + + @Test + public void visit_TimestampTZColumnAcceptCalled_ShouldBuildTimestampTZAsString() { + // Act + ANY_TIMESTAMPTZ_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()) + 
.isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_TIMESTAMPTZ_COLUMN))); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/MutateStatementHandlerTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/MutateStatementHandlerTest.java new file mode 100644 index 0000000000..c4f3ed455e --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/MutateStatementHandlerTest.java @@ -0,0 +1,992 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.scalar.db.api.ConditionBuilder; +import com.scalar.db.api.Delete; +import com.scalar.db.api.Operation; +import com.scalar.db.api.Put; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.exception.storage.NoMutationException; +import com.scalar.db.io.DataType; +import com.scalar.db.io.Key; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class MutateStatementHandlerTest { + private static final String ANY_NAMESPACE_NAME = "namespace"; + private static final String ANY_TABLE_NAME = "table"; + private static final String ANY_NAME_1 = "name1"; + private static final String ANY_NAME_2 = "name2"; + private static final String ANY_NAME_3 = "name3"; + private static final String ANY_NAME_4 = "name4"; + private static final String ANY_TEXT_1 = "text1"; + private static final String ANY_TEXT_2 = "text2"; + private static final int ANY_INT_1 = 1; + private static final int ANY_INT_2 = 2; + private static final String VERSION = "version1"; + + private MutateStatementHandler handler; + @Mock private ObjectStorageWrapper wrapper; + @Mock private TableMetadataManager metadataManager; + @Mock private TableMetadata metadata; + + @Captor private ArgumentCaptor objectKeyCaptor; + @Captor private ArgumentCaptor payloadCaptor; + @Captor private ArgumentCaptor versionCaptor; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + + handler = new MutateStatementHandler(wrapper, metadataManager); + + when(metadataManager.getTableMetadata(any(Operation.class))).thenReturn(metadata); + when(metadata.getPartitionKeyNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_1))); + when(metadata.getClusteringKeyNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_2))); + when(metadata.getColumnDataType(ANY_NAME_3)).thenReturn(DataType.INT); + when(metadata.getColumnDataType(ANY_NAME_4)).thenReturn(DataType.INT); + } + + private Put preparePut() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); + return Put.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + 
.table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .intValue(ANY_NAME_3, ANY_INT_1) + .intValue(ANY_NAME_4, ANY_INT_2) + .build(); + } + + private Put preparePutWithoutClusteringKey() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + return Put.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .intValue(ANY_NAME_3, ANY_INT_1) + .intValue(ANY_NAME_4, ANY_INT_2) + .build(); + } + + private Delete prepareDelete() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); + return Delete.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .build(); + } + + private Delete prepareDeleteWithoutClusteringKey() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + return Delete.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .build(); + } + + private ObjectStorageRecord prepareExistingRecord() { + Map values = new HashMap<>(); + values.put(ANY_NAME_3, ANY_INT_1); + values.put(ANY_NAME_4, ANY_INT_2); + return new ObjectStorageRecord("concat_key", null, null, values); + } + + @Test + public void handle_PutWithoutConditionsGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() + throws Exception { + // Arrange + Put put = preparePut(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + when(wrapper.get(anyString())).thenReturn(Optional.empty()); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionDoesNotExist_ShouldCallWrapperInsert( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void handle_PutWithoutConditionsGiven_WhenPartitionExists_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Put put = preparePut(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + + Map partition = new HashMap<>(); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void + handle_PutWithoutClusteringKeyGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() + throws Exception { + // Arrange + Put put = preparePutWithoutClusteringKey(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + when(wrapper.get(anyString())).thenReturn(Optional.empty()); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionDoesNotExist_ShouldCallWrapperInsert( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void handle_PutWithoutClusteringKeyGiven_WhenPartitionExists_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Put put = preparePutWithoutClusteringKey(); + 
ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + + Map partition = new HashMap<>(); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + handler.handle(put); + + // Assert + verify(wrapper).get(objectKeyCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void handle_PutWithoutConditionsWrapperExceptionThrown_ShouldThrowExecutionException() + throws Exception { + // Arrange + Put put = preparePut(); + ObjectStorageWrapperException exception = new ObjectStorageWrapperException("Test error"); + when(wrapper.get(anyString())).thenThrow(exception); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)) + .isInstanceOf(ExecutionException.class) + .hasCause(exception); + } + + @Test + public void handle_PutIfNotExistsGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() + throws Exception { + // Arrange + Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfNotExists()).build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + + when(wrapper.get(anyString())).thenReturn(Optional.empty()); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionDoesNotExist_ShouldCallWrapperInsert( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void + handle_PutIfNotExistsGiven_WhenPartitionExistsButRecordDoesNotExist_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfNotExists()).build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + + Map partition = new HashMap<>(); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void + handle_PutIfNotExistsGiven_WhenPartitionAndRecordExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfNotExists()).build(); + Map partition = new HashMap<>(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + partition.put(mutation.getRecordId(), prepareExistingRecord()); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act & Assert + assertThatThrownBy(() -> 
handler.handle(put)).isInstanceOf(NoMutationException.class); + } + + @Test + public void handle_PutIfExistsGiven_WhenPartitionDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfExists()).build(); + when(wrapper.get(anyString())).thenReturn(Optional.empty()); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); + } + + @Test + public void + handle_PutIfExistsGiven_WhenPartitionExistsButRecordDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfExists()).build(); + Map partition = new HashMap<>(); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); + } + + @Test + public void handle_PutIfExistsGiven_WhenPartitionAndRecordExist_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfExists()).build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + + Map partition = new HashMap<>(); + partition.put(mutation.getRecordId(), prepareExistingRecord()); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void + handle_PutIfGiven_WhenConditionMatchesAndPartitionDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Put put = + Put.newBuilder(preparePut()) + .condition( + ConditionBuilder.putIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + when(wrapper.get(anyString())).thenReturn(Optional.empty()); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); + } + + @Test + public void + handle_PutIfGiven_WhenConditionMatchesAndPartitionExistsButRecordDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Put put = + Put.newBuilder(preparePut()) + .condition( + ConditionBuilder.putIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + Map partition = new HashMap<>(); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); + } + + @Test + public void + handle_PutIfGiven_WhenConditionMatchesAndPartitionAndRecordExist_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Put put = + Put.newBuilder(preparePut()) + .condition( + 
ConditionBuilder.putIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + + Map partition = new HashMap<>(); + partition.put(mutation.getRecordId(), prepareExistingRecord()); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void handle_PutIfGiven_WhenConditionDoesNotMatch_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Put put = + Put.newBuilder(preparePut()) + .condition( + ConditionBuilder.putIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(999)) + .build()) + .build(); + Map partition = new HashMap<>(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + partition.put(mutation.getRecordId(), prepareExistingRecord()); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); + } + + private void assert_Put_WhenPartitionDoesNotExist_ShouldCallWrapperInsert( + String expectedObjectKey, String expectedConcatenatedKey) + throws ObjectStorageWrapperException { + verify(wrapper).get(objectKeyCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + verify(wrapper).insert(objectKeyCaptor.capture(), payloadCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + Map insertedPartition = + Serializer.deserialize( + payloadCaptor.getValue(), new TypeReference>() {}); + assertThat(insertedPartition).containsKey(expectedConcatenatedKey); + assertThat(insertedPartition.get(expectedConcatenatedKey).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + } + + private void assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( + String expectedObjectKey, String expectedConcatenatedKey) + throws ObjectStorageWrapperException { + verify(wrapper) + .update(objectKeyCaptor.capture(), payloadCaptor.capture(), versionCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + Map updatedPartition = + Serializer.deserialize( + payloadCaptor.getValue(), new TypeReference>() {}); + assertThat(updatedPartition).containsKey(expectedConcatenatedKey); + assertThat(updatedPartition.get(expectedConcatenatedKey).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(versionCaptor.getValue()).isEqualTo(VERSION); + } + + @Test + public void + handle_DeleteWithoutConditionsGiven_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Delete delete = prepareDelete(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, 
mutation.getConcatenatedPartitionKey()); + + Map partition = new HashMap<>(); + partition.put(mutation.getRecordId(), prepareExistingRecord()); + String expectedExistingRecordKey = "existing_record_key"; + partition.put(expectedExistingRecordKey, prepareExistingRecord()); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getConcatenatedPartitionKey(), expectedExistingRecordKey); + } + + @Test + public void handle_DeleteWithoutConditionsGiven_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete() + throws Exception { + // Arrange + Delete delete = prepareDelete(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + + Map partition = new HashMap<>(); + partition.put(mutation.getRecordId(), prepareExistingRecord()); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete(expectedObjectKey); + } + + @Test + public void + handle_DeleteWithoutClusteringKeyGiven_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Delete delete = prepareDeleteWithoutClusteringKey(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + + Map partition = new HashMap<>(); + partition.put(mutation.getRecordId(), prepareExistingRecord()); + String expectedExistingRecordKey = "existing_record_key"; + partition.put(expectedExistingRecordKey, prepareExistingRecord()); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId(), expectedExistingRecordKey); + } + + @Test + public void + handle_DeleteWithoutClusteringKeyGiven_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete() + throws Exception { + // Arrange + Delete delete = prepareDeleteWithoutClusteringKey(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + + Map partition = new HashMap<>(); + partition.put(mutation.getRecordId(), prepareExistingRecord()); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new 
ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete(expectedObjectKey); + } + + @Test + public void handle_DeleteWithoutConditionsWrapperExceptionThrown_ShouldThrowExecutionException() + throws Exception { + // Arrange + Delete delete = prepareDelete(); + ObjectStorageWrapperException exception = new ObjectStorageWrapperException("Test error"); + when(wrapper.get(anyString())).thenThrow(exception); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(delete)) + .isInstanceOf(ExecutionException.class) + .hasCause(exception); + } + + @Test + public void handle_DeleteIfExistsGiven_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()).condition(ConditionBuilder.deleteIfExists()).build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + + Map partition = new HashMap<>(); + partition.put(mutation.getRecordId(), prepareExistingRecord()); + String expectedExistingRecordKey = "existing_record_key"; + partition.put(expectedExistingRecordKey, prepareExistingRecord()); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId(), expectedExistingRecordKey); + } + + @Test + public void handle_DeleteIfExistsGiven_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()).condition(ConditionBuilder.deleteIfExists()).build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + + Map partition = new HashMap<>(); + partition.put(mutation.getRecordId(), prepareExistingRecord()); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete(expectedObjectKey); + } + + @Test + public void handle_DeleteIfExistsGiven_WhenPartitionDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()).condition(ConditionBuilder.deleteIfExists()).build(); + when(wrapper.get(anyString())).thenReturn(Optional.empty()); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); + } + + @Test + public void + handle_DeleteIfExistsGiven_WhenPartitionExistsButRecordDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Delete delete 
= + Delete.newBuilder(prepareDelete()).condition(ConditionBuilder.deleteIfExists()).build(); + Map partition = new HashMap<>(); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); + } + + @Test + public void + handle_DeleteIfGiven_WhenConditionMatchesAndPartitionAndRecordExistAndNewPartitionIsNotEmpty_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()) + .condition( + ConditionBuilder.deleteIf( + ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + + Map partition = new HashMap<>(); + partition.put(mutation.getRecordId(), prepareExistingRecord()); + String expectedExistingRecordKey = "existing_record_key"; + partition.put(expectedExistingRecordKey, prepareExistingRecord()); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId(), expectedExistingRecordKey); + } + + @Test + public void + handle_DeleteIfGiven_WhenConditionMatchesAndPartitionAndRecordExistAndPartitionIsEmpty_ShouldCallWrapperDelete() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()) + .condition( + ConditionBuilder.deleteIf( + ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + Map partition = new HashMap<>(); + partition.put(mutation.getRecordId(), prepareExistingRecord()); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete(expectedObjectKey); + } + + @Test + public void + handle_DeleteIfGiven_WhenConditionMatchesAndPartitionExistsButRecordDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()) + .condition( + ConditionBuilder.deleteIf( + ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + Map partition = new HashMap<>(); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + 
when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); + } + + @Test + public void + handle_DeleteIfGiven_WhenConditionMatchesAndPartitionDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()) + .condition( + ConditionBuilder.deleteIf( + ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + when(wrapper.get(anyString())).thenReturn(Optional.empty()); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); + } + + @Test + public void handle_DeleteIfGiven_WhenConditionDoesNotMatch_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()) + .condition( + ConditionBuilder.deleteIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(999)) + .build()) + .build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + Map partition = new HashMap<>(); + partition.put(mutation.getRecordId(), prepareExistingRecord()); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); + } + + private void assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( + String expectedObjectKey, String expectedConcatenatedKey, String expectedExistingRecordKey) + throws ObjectStorageWrapperException { + verify(wrapper) + .update(objectKeyCaptor.capture(), payloadCaptor.capture(), versionCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + Map updatedPartition = + Serializer.deserialize( + payloadCaptor.getValue(), new TypeReference>() {}); + assertThat(updatedPartition).doesNotContainKey(expectedConcatenatedKey); + assertThat(updatedPartition).containsKey(expectedExistingRecordKey); + } + + private void assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete( + String expectedObjectKey) throws ObjectStorageWrapperException { + verify(wrapper).delete(objectKeyCaptor.capture(), versionCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + assertThat(versionCaptor.getValue()).isEqualTo(VERSION); + } + + @Test + public void + handle_MultipleMutationsForSinglePartitionGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() + throws Exception { + // Arrange + Put put1 = preparePut(); + Put put2 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put2")).build(); + Put put3 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put3")).build(); + Put put4 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put4")).build(); + ObjectStorageMutation mutation1 = new ObjectStorageMutation(put1, metadata); + ObjectStorageMutation mutation2 = new ObjectStorageMutation(put2, metadata); + ObjectStorageMutation mutation3 = new ObjectStorageMutation(put3, metadata); + ObjectStorageMutation mutation4 = new ObjectStorageMutation(put4, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation1.getConcatenatedPartitionKey()); + 
when(wrapper.get(anyString())).thenReturn(Optional.empty()); + + // Act + handler.handle(Arrays.asList(put1, put2, put3, put4)); + + // Assert + verify(wrapper).get(objectKeyCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + verify(wrapper).insert(objectKeyCaptor.capture(), payloadCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + Map insertedPartition = + Serializer.deserialize( + payloadCaptor.getValue(), new TypeReference>() {}); + assertThat(insertedPartition).containsKey(mutation1.getRecordId()); + assertThat(insertedPartition.get(mutation1.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(insertedPartition).containsKey(mutation2.getRecordId()); + assertThat(insertedPartition.get(mutation2.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(insertedPartition).containsKey(mutation3.getRecordId()); + assertThat(insertedPartition.get(mutation3.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(insertedPartition).containsKey(mutation4.getRecordId()); + assertThat(insertedPartition.get(mutation4.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + } + + @Test + public void + handle_MultipleMutationsForSinglePartitionGiven_WhenPartitionExists_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Put put1 = preparePut(); + Put put2 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put2")).build(); + Put put3 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put3")).build(); + Put put4 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put4")).build(); + ObjectStorageMutation mutation1 = new ObjectStorageMutation(put1, metadata); + ObjectStorageMutation mutation2 = new ObjectStorageMutation(put2, metadata); + ObjectStorageMutation mutation3 = new ObjectStorageMutation(put3, metadata); + ObjectStorageMutation mutation4 = new ObjectStorageMutation(put4, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation1.getConcatenatedPartitionKey()); + Map partition = new HashMap<>(); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + handler.handle(Arrays.asList(put1, put2, put3, put4)); + + // Assert + verify(wrapper) + .update(objectKeyCaptor.capture(), payloadCaptor.capture(), versionCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + Map updatedPartition = + Serializer.deserialize( + payloadCaptor.getValue(), new TypeReference>() {}); + assertThat(updatedPartition).containsKey(mutation1.getRecordId()); + assertThat(updatedPartition.get(mutation1.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(updatedPartition).containsKey(mutation2.getRecordId()); + assertThat(updatedPartition.get(mutation2.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(updatedPartition).containsKey(mutation3.getRecordId()); + 
assertThat(updatedPartition.get(mutation3.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(updatedPartition).containsKey(mutation4.getRecordId()); + assertThat(updatedPartition.get(mutation4.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(versionCaptor.getValue()).isEqualTo(VERSION); + } + + @Test + public void + handle_MultipleMutationsForDifferentPartitionGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() + throws Exception { + // Arrange + Put put1 = preparePut(); + Put put2 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put2")).build(); + Put put3 = + Put.newBuilder(preparePut()) + .partitionKey(Key.ofText(ANY_NAME_1, ANY_TEXT_2)) + .clusteringKey(Key.ofText(ANY_NAME_2, "put3")) + .build(); + Put put4 = + Put.newBuilder(preparePut()) + .partitionKey(Key.ofText(ANY_NAME_1, ANY_TEXT_2)) + .clusteringKey(Key.ofText(ANY_NAME_2, "put4")) + .build(); + ObjectStorageMutation mutation1 = new ObjectStorageMutation(put1, metadata); + ObjectStorageMutation mutation2 = new ObjectStorageMutation(put2, metadata); + ObjectStorageMutation mutation3 = new ObjectStorageMutation(put3, metadata); + ObjectStorageMutation mutation4 = new ObjectStorageMutation(put4, metadata); + String expectedObjectKey1 = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation1.getConcatenatedPartitionKey()); + String expectedObjectKey2 = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation3.getConcatenatedPartitionKey()); + when(wrapper.get(anyString())).thenReturn(Optional.empty()); + + // Act + handler.handle(Arrays.asList(put1, put2, put3, put4)); + + // Assert + verify(wrapper, times(2)).get(objectKeyCaptor.capture()); + List capturedObjectKeys = objectKeyCaptor.getAllValues(); + assertThat(capturedObjectKeys) + .containsExactlyInAnyOrder(expectedObjectKey1, expectedObjectKey2); + verify(wrapper, times(2)).insert(objectKeyCaptor.capture(), payloadCaptor.capture()); + List insertedObjectKeys = objectKeyCaptor.getAllValues().subList(2, 4); + assertThat(insertedObjectKeys) + .containsExactlyInAnyOrder(expectedObjectKey1, expectedObjectKey2); + + List insertedPayloads = payloadCaptor.getAllValues(); + for (int i = 0; i < insertedPayloads.size(); i++) { + Map insertedPartition = + Serializer.deserialize( + insertedPayloads.get(i), new TypeReference>() {}); + if (insertedObjectKeys.get(i).equals(expectedObjectKey1)) { + assertThat(insertedPartition).containsKey(mutation1.getRecordId()); + assertThat(insertedPartition.get(mutation1.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(insertedPartition).containsKey(mutation2.getRecordId()); + assertThat(insertedPartition.get(mutation2.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + } else if (insertedObjectKeys.get(i).equals(expectedObjectKey2)) { + assertThat(insertedPartition).containsKey(mutation3.getRecordId()); + assertThat(insertedPartition.get(mutation3.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(insertedPartition).containsKey(mutation4.getRecordId()); + assertThat(insertedPartition.get(mutation4.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + } + } + } +} diff --git 
a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java new file mode 100644 index 0000000000..f2bdd1069e --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java @@ -0,0 +1,551 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.google.common.collect.ImmutableMap; +import com.scalar.db.api.Scan.Ordering.Order; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.DataType; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import org.assertj.core.util.Sets; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class ObjectStorageAdminTest { + private static final String METADATA_NAMESPACE = "scalardb"; + + @Mock private ObjectStorageWrapper wrapper; + @Mock private ObjectStorageConfig config; + private ObjectStorageAdmin admin; + + @Captor private ArgumentCaptor objectKeyCaptor; + @Captor private ArgumentCaptor payloadCaptor; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + + when(config.getMetadataNamespace()).thenReturn(METADATA_NAMESPACE); + admin = new ObjectStorageAdmin(wrapper, config); + } + + @Test + public void getTableMetadata_ShouldReturnCorrectTableMetadata() throws Exception { + // Arrange + String namespace = "ns"; + String table = "table"; + String tableMetadataKey = namespace + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + table; + String objectKey = + ObjectStorageUtils.getObjectKey( + METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE); + + Map columnsMap = + new ImmutableMap.Builder() + .put("c1", "int") + .put("c2", "text") + .put("c3", "bigint") + .put("c4", "boolean") + .put("c5", "blob") + .put("c6", "float") + .put("c7", "double") + .put("c8", "date") + .put("c9", "time") + .put("c10", "timestamp") + .put("c11", "timestamptz") + .build(); + + LinkedHashSet partitionKeyNames = Sets.newLinkedHashSet("c1"); + LinkedHashSet clusteringKeyNames = Sets.newLinkedHashSet("c2", "c3"); + Map clusteringOrders = ImmutableMap.of("c2", "ASC", "c3", "DESC"); + + ObjectStorageTableMetadata objectStorageTableMetadata = + new ObjectStorageTableMetadata( + partitionKeyNames, + clusteringKeyNames, + clusteringOrders, + Collections.emptySet(), + columnsMap); + + Map metadataTable = new HashMap<>(); + metadataTable.put(tableMetadataKey, objectStorageTableMetadata); + String serializedMetadata = Serializer.serialize(metadataTable); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedMetadata, "version1"); + + when(wrapper.get(objectKey)).thenReturn(Optional.of(response)); + + // Act + TableMetadata actual = admin.getTableMetadata(namespace, table); + + // Assert + assertThat(actual) + .isEqualTo( + TableMetadata.newBuilder() + .addPartitionKey("c1") + .addClusteringKey("c2", Order.ASC) + .addClusteringKey("c3", Order.DESC) + 
+                .addColumn("c1", DataType.INT)
+                .addColumn("c2", DataType.TEXT)
+                .addColumn("c3", DataType.BIGINT)
+                .addColumn("c4", DataType.BOOLEAN)
+                .addColumn("c5", DataType.BLOB)
+                .addColumn("c6", DataType.FLOAT)
+                .addColumn("c7", DataType.DOUBLE)
+                .addColumn("c8", DataType.DATE)
+                .addColumn("c9", DataType.TIME)
+                .addColumn("c10", DataType.TIMESTAMP)
+                .addColumn("c11", DataType.TIMESTAMPTZ)
+                .build());
+  }
+
+  @Test
+  public void unsupportedOperations_ShouldThrowUnsupportedException() {
+    // Arrange
+    String namespace = "sample_ns";
+    String table = "tbl";
+    String column = "col";
+
+    // Act
+    Throwable thrown1 =
+        catchThrowable(() -> admin.createIndex(namespace, table, column, Collections.emptyMap()));
+    Throwable thrown2 = catchThrowable(() -> admin.dropIndex(namespace, table, column));
+    Throwable thrown3 =
+        catchThrowable(
+            () -> admin.getImportTableMetadata(namespace, table, Collections.emptyMap()));
+    Throwable thrown4 =
+        catchThrowable(() -> admin.addRawColumnToTable(namespace, table, column, DataType.INT));
+    Throwable thrown5 =
+        catchThrowable(
+            () ->
+                admin.importTable(
+                    namespace, table, Collections.emptyMap(), Collections.emptyMap()));
+    Throwable thrown6 = catchThrowable(() -> admin.dropColumnFromTable(namespace, table, column));
+    Throwable thrown7 =
+        catchThrowable(() -> admin.renameColumn(namespace, table, column, "newCol"));
+    Throwable thrown8 = catchThrowable(() -> admin.renameTable(namespace, table, "newTable"));
+    Throwable thrown9 =
+        catchThrowable(() -> admin.alterColumnType(namespace, table, column, DataType.INT));
+
+    // Assert
+    assertThat(thrown1).isInstanceOf(UnsupportedOperationException.class);
+    assertThat(thrown2).isInstanceOf(UnsupportedOperationException.class);
+    assertThat(thrown3).isInstanceOf(UnsupportedOperationException.class);
+    assertThat(thrown4).isInstanceOf(UnsupportedOperationException.class);
+    assertThat(thrown5).isInstanceOf(UnsupportedOperationException.class);
+    assertThat(thrown6).isInstanceOf(UnsupportedOperationException.class);
+    assertThat(thrown7).isInstanceOf(UnsupportedOperationException.class);
+    assertThat(thrown8).isInstanceOf(UnsupportedOperationException.class);
+    assertThat(thrown9).isInstanceOf(UnsupportedOperationException.class);
+  }
+
+  @Test
+  public void getNamespaceNames_ShouldWorkProperly() throws Exception {
+    // Arrange
+    Map<String, ObjectStorageNamespaceMetadata> namespaceMetadataTable = new HashMap<>();
+    namespaceMetadataTable.put("ns1", new ObjectStorageNamespaceMetadata("ns1"));
+    namespaceMetadataTable.put("ns2", new ObjectStorageNamespaceMetadata("ns2"));
+    String serializedMetadata = Serializer.serialize(namespaceMetadataTable);
+    ObjectStorageWrapperResponse response =
+        new ObjectStorageWrapperResponse(serializedMetadata, "version1");
+
+    when(wrapper.get(
+            ObjectStorageUtils.getObjectKey(
+                METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE)))
+        .thenReturn(Optional.of(response));
+
+    // Act
+    Set<String> actualNamespaces = admin.getNamespaceNames();
+
+    // Assert
+    assertThat(actualNamespaces).containsExactlyInAnyOrder("ns1", "ns2");
+  }
+
+  @Test
+  public void getNamespaceNames_NamespaceMetadataTableDoesNotExist_ShouldReturnEmptySet()
+      throws Exception {
+    // Arrange
+    when(wrapper.get(
+            ObjectStorageUtils.getObjectKey(
+                METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE)))
+        .thenReturn(Optional.empty());
+
+    // Act
+    Set<String> actualNamespaces = admin.getNamespaceNames();
+
+    // Assert
+    assertThat(actualNamespaces).isEmpty();
+  }
+
+  @Test
+  public void namespaceExists_WithExistingNamespace_ShouldReturnTrue() throws Exception {
+    // Arrange
+    String namespace = "ns";
+    Map<String, ObjectStorageNamespaceMetadata> metadataTable = new HashMap<>();
+    metadataTable.put(namespace, new ObjectStorageNamespaceMetadata(namespace));
+    String serializedMetadata = Serializer.serialize(metadataTable);
+    ObjectStorageWrapperResponse response =
+        new ObjectStorageWrapperResponse(serializedMetadata, "version1");
+
+    when(wrapper.get(
+            ObjectStorageUtils.getObjectKey(
+                METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE)))
+        .thenReturn(Optional.of(response));
+
+    // Act & Assert
+    assertThat(admin.namespaceExists(namespace)).isTrue();
+  }
+
+  @Test
+  public void namespaceExists_WithNonExistingNamespace_ShouldReturnFalse() throws Exception {
+    // Arrange
+    when(wrapper.get(
+            ObjectStorageUtils.getObjectKey(
+                METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE)))
+        .thenReturn(Optional.empty());
+
+    // Act & Assert
+    assertThat(admin.namespaceExists("ns")).isFalse();
+  }
+
+  @Test
+  public void namespaceExists_WithMetadataNamespace_ShouldReturnTrue() throws Exception {
+    // Act & Assert
+    assertThat(admin.namespaceExists(METADATA_NAMESPACE)).isTrue();
+  }
+
+  @Test
+  public void createNamespace_ShouldInsertNamespaceMetadata() throws Exception {
+    // Arrange
+    String namespace = "ns";
+    String expectedObjectKey =
+        ObjectStorageUtils.getObjectKey(
+            METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE);
+
+    when(wrapper.get(expectedObjectKey)).thenReturn(Optional.empty());
+
+    // Act
+    admin.createNamespace(namespace, Collections.emptyMap());
+
+    // Assert
+    verify(wrapper).insert(eq(expectedObjectKey), payloadCaptor.capture());
+
+    Map<String, ObjectStorageNamespaceMetadata> insertedMetadata =
+        Serializer.deserialize(
+            payloadCaptor.getValue(),
+            new TypeReference<Map<String, ObjectStorageNamespaceMetadata>>() {});
+    assertThat(insertedMetadata).containsKey(namespace);
+    assertThat(insertedMetadata.get(namespace).getName()).isEqualTo(namespace);
+  }
+
+  @Test
+  public void createTable_ShouldInsertTableMetadata() throws Exception {
+    // Arrange
+    String namespace = "ns";
+    String table = "sample_table";
+    TableMetadata metadata =
+        TableMetadata.newBuilder()
+            .addPartitionKey("c3")
+            .addClusteringKey("c1", Order.DESC)
+            .addClusteringKey("c2", Order.ASC)
+            .addColumn("c1", DataType.TEXT)
+            .addColumn("c2", DataType.BIGINT)
+            .addColumn("c3", DataType.BOOLEAN)
+            .build();
+    String expectedObjectKey =
+        ObjectStorageUtils.getObjectKey(
+            METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE);
+
+    when(wrapper.get(expectedObjectKey)).thenReturn(Optional.empty());
+
+    // Act
+    admin.createTable(namespace, table, metadata, Collections.emptyMap());
+
+    // Assert
+    verify(wrapper).insert(eq(expectedObjectKey), payloadCaptor.capture());
+
+    Map<String, ObjectStorageTableMetadata> insertedMetadata =
+        Serializer.deserialize(
+            payloadCaptor.getValue(),
+            new TypeReference<Map<String, ObjectStorageTableMetadata>>() {});
+    String tableMetadataKey = namespace + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + table;
+    assertThat(insertedMetadata).containsKey(tableMetadataKey);
+    ObjectStorageTableMetadata tableMetadata = insertedMetadata.get(tableMetadataKey);
+    assertThat(tableMetadata.getPartitionKeyNames()).containsExactly("c3");
+    assertThat(tableMetadata.getClusteringKeyNames()).containsExactly("c1", "c2");
+    assertThat(tableMetadata.getClusteringOrders())
+        .containsEntry("c1", "DESC")
+        .containsEntry("c2", "ASC");
+    assertThat(tableMetadata.getColumns())
+        .containsEntry("c1", "text")
+        .containsEntry("c2", "bigint")
+        .containsEntry("c3", "boolean");
+  }
+
+  @Test
+  public void dropNamespace_ShouldDeleteNamespaceMetadata() throws Exception {
+    // Arrange
+    String namespace = "ns";
+    Map<String, ObjectStorageNamespaceMetadata> metadataTable = new HashMap<>();
+    metadataTable.put(namespace, new ObjectStorageNamespaceMetadata(namespace));
+    String serializedMetadata = Serializer.serialize(metadataTable);
+    ObjectStorageWrapperResponse response =
+        new ObjectStorageWrapperResponse(serializedMetadata, "version1");
+    String expectedObjectKey =
+        ObjectStorageUtils.getObjectKey(
+            METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE);
+
+    when(wrapper.get(expectedObjectKey)).thenReturn(Optional.of(response));
+
+    // Act
+    admin.dropNamespace(namespace);
+
+    // Assert
+    verify(wrapper).delete(eq(expectedObjectKey), eq("version1"));
+  }
+
+  @Test
+  public void getNamespaceTableNames_ShouldReturnTableNamesProperly() throws Exception {
+    // Arrange
+    String namespace = "ns";
+    String tableMetadataKey1 = namespace + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + "t1";
+    String tableMetadataKey2 = namespace + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + "t2";
+    String tableMetadataKey3 = "other_ns" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + "t3";
+
+    Map<String, ObjectStorageTableMetadata> metadataTable = new HashMap<>();
+    metadataTable.put(
+        tableMetadataKey1, new ObjectStorageTableMetadata(null, null, null, null, null));
+    metadataTable.put(
+        tableMetadataKey2, new ObjectStorageTableMetadata(null, null, null, null, null));
+    metadataTable.put(
+        tableMetadataKey3, new ObjectStorageTableMetadata(null, null, null, null, null));
+
+    String serializedTableMetadata = Serializer.serialize(metadataTable);
+    ObjectStorageWrapperResponse tableMetadataResponse =
+        new ObjectStorageWrapperResponse(serializedTableMetadata, "version1");
+
+    Map<String, ObjectStorageNamespaceMetadata> namespaceMetadata = new HashMap<>();
+    namespaceMetadata.put(namespace, new ObjectStorageNamespaceMetadata(namespace));
+    String serializedNamespaceMetadata = Serializer.serialize(namespaceMetadata);
+    ObjectStorageWrapperResponse namespaceResponse =
+        new ObjectStorageWrapperResponse(serializedNamespaceMetadata, "version1");
+
+    String tableMetadataObjectKey =
+        ObjectStorageUtils.getObjectKey(
+            METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE);
+    String namespaceMetadataObjectKey =
+        ObjectStorageUtils.getObjectKey(
+            METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE);
+
+    when(wrapper.get(tableMetadataObjectKey)).thenReturn(Optional.of(tableMetadataResponse));
+    when(wrapper.get(namespaceMetadataObjectKey)).thenReturn(Optional.of(namespaceResponse));
+
+    // Act
+    Set<String> actualTableNames = admin.getNamespaceTableNames(namespace);
+
+    // Assert
+    assertThat(actualTableNames).containsExactlyInAnyOrder("t1", "t2");
+  }
+
+  @Test
+  public void addNewColumnToTable_ShouldWorkProperly() throws Exception {
+    // Arrange
+    String namespace = "ns";
+    String table = "table";
+    String currentColumn = "c1";
+    String newColumn = "c2";
+    String tableMetadataKey = namespace + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + table;
+
+    LinkedHashSet<String> partitionKeyNames = Sets.newLinkedHashSet(currentColumn);
+    Map<String, String> columns = ImmutableMap.of(currentColumn, "text");
+    ObjectStorageTableMetadata existingTableMetadata =
+        new ObjectStorageTableMetadata(
+            partitionKeyNames, null, null, Collections.emptySet(), columns);
+
+    Map<String, ObjectStorageTableMetadata> metadataTable = new HashMap<>();
+    metadataTable.put(tableMetadataKey, existingTableMetadata);
+    String serializedMetadata = Serializer.serialize(metadataTable);
+    ObjectStorageWrapperResponse response =
+        new ObjectStorageWrapperResponse(serializedMetadata, "version1");
+    String expectedObjectKey =
+        ObjectStorageUtils.getObjectKey(
+            METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE);
+
+    when(wrapper.get(expectedObjectKey)).thenReturn(Optional.of(response));
+
+    // Act
+    admin.addNewColumnToTable(namespace, table, newColumn, DataType.INT);
+
+    // Assert
+    verify(wrapper).update(eq(expectedObjectKey), payloadCaptor.capture(), eq("version1"));
+
+    Map<String, ObjectStorageTableMetadata> updatedMetadata =
+        Serializer.deserialize(
+            payloadCaptor.getValue(),
+            new TypeReference<Map<String, ObjectStorageTableMetadata>>() {});
+
+    ObjectStorageTableMetadata updatedTableMetadata = updatedMetadata.get(tableMetadataKey);
+    assertThat(updatedTableMetadata.getPartitionKeyNames()).containsExactly(currentColumn);
+    assertThat(updatedTableMetadata.getColumns())
+        .containsEntry(currentColumn, "text")
+        .containsEntry(newColumn, "int");
+  }
+
+  @Test
+  public void repairNamespace_ShouldUpsertNamespaceMetadata() throws Exception {
+    // Arrange
+    String namespace = "ns";
+    Map<String, ObjectStorageNamespaceMetadata> metadataTable = new HashMap<>();
+    String serializedMetadata = Serializer.serialize(metadataTable);
+    ObjectStorageWrapperResponse response =
+        new ObjectStorageWrapperResponse(serializedMetadata, "version1");
+    String expectedObjectKey =
+        ObjectStorageUtils.getObjectKey(
+            METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE);
+
+    when(wrapper.get(expectedObjectKey)).thenReturn(Optional.of(response));
+
+    // Act
+    admin.repairNamespace(namespace, Collections.emptyMap());
+
+    // Assert
+    verify(wrapper).insert(eq(expectedObjectKey), payloadCaptor.capture());
+
+    Map<String, ObjectStorageNamespaceMetadata> insertedMetadata =
+        Serializer.deserialize(
+            payloadCaptor.getValue(),
+            new TypeReference<Map<String, ObjectStorageNamespaceMetadata>>() {});
+    assertThat(insertedMetadata).containsKey(namespace);
+    assertThat(insertedMetadata.get(namespace).getName()).isEqualTo(namespace);
+  }
+
+  @Test
+  public void repairTable_ShouldUpsertTableMetadata() throws Exception {
+    // Arrange
+    String namespace = "ns";
+    String table = "tbl";
+    TableMetadata tableMetadata =
+        TableMetadata.newBuilder()
+            .addColumn("c1", DataType.INT)
+            .addColumn("c2", DataType.TEXT)
+            .addColumn("c3", DataType.BIGINT)
+            .addPartitionKey("c1")
+            .build();
+
+    Map<String, ObjectStorageTableMetadata> metadataTable = new HashMap<>();
+    String serializedMetadata = Serializer.serialize(metadataTable);
+    ObjectStorageWrapperResponse response =
+        new ObjectStorageWrapperResponse(serializedMetadata, "version1");
+    String expectedObjectKey =
+        ObjectStorageUtils.getObjectKey(
+            METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE);
+
+    when(wrapper.get(expectedObjectKey)).thenReturn(Optional.of(response));
+
+    // Act
+    admin.repairTable(namespace, table, tableMetadata, Collections.emptyMap());
+
+    // Assert
+    verify(wrapper).insert(eq(expectedObjectKey), payloadCaptor.capture());
+
+    Map<String, ObjectStorageTableMetadata> insertedMetadata =
+        Serializer.deserialize(
+            payloadCaptor.getValue(),
+            new TypeReference<Map<String, ObjectStorageTableMetadata>>() {});
+
+    String tableMetadataKey = namespace + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + table;
+    assertThat(insertedMetadata).containsKey(tableMetadataKey);
+
+    ObjectStorageTableMetadata insertedTableMetadata = insertedMetadata.get(tableMetadataKey);
+    assertThat(insertedTableMetadata.getPartitionKeyNames()).containsExactly("c1");
+    assertThat(insertedTableMetadata.getColumns())
+        .containsEntry("c1", "int")
+        .containsEntry("c2", "text")
+        .containsEntry("c3", "bigint");
+  }
+
+  @Test
+  public void upgrade_WithExistingTables_ShouldUpsertNamespaces() throws Exception {
+    // Arrange
+    String tableMetadataKey1 = "ns1" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + "tbl1";
+    String tableMetadataKey2 = "ns1" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + "tbl2";
+    String tableMetadataKey3 = "ns2" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + "tbl3";
+
+    Map<String, ObjectStorageTableMetadata> tableMetadataMap = new HashMap<>();
+    tableMetadataMap.put(
+        tableMetadataKey1, new ObjectStorageTableMetadata(null, null, null, null, null));
+    tableMetadataMap.put(
+        tableMetadataKey2, new ObjectStorageTableMetadata(null, null, null, null, null));
+    tableMetadataMap.put(
+        tableMetadataKey3, new ObjectStorageTableMetadata(null, null, null, null, null));
+    String serializedTableMetadata = Serializer.serialize(tableMetadataMap);
+    ObjectStorageWrapperResponse tableMetadataResponse =
+        new ObjectStorageWrapperResponse(serializedTableMetadata, "version1");
+
+    Map<String, ObjectStorageNamespaceMetadata> namespaceMetadataMap = new HashMap<>();
+    String serializedNamespaceMetadata = Serializer.serialize(namespaceMetadataMap);
+    ObjectStorageWrapperResponse namespaceMetadataResponse =
+        new ObjectStorageWrapperResponse(serializedNamespaceMetadata, "version2");
+
+    String tableMetadataObjectKey =
+        ObjectStorageUtils.getObjectKey(
+            METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE);
+    String namespaceMetadataObjectKey =
+        ObjectStorageUtils.getObjectKey(
+            METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE);
+
+    // Mock table metadata to return existing tables
+    when(wrapper.get(tableMetadataObjectKey)).thenReturn(Optional.of(tableMetadataResponse));
+
+    // First call returns empty namespace metadata, second call returns metadata with ns1
+    Map<String, ObjectStorageNamespaceMetadata> namespaceMetadataMapAfterInsert = new HashMap<>();
+    namespaceMetadataMapAfterInsert.put("ns1", new ObjectStorageNamespaceMetadata("ns1"));
+    String serializedNamespaceMetadataAfterInsert =
+        Serializer.serialize(namespaceMetadataMapAfterInsert);
+    ObjectStorageWrapperResponse namespaceMetadataResponseAfterInsert =
+        new ObjectStorageWrapperResponse(serializedNamespaceMetadataAfterInsert, "version3");
+
+    when(wrapper.get(namespaceMetadataObjectKey))
+        .thenReturn(Optional.of(namespaceMetadataResponse))
+        .thenReturn(Optional.of(namespaceMetadataResponseAfterInsert));
+
+    // Act
+    admin.upgrade(Collections.emptyMap());
+
+    // Assert
+    // First namespace should trigger insert (when metadata table is empty)
+    verify(wrapper).insert(objectKeyCaptor.capture(), payloadCaptor.capture());
+
+    Map<String, ObjectStorageNamespaceMetadata> insertedMetadata =
+        Serializer.deserialize(
+            payloadCaptor.getValue(),
+            new TypeReference<Map<String, ObjectStorageNamespaceMetadata>>() {});
+    assertThat(insertedMetadata).containsKey("ns1");
+
+    // Second namespace should trigger update (when metadata table is not empty)
+    verify(wrapper)
+        .update(
+            eq(namespaceMetadataObjectKey),
+            payloadCaptor.capture(),
+            eq(namespaceMetadataResponseAfterInsert.getVersion()));
+
+    Map<String, ObjectStorageNamespaceMetadata> updatedMetadata =
+        Serializer.deserialize(
+            payloadCaptor.getValue(),
+            new TypeReference<Map<String, ObjectStorageNamespaceMetadata>>() {});
+    assertThat(updatedMetadata).containsKeys("ns1", "ns2");
+  }
+}
diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java
new file mode 100644
index 0000000000..4f8cd0a2c4
--- /dev/null
+++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java
@@ -0,0 +1,114 @@
+package com.scalar.db.storage.objectstorage;
+
+import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
+import static org.mockito.Mockito.when;
+
+import com.scalar.db.api.Delete;
+import com.scalar.db.api.Put;
+import com.scalar.db.api.TableMetadata;
+import com.scalar.db.io.Key;
+import java.util.Collections;
+import java.util.LinkedHashSet;
+import org.assertj.core.api.Assertions;
+import 
org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class ObjectStorageMutationTest { + private static final String ANY_NAMESPACE_NAME = "namespace"; + private static final String ANY_TABLE_NAME = "table"; + private static final String ANY_NAME_1 = "name1"; + private static final String ANY_NAME_2 = "name2"; + private static final String ANY_NAME_3 = "name3"; + private static final String ANY_NAME_4 = "name4"; + private static final String ANY_TEXT_1 = "text1"; + private static final String ANY_TEXT_2 = "text2"; + private static final int ANY_INT_1 = 1; + private static final int ANY_INT_2 = 2; + + @Mock private TableMetadata metadata; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + + when(metadata.getPartitionKeyNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_1))); + } + + private Put preparePut() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); + return Put.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .intValue(ANY_NAME_3, ANY_INT_1) + .intValue(ANY_NAME_4, ANY_INT_2) + .build(); + } + + private Delete prepareDelete() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); + return Delete.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .build(); + } + + @Test + public void makeRecord_PutGiven_ShouldReturnWithValues() { + // Arrange + Put put = preparePut(); + ObjectStorageMutation objectStorageMutation = new ObjectStorageMutation(put, metadata); + String concatenatedKey = objectStorageMutation.getRecordId(); + + // Act + ObjectStorageRecord actual = objectStorageMutation.makeRecord(); + + // Assert + assertThat(actual.getId()).isEqualTo(concatenatedKey); + Assertions.assertThat(actual.getPartitionKey().get(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); + Assertions.assertThat(actual.getClusteringKey().get(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); + Assertions.assertThat(actual.getValues().get(ANY_NAME_3)).isEqualTo(ANY_INT_1); + Assertions.assertThat(actual.getValues().get(ANY_NAME_4)).isEqualTo(ANY_INT_2); + } + + @Test + public void makeRecord_PutWithNullValueGiven_ShouldReturnWithValues() { + // Arrange + Put put = preparePut(); + put = Put.newBuilder(put).intValue(ANY_NAME_3, null).build(); + ObjectStorageMutation objectStorageMutation = new ObjectStorageMutation(put, metadata); + String concatenatedKey = objectStorageMutation.getRecordId(); + + // Act + ObjectStorageRecord actual = objectStorageMutation.makeRecord(); + + // Assert + assertThat(actual.getId()).isEqualTo(concatenatedKey); + Assertions.assertThat(actual.getPartitionKey().get(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); + Assertions.assertThat(actual.getClusteringKey().get(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); + Assertions.assertThat(actual.getValues().containsKey(ANY_NAME_3)).isTrue(); + Assertions.assertThat(actual.getValues().get(ANY_NAME_3)).isNull(); + Assertions.assertThat(actual.getValues().get(ANY_NAME_4)).isEqualTo(ANY_INT_2); + } + + @Test + public void makeRecord_DeleteGiven_ShouldReturnEmpty() { + // Arrange + Delete delete = prepareDelete(); + ObjectStorageMutation objectStorageMutation = new ObjectStorageMutation(delete, metadata); + + // Act + 
ObjectStorageRecord actual = objectStorageMutation.makeRecord(); + + // Assert + assertThat(actual.getId()).isEqualTo(""); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java new file mode 100644 index 0000000000..bef7822d98 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java @@ -0,0 +1,669 @@ +package com.scalar.db.storage.objectstorage; + +import static com.scalar.db.api.ConditionBuilder.column; +import static com.scalar.db.api.ConditionBuilder.deleteIf; +import static com.scalar.db.api.ConditionBuilder.deleteIfExists; +import static com.scalar.db.api.ConditionBuilder.putIf; +import static com.scalar.db.api.ConditionBuilder.putIfExists; +import static com.scalar.db.api.ConditionBuilder.putIfNotExists; +import static org.assertj.core.api.Assertions.assertThatCode; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; +import static org.mockito.MockitoAnnotations.openMocks; + +import com.scalar.db.api.Delete; +import com.scalar.db.api.Get; +import com.scalar.db.api.MutationCondition; +import com.scalar.db.api.Put; +import com.scalar.db.api.Scan; +import com.scalar.db.api.StorageInfo; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.StorageInfoImpl; +import com.scalar.db.common.StorageInfoProvider; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.DataType; +import com.scalar.db.io.Key; +import java.util.Arrays; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; + +public class ObjectStorageOperationCheckerTest { + private static final String NAMESPACE_NAME = "n1"; + private static final String TABLE_NAME = "t1"; + private static final String PKEY1 = "p1"; + private static final String CKEY1 = "c1"; + private static final String COL1 = "v1"; + private static final String COL2 = "v2"; + private static final String COL3 = "v3"; + private static final String COL4 = "v4"; + private static final StorageInfo STORAGE_INFO = + new StorageInfoImpl("ObjectStorage", StorageInfo.MutationAtomicityUnit.STORAGE, 100); + + private static final TableMetadata TABLE_METADATA1 = + TableMetadata.newBuilder() + .addColumn(PKEY1, DataType.INT) + .addColumn(CKEY1, DataType.INT) + .addColumn(COL1, DataType.INT) + .addColumn(COL2, DataType.BOOLEAN) + .addColumn(COL3, DataType.TEXT) + .addColumn(COL4, DataType.BLOB) + .addPartitionKey(PKEY1) + .addClusteringKey(CKEY1) + .build(); + + private static final TableMetadata TABLE_METADATA2 = + TableMetadata.newBuilder() + .addColumn(PKEY1, DataType.TEXT) + .addColumn(CKEY1, DataType.TEXT) + .addPartitionKey(PKEY1) + .addClusteringKey(CKEY1) + .build(); + + @Mock private DatabaseConfig databaseConfig; + @Mock private TableMetadataManager metadataManager; + @Mock private StorageInfoProvider storageInfoProvider; + private ObjectStorageOperationChecker operationChecker; + + @BeforeEach + public void setUp() throws Exception { + openMocks(this).close(); + when(storageInfoProvider.getStorageInfo(any())).thenReturn(STORAGE_INFO); + operationChecker = + new ObjectStorageOperationChecker(databaseConfig, metadataManager, storageInfoProvider); + } + + @Test + public 
void check_ForMutationsWithPut_ShouldDoNothing() throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); + Put putWithoutSettingIndex = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 0)) + .build(); + Put put = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 1)) + .intValue(COL1, 1) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(Arrays.asList(putWithoutSettingIndex, put))) + .doesNotThrowAnyException(); + } + + @Test + public void check_ForMutationsWithDelete_ShouldDoNothing() throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); + Delete deleteWithoutSettingIndex = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 0)) + .build(); + Delete delete = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 1)) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(Arrays.asList(deleteWithoutSettingIndex, delete))) + .doesNotThrowAnyException(); + } + + @Test + public void + check_GetGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); + + Get get1 = + Get.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Get get2 = + Get.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Get get3 = + Get.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .build(); + Get get4 = + Get.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Get get5 = + Get.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(get1)).doesNotThrowAnyException(); + assertThatThrownBy(() -> operationChecker.check(get2)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(get3)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(get4)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(get5)) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + check_ScanGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); + + Scan scan1 = + Scan.newBuilder() + .namespace(NAMESPACE_NAME) + 
.table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .start(Key.ofText(CKEY1, "ab")) + .end(Key.ofText(CKEY1, "ab")) + .build(); + Scan scan2 = + Scan.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .start(Key.ofText(CKEY1, "ab")) + .end(Key.ofText(CKEY1, "ab")) + .build(); + Scan scan3 = + Scan.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .start(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .end(Key.ofText(CKEY1, "ab")) + .build(); + Scan scan4 = + Scan.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .start(Key.ofText(CKEY1, "ab")) + .end(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(scan1)).doesNotThrowAnyException(); + assertThatThrownBy(() -> operationChecker.check(scan2)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(scan3)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(scan4)) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + check_PutGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); + + Put put1 = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Put put2 = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Put put3 = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(put1)).doesNotThrowAnyException(); + assertThatThrownBy(() -> operationChecker.check(put2)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(put3)) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + check_DeleteGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); + + Delete delete1 = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Delete delete2 = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Delete delete3 = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(delete1)).doesNotThrowAnyException(); + assertThatThrownBy(() -> operationChecker.check(delete2)) + 
.isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(delete3)) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + check_MutationsGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); + when(storageInfoProvider.getStorageInfo(any())).thenReturn(STORAGE_INFO); + + Put put1 = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Put put2 = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Delete delete1 = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Delete delete2 = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(Arrays.asList(put1, delete1))) + .doesNotThrowAnyException(); + assertThatThrownBy(() -> operationChecker.check(Arrays.asList(put2, delete1))) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(Arrays.asList(put1, delete2))) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void check_ForPutWithCondition_ShouldBehaveProperly() throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); + + // Act Assert + assertThatCode(() -> operationChecker.check(buildPutWithCondition(putIfExists()))) + .doesNotThrowAnyException(); + assertThatCode(() -> operationChecker.check(buildPutWithCondition(putIfNotExists()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL1).isEqualToInt(1)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition( + putIf(column(COL1).isGreaterThanOrEqualToInt(1)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL1).isNullInt()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL2).isEqualToBoolean(true)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL2).isNotEqualToBoolean(true)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL2).isNullBoolean()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL2).isNotNullBoolean()).build()))) + .doesNotThrowAnyException(); + assertThatThrownBy( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL2).isGreaterThanBoolean(false)).build()))) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy( + () -> + operationChecker.check( + buildPutWithCondition( + 
putIf(column(COL2).isLessThanOrEqualToBoolean(true)).build()))) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void check_ForDeleteWithCondition_ShouldBehaveProperly() throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); + + // Act Assert + assertThatCode(() -> operationChecker.check(buildDeleteWithCondition(deleteIfExists()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition(deleteIf(column(COL1).isEqualToInt(1)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL1).isGreaterThanOrEqualToInt(1)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition(deleteIf(column(COL1).isNullInt()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL2).isEqualToBoolean(true)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL2).isNotEqualToBoolean(true)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition(deleteIf(column(COL2).isNullBoolean()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition(deleteIf(column(COL2).isNotNullBoolean()).build()))) + .doesNotThrowAnyException(); + assertThatThrownBy( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL2).isGreaterThanBoolean(false)).build()))) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL2).isLessThanOrEqualToBoolean(true)).build()))) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void check_ForMutationsWithPutWithCondition_ShouldBehaveProperly() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); + Put put = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 1)) + .intValue(COL1, 1) + .build(); + + // Act Assert + assertThatCode( + () -> operationChecker.check(Arrays.asList(buildPutWithCondition(putIfExists()), put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check(Arrays.asList(buildPutWithCondition(putIfNotExists()), put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition(putIf(column(COL1).isEqualToInt(1)).build()), put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + putIf(column(COL1).isGreaterThanOrEqualToInt(1)).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition(putIf(column(COL1).isNullInt()).build()), put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition(putIf(column(COL2).isEqualToBoolean(true)).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + 
putIf(column(COL2).isNotEqualToBoolean(true)).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition(putIf(column(COL2).isNullBoolean()).build()), put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition(putIf(column(COL2).isNotNullBoolean()).build()), + put))) + .doesNotThrowAnyException(); + assertThatThrownBy( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + putIf(column(COL2).isGreaterThanBoolean(false)).build()), + put))) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + putIf(column(COL2).isLessThanOrEqualToBoolean(true)).build()), + put))) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void check_ForMutationsWithDeleteWithCondition_ShouldBehaveProperly() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); + Delete delete = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 1)) + .build(); + + // Act Assert + assertThatCode( + () -> + operationChecker.check( + Arrays.asList(buildDeleteWithCondition(deleteIfExists()), delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition(deleteIf(column(COL1).isEqualToInt(1)).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL1).isGreaterThanOrEqualToInt(1)).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition(deleteIf(column(COL1).isNullInt()).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL2).isEqualToBoolean(true)).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL2).isNotEqualToBoolean(true)).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition(deleteIf(column(COL2).isNullBoolean()).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition(deleteIf(column(COL2).isNotNullBoolean()).build()), + delete))) + .doesNotThrowAnyException(); + assertThatThrownBy( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL2).isGreaterThanBoolean(false)).build()), + delete))) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL2).isLessThanOrEqualToBoolean(true)).build()), + delete))) + .isInstanceOf(IllegalArgumentException.class); + } + + private Put buildPutWithCondition(MutationCondition condition) { + return Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 1)) + .intValue(COL1, 1) + .condition(condition) + .build(); + } + + 
private Delete buildDeleteWithCondition(MutationCondition condition) { + return Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 1)) + .condition(condition) + .build(); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationTest.java new file mode 100644 index 0000000000..6bdb1347d4 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationTest.java @@ -0,0 +1,109 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.scalar.db.api.Get; +import com.scalar.db.api.Operation; +import com.scalar.db.api.Put; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.Key; +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedHashSet; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class ObjectStorageOperationTest { + private static final String ANY_NAMESPACE_NAME = "namespace"; + private static final String ANY_TABLE_NAME = "table"; + private static final String ANY_NAME_1 = "name1"; + private static final String ANY_NAME_2 = "name2"; + private static final String ANY_NAME_3 = "name3"; + private static final String ANY_TEXT_1 = "text1"; + private static final String ANY_TEXT_2 = "text2"; + private static final int ANY_INT_1 = 1; + + @Mock private TableMetadata metadata; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + } + + @Test + public void checkArgument_WrongOperationGiven_ShouldThrowIllegalArgumentException() { + // Arrange + Operation operation = mock(Put.class); + ObjectStorageOperation objectStorageOperation = new ObjectStorageOperation(operation, metadata); + + // Act Assert + assertThatThrownBy(() -> objectStorageOperation.checkArgument(Get.class)) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void getConcatenatedPartitionKey_MultipleKeysGiven_ShouldReturnConcatenatedPartitionKey() { + // Arrange + when(metadata.getPartitionKeyNames()) + .thenReturn(new LinkedHashSet<>(Arrays.asList(ANY_NAME_1, ANY_NAME_2, ANY_NAME_3))); + + Key partitionKey = + Key.of(ANY_NAME_1, ANY_TEXT_1, ANY_NAME_2, ANY_TEXT_2, ANY_NAME_3, ANY_INT_1); + Get get = + Get.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .build(); + ObjectStorageOperation objectStorageOperation = new ObjectStorageOperation(get, metadata); + + // Act + String actual = objectStorageOperation.getConcatenatedPartitionKey(); + + // Assert + assertThat(actual) + .isEqualTo( + String.join( + String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), + ANY_TEXT_1, + ANY_TEXT_2, + String.valueOf(ANY_INT_1))); + } + + @Test + public void getId_MultipleKeysGiven_ShouldReturnConcatenatedPartitionKey() { + // Arrange + when(metadata.getPartitionKeyNames()) + .thenReturn(new LinkedHashSet<>(Arrays.asList(ANY_NAME_1, ANY_NAME_3))); + when(metadata.getClusteringKeyNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_2))); + + Key partitionKey = Key.of(ANY_NAME_1, ANY_TEXT_1, 
ANY_NAME_3, ANY_INT_1); + Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); + Get get = + Get.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .build(); + ObjectStorageOperation objectStorageOperation = new ObjectStorageOperation(get, metadata); + + // Act + String actual = objectStorageOperation.getRecordId(); + + // Assert + assertThat(actual) + .isEqualTo( + String.join( + String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), + ANY_TEXT_1, + String.valueOf(ANY_INT_1), + ANY_TEXT_2)); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageUtilsTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageUtilsTest.java new file mode 100644 index 0000000000..0c57b5f583 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageUtilsTest.java @@ -0,0 +1,48 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.jupiter.api.Test; + +public class ObjectStorageUtilsTest { + @Test + public void getObjectKey_GivenAllNames_ShouldReturnExpectedObjectKey() { + // Arrange + String namespaceName = "namespace"; + String tableName = "table"; + String partitionName = "partition"; + + // Act + String actual = ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionName); + + // Assert + assertThat(actual).isEqualTo("namespace/table/partition"); + } + + @Test + public void getObjectKey_GivenNamespaceAndTableNames_ShouldReturnExpectedObjectKeyPrefix() { + // Arrange + String namespaceName = "namespace"; + String tableName = "table"; + String partitionName = ""; + + // Act + String actual = ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionName); + + // Assert + assertThat(actual).isEqualTo("namespace/table/"); + } + + @Test + public void getObjectKey_GivenNamespaceAndTableNames_ShouldReturnExpectedObjectKey() { + // Arrange + String namespaceName = "namespace"; + String tableName = "table"; + + // Act + String actual = ObjectStorageUtils.getObjectKey(namespaceName, tableName); + + // Assert + assertThat(actual).isEqualTo("namespace/table"); + } +} diff --git a/integration-test/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitSpecificIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitSpecificIntegrationTestBase.java index 476b33fc37..9364e9030c 100644 --- a/integration-test/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitSpecificIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitSpecificIntegrationTestBase.java @@ -4221,8 +4221,9 @@ void scan_OverlappingPutWithConjunctionsGivenBefore_ShouldThrowIllegalArgumentEx @ParameterizedTest @EnumSource(Isolation.class) - void scanWithIndex_PutWithOverlappedIndexKeyAndNonOverlappedConjunctionsGivenBefore_ShouldScan( - Isolation isolation) throws TransactionException { + public void + scanWithIndex_PutWithOverlappedIndexKeyAndNonOverlappedConjunctionsGivenBefore_ShouldScan( + Isolation isolation) throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(isolation); DistributedTransaction transaction = manager.begin(); @@ -4246,7 +4247,7 @@ void scanWithIndex_PutWithOverlappedIndexKeyAndNonOverlappedConjunctionsGivenBef @ParameterizedTest @EnumSource(Isolation.class) - void 
+ public void scanWithIndex_OverlappingPutWithNonIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( Isolation isolation) throws TransactionException { // Arrange @@ -4269,7 +4270,7 @@ void scanWithIndex_PutWithOverlappedIndexKeyAndNonOverlappedConjunctionsGivenBef @ParameterizedTest @EnumSource(Isolation.class) - void + public void scanWithIndex_NonOverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( Isolation isolation) throws TransactionException { // Arrange @@ -4290,8 +4291,9 @@ void scanWithIndex_PutWithOverlappedIndexKeyAndNonOverlappedConjunctionsGivenBef @ParameterizedTest @EnumSource(Isolation.class) - void scanWithIndex_OverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( - Isolation isolation) throws TransactionException { + public void + scanWithIndex_OverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( + Isolation isolation) throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(isolation); populateRecord(manager, namespace1, TABLE_1); @@ -4310,7 +4312,7 @@ void scanWithIndex_OverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegal @ParameterizedTest @EnumSource(Isolation.class) - void + public void scanWithIndex_OverlappingPutWithIndexedColumnAndConjunctionsGivenBefore_ShouldThrowIllegalArgumentException( Isolation isolation) throws TransactionException { // Arrange @@ -5447,7 +5449,7 @@ void scan_ScanAllWithLimitGiven_WithSerializable_ShouldNotThrowAnyException() } @Test - void scan_ScanWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() + public void scan_ScanWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(Isolation.SERIALIZABLE); @@ -5513,7 +5515,7 @@ void scan_ScanWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() } @Test - void + public void scan_ScanWithIndexGiven_RecordUpdatedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() throws TransactionException { // Arrange @@ -5590,8 +5592,9 @@ void scan_ScanWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() } @Test - void scan_ScanWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrowAnyException() - throws TransactionException { + public void + scan_ScanWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrowAnyException() + throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(Isolation.SERIALIZABLE); manager.mutate( @@ -5665,7 +5668,7 @@ void scan_ScanWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThr } @Test - void + public void scan_ScanWithIndexGiven_RecordDeletedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() throws TransactionException { // Arrange @@ -5741,8 +5744,9 @@ void scan_ScanWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThr } @Test - void scan_ScanWithIndexGiven_RecordDeletedByMyself_WithSerializable_ShouldNotThrowAnyException() - throws TransactionException { + public void + scan_ScanWithIndexGiven_RecordDeletedByMyself_WithSerializable_ShouldNotThrowAnyException() + throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(Isolation.SERIALIZABLE); manager.mutate( @@ -5815,7 +5819,7 @@ void scan_ScanWithIndexGiven_RecordDeletedByMyself_WithSerializable_ShouldNotThr } @Test - void 
scan_ScanWithIndexWithLimitGiven_WithSerializable_ShouldNotThrowAnyException() + public void scan_ScanWithIndexWithLimitGiven_WithSerializable_ShouldNotThrowAnyException() throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(Isolation.SERIALIZABLE); @@ -5882,7 +5886,7 @@ void scan_ScanWithIndexWithLimitGiven_WithSerializable_ShouldNotThrowAnyExceptio } @Test - void get_GetWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() + public void get_GetWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(Isolation.SERIALIZABLE); @@ -5914,7 +5918,7 @@ void get_GetWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() } @Test - void + public void get_GetWithIndexGiven_RecordUpdatedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() throws TransactionException { // Arrange @@ -5957,8 +5961,9 @@ void get_GetWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() } @Test - void get_GetWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrowAnyException() - throws TransactionException { + public void + get_GetWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrowAnyException() + throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(Isolation.SERIALIZABLE); manager.insert( @@ -5998,7 +6003,7 @@ void get_GetWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrow } @Test - void + public void get_GetWithIndexGiven_RecordDeletedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() throws TransactionException { // Arrange @@ -6040,8 +6045,9 @@ void get_GetWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrow } @Test - void get_GetWithIndexGiven_RecordDeletedByMyself_WithSerializable_ShouldNotThrowAnyException() - throws TransactionException { + public void + get_GetWithIndexGiven_RecordDeletedByMyself_WithSerializable_ShouldNotThrowAnyException() + throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(Isolation.SERIALIZABLE); manager.insert( @@ -6297,8 +6303,9 @@ void getScanner_RecordInsertedByAnotherTransaction_WithSerializable_ShouldNotThr } @Test - void get_GetWithIndexGiven_NoRecordsInIndexRange_WithSerializable_ShouldNotThrowAnyException() - throws TransactionException { + public void + get_GetWithIndexGiven_NoRecordsInIndexRange_WithSerializable_ShouldNotThrowAnyException() + throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(Isolation.SERIALIZABLE); @@ -6318,7 +6325,7 @@ void get_GetWithIndexGiven_NoRecordsInIndexRange_WithSerializable_ShouldNotThrow } @Test - void + public void get_GetWithIndexGiven_RecordInsertedIntoIndexRangeByMyself_WithSerializable_ShouldNotThrowAnyException() throws TransactionException { // Arrange @@ -6360,7 +6367,7 @@ void get_GetWithIndexGiven_NoRecordsInIndexRange_WithSerializable_ShouldNotThrow } @Test - void + public void get_GetWithIndexGiven_RecordInsertedIntoIndexRangeByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() throws TransactionException { // Arrange @@ -6404,7 +6411,7 @@ void get_GetWithIndexGiven_NoRecordsInIndexRange_WithSerializable_ShouldNotThrow } @Test - void + public void get_GetWithIndexGiven_NoRecordsInIndexRange_RecordInsertedIntoIndexRangeByMyself_WithSerializable_ShouldNotThrowAnyException() throws 
TransactionException { // Arrange @@ -6435,7 +6442,7 @@ void get_GetWithIndexGiven_NoRecordsInIndexRange_WithSerializable_ShouldNotThrow } @Test - void + public void get_GetWithIndexGiven_NoRecordsInIndexRange_RecordInsertedIntoIndexRangeByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() throws TransactionException { // Arrange @@ -6469,7 +6476,7 @@ void get_GetWithIndexGiven_NoRecordsInIndexRange_WithSerializable_ShouldNotThrow @ParameterizedTest @EnumSource(Isolation.class) - void getAndUpdate_GetWithIndexGiven_ShouldUpdate(Isolation isolation) + public void getAndUpdate_GetWithIndexGiven_ShouldUpdate(Isolation isolation) throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(isolation); @@ -6524,7 +6531,7 @@ void getAndUpdate_GetWithIndexGiven_ShouldUpdate(Isolation isolation) @ParameterizedTest @EnumSource(Isolation.class) - void scanAndUpdate_ScanWithIndexGiven_ShouldUpdate(Isolation isolation) + public void scanAndUpdate_ScanWithIndexGiven_ShouldUpdate(Isolation isolation) throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(isolation); From 8938ee98017b9f1f179be94dadc751c39908b6de Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Thu, 30 Oct 2025 23:48:16 +0900 Subject: [PATCH 03/20] Add integration tests for Wrapper --- .../BlobWrapperIntegrationTest.java | 10 + ...jectStorageWrapperIntegrationTestBase.java | 285 ++++++++++++++++++ 2 files changed, 295 insertions(+) create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/BlobWrapperIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTestBase.java diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/BlobWrapperIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/BlobWrapperIntegrationTest.java new file mode 100644 index 0000000000..843678f8f9 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/BlobWrapperIntegrationTest.java @@ -0,0 +1,10 @@ +package com.scalar.db.storage.objectstorage; + +import java.util.Properties; + +public class BlobWrapperIntegrationTest extends ObjectStorageWrapperIntegrationTestBase { + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTestBase.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTestBase.java new file mode 100644 index 0000000000..4add81ec7e --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTestBase.java @@ -0,0 +1,285 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatCode; + +import com.scalar.db.config.DatabaseConfig; +import java.util.Optional; +import java.util.Properties; +import java.util.Set; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public abstract class ObjectStorageWrapperIntegrationTestBase { + private static 
final Logger logger =
+      LoggerFactory.getLogger(ObjectStorageWrapperIntegrationTestBase.class);
+
+  private static final String TEST_NAME = "object_storage_wrapper_integration_test";
+  private static final String TEST_KEY1 = "test-key1";
+  private static final String TEST_KEY2 = "test-key2";
+  private static final String TEST_KEY3 = "test-key3";
+  private static final String TEST_KEY_PREFIX = "test-key";
+  private static final String TEST_OBJECT1 = "test-object1";
+  private static final String TEST_OBJECT2 = "test-object2";
+  private static final String TEST_OBJECT3 = "test-object3";
+
+  protected ObjectStorageWrapper wrapper;
+
+  @BeforeAll
+  public void beforeAll() throws ObjectStorageWrapperException {
+    Properties properties = getProperties(TEST_NAME);
+    ObjectStorageConfig objectStorageConfig =
+        ObjectStorageUtils.getObjectStorageConfig(new DatabaseConfig(properties));
+    wrapper = ObjectStorageWrapperFactory.create(objectStorageConfig);
+    createObjects();
+  }
+
+  @AfterAll
+  public void afterAll() {
+    try {
+      deleteObjects();
+    } catch (Exception e) {
+      logger.warn("Failed to delete objects", e);
+    }
+
+    try {
+      if (wrapper != null) {
+        wrapper.close();
+      }
+    } catch (Exception e) {
+      logger.warn("Failed to close wrapper", e);
+    }
+  }
+
+  protected abstract Properties getProperties(String testName);
+
+  private void createObjects() throws ObjectStorageWrapperException {
+    wrapper.insert(TEST_KEY1, TEST_OBJECT1);
+    wrapper.insert(TEST_KEY2, TEST_OBJECT2);
+    wrapper.insert(TEST_KEY3, TEST_OBJECT3);
+  }
+
+  protected void deleteObjects() throws ObjectStorageWrapperException {
+    wrapper.delete(TEST_KEY1);
+    wrapper.delete(TEST_KEY2);
+    wrapper.delete(TEST_KEY3);
+  }
+
+  @Test
+  public void get_ExistingObjectKeyGiven_ShouldReturnCorrectObject() throws Exception {
+    // Arrange
+
+    // Act
+    Optional<ObjectStorageWrapperResponse> response = wrapper.get(TEST_KEY1);
+
+    // Assert
+    assertThat(response.isPresent()).isTrue();
+    assertThat(response.get().getPayload()).isEqualTo(TEST_OBJECT1);
+  }
+
+  @Test
+  public void get_NonExistingObjectKeyGiven_ShouldReturnEmptyOptional() throws Exception {
+    // Arrange
+
+    // Act
+    Optional<ObjectStorageWrapperResponse> response = wrapper.get("non-existing-key");
+
+    // Assert
+    assertThat(response.isPresent()).isFalse();
+  }
+
+  @Test
+  public void insert_NewObjectKeyGiven_ShouldInsertObjectSuccessfully() throws Exception {
+    // Arrange
+    String objectKey = "new-object-key";
+    String object = "new-object";
+
+    try {
+      // Act
+      wrapper.insert(objectKey, object);
+
+      // Assert
+      Optional<ObjectStorageWrapperResponse> response = wrapper.get(objectKey);
+      assertThat(response.isPresent()).isTrue();
+      assertThat(response.get().getPayload()).isEqualTo(object);
+    } finally {
+      wrapper.delete(objectKey);
+    }
+  }
+
+  @Test
+  public void insert_ExistingObjectKeyGiven_ShouldThrowPreconditionFailedException() {
+    // Arrange
+
+    // Act Assert
+    assertThatCode(() -> wrapper.insert(TEST_KEY2, "another-object"))
+        .isInstanceOf(PreconditionFailedException.class);
+  }
+
+  @Test
+  public void update_ExistingObjectKeyGiven_ShouldUpdateObjectSuccessfully() throws Exception {
+    // Arrange
+    String updatedObject = "updated-object2";
+    Optional<ObjectStorageWrapperResponse> response1 = wrapper.get(TEST_KEY2);
+    assertThat(response1.isPresent()).isTrue();
+    String version = response1.get().getVersion();
+
+    try {
+      // Act
+      wrapper.update(TEST_KEY2, updatedObject, version);
+
+      // Assert
+      Optional<ObjectStorageWrapperResponse> response2 = wrapper.get(TEST_KEY2);
+      assertThat(response2.isPresent()).isTrue();
+      assertThat(response2.get().getPayload()).isEqualTo(updatedObject);
+    } finally {
+      wrapper.delete(TEST_KEY2);
+      wrapper.insert(TEST_KEY2, TEST_OBJECT2);
+    }
+  }
+
+  @Test
+  public void update_NonExistingObjectKeyGiven_ShouldThrowPreconditionFailedException() {
+    // Arrange
+    String objectKey = "non-existing-key";
+
+    // Act Assert
+    assertThatCode(() -> wrapper.update(objectKey, "some-object", "some-version"))
+        .isInstanceOf(PreconditionFailedException.class);
+  }
+
+  @Test
+  public void update_WrongVersionGiven_ShouldThrowPreconditionFailedException() throws Exception {
+    // Arrange
+    String wrongVersion = "wrong-version";
+
+    // Act Assert
+    assertThatCode(() -> wrapper.update(TEST_KEY2, "another-object", wrongVersion))
+        .isInstanceOf(PreconditionFailedException.class);
+  }
+
+  @Test
+  public void delete_ExistingObjectKeyGiven_ShouldDeleteObjectSuccessfully() throws Exception {
+    // Arrange
+    Optional<ObjectStorageWrapperResponse> response1 = wrapper.get(TEST_KEY3);
+    assertThat(response1.isPresent()).isTrue();
+
+    try {
+      // Act
+      wrapper.delete(TEST_KEY3);
+
+      // Assert
+      Optional<ObjectStorageWrapperResponse> response2 = wrapper.get(TEST_KEY3);
+      assertThat(response2.isPresent()).isFalse();
+    } finally {
+      wrapper.insert(TEST_KEY3, TEST_OBJECT3);
+    }
+  }
+
+  @Test
+  public void delete_NonExistingObjectKeyGiven_ShouldThrowPreconditionFailedException() {
+    // Arrange
+    String objectKey = "non-existing-key";
+
+    // Act Assert
+    assertThatCode(() -> wrapper.delete(objectKey)).isInstanceOf(PreconditionFailedException.class);
+  }
+
+  @Test
+  public void delete_ExistingObjectKeyWithCorrectVersionGiven_ShouldDeleteObjectSuccessfully()
+      throws Exception {
+    // Arrange
+    Optional<ObjectStorageWrapperResponse> response1 = wrapper.get(TEST_KEY1);
+    assertThat(response1.isPresent()).isTrue();
+    String version = response1.get().getVersion();
+
+    try {
+      // Act
+      wrapper.delete(TEST_KEY1, version);
+
+      // Assert
+      Optional<ObjectStorageWrapperResponse> response2 = wrapper.get(TEST_KEY1);
+      assertThat(response2.isPresent()).isFalse();
+    } finally {
+      wrapper.insert(TEST_KEY1, TEST_OBJECT1);
+    }
+  }
+
+  @Test
+  public void delete_ExistingObjectKeyWithWrongVersionGiven_ShouldThrowPreconditionFailedException()
+      throws Exception {
+    // Arrange
+    Optional<ObjectStorageWrapperResponse> response1 = wrapper.get(TEST_KEY1);
+    assertThat(response1.isPresent()).isTrue();
+    String wrongVersion = "wrong-version";
+
+    // Act Assert
+    assertThatCode(() -> wrapper.delete(TEST_KEY1, wrongVersion))
+        .isInstanceOf(PreconditionFailedException.class);
+  }
+
+  @Test
+  public void getKeys_WithPrefix_ShouldReturnCorrectKeys() throws Exception {
+    // Arrange
+
+    // Act
+    Set<String> keys = wrapper.getKeys(TEST_KEY_PREFIX);
+
+    // Assert
+    assertThat(keys).containsExactlyInAnyOrder(TEST_KEY1, TEST_KEY2, TEST_KEY3);
+  }
+
+  @Test
+  public void getKeys_WithNonExistingPrefix_ShouldReturnEmptySet() throws Exception {
+    // Arrange
+    String nonExistingPrefix = "non-existing-prefix";
+
+    // Act
+    Set<String> keys = wrapper.getKeys(nonExistingPrefix);
+
+    // Assert
+    assertThat(keys).isEmpty();
+  }
+
+  @Test
+  public void deleteByPrefix_WithExistingPrefix_ShouldDeleteObjectsSuccessfully() throws Exception {
+    // Arrange
+
+    try {
+      // Act
+      wrapper.deleteByPrefix(TEST_KEY_PREFIX);
+
+      // Assert
+      Set<String> keys = wrapper.getKeys(TEST_KEY_PREFIX);
+      assertThat(keys).isEmpty();
+    } finally {
+      createObjects();
+    }
+  }
+
+  @Test
+  public void deleteByPrefix_WithNonExistingPrefix_ShouldDoNothing() throws Exception {
+    // Arrange
+    String nonExistingPrefix = "non-existing-prefix";
+
+    // Act
+    wrapper.deleteByPrefix(nonExistingPrefix);
+
+    // Assert
+    Set<String> keys = wrapper.getKeys(TEST_KEY_PREFIX);
+    assertThat(keys).containsExactlyInAnyOrder(TEST_KEY1, TEST_KEY2, TEST_KEY3);
+  }
+
+  @Test
+  public void
close_ShouldNotThrowException() { + // Arrange + + // Act Assert + assertThatCode(() -> wrapper.close()).doesNotThrowAnyException(); + } +} From ba7b0e979df41e734d187e9e8965a2dc2e5e31e6 Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Fri, 31 Oct 2025 09:08:05 +0900 Subject: [PATCH 04/20] Refactor --- .../BlobWrapperIntegrationTest.java | 1 + ...AdminIntegrationTestWithObjectStorage.java | 5 + ...nScanIntegrationTestWithObjectStorage.java | 23 - ...ommitIntegrationTestWithObjectStorage.java | 31 - ...adataIntegrationTestWithObjectStorage.java | 13 - .../ConsensusCommitObjectStorageEnv.java | 22 - ...cificIntegrationTestWithObjectStorage.java | 130 --- ...abledIntegrationTestWithObjectStorage.java | 13 - ...geAdminCaseSensitivityIntegrationTest.java | 4 + .../ObjectStorageAdminIntegrationTest.java | 4 + ...jectStorageAdminRepairIntegrationTest.java | 1 + ...StorageCaseSensitivityIntegrationTest.java | 45 - ...jectStorageColumnValueIntegrationTest.java | 12 - ...ageConditionalMutationIntegrationTest.java | 30 - ...rageCrossPartitionScanIntegrationTest.java | 30 - .../ObjectStorageIntegrationTest.java | 43 - .../ObjectStorageJapaneseIntegrationTest.java | 13 - ...tipleClusteringKeyScanIntegrationTest.java | 51 - ...geMultiplePartitionKeyIntegrationTest.java | 40 - ...eMutationAtomicityUnitIntegrationTest.java | 19 - ...ingleClusteringKeyScanIntegrationTest.java | 45 - ...rageSinglePartitionKeyIntegrationTest.java | 30 - .../objectstorage/ObjectStorageTestUtils.java | 19 - ...ageWithReservedKeywordIntegrationTest.java | 45 - ...AdminIntegrationTestWithObjectStorage.java | 4 + ...ctionIntegrationTestWithObjectStorage.java | 13 - ...nScanIntegrationTestWithObjectStorage.java | 23 - ...ommitIntegrationTestWithObjectStorage.java | 22 - ...cificIntegrationTestWithObjectStorage.java | 13 - ...abledIntegrationTestWithObjectStorage.java | 13 - .../ClusteringKeyComparator.java | 36 - .../objectstorage/ColumnValueMapper.java | 79 -- .../db/storage/objectstorage/MapVisitor.java | 92 -- .../objectstorage/MutateStatementHandler.java | 308 ------ .../storage/objectstorage/ObjectStorage.java | 142 --- .../objectstorage/ObjectStorageAdmin.java | 2 +- .../objectstorage/ObjectStorageConfig.java | 1 + .../objectstorage/ObjectStorageMutation.java | 61 -- .../ObjectStorageNamespaceMetadata.java | 1 + .../objectstorage/ObjectStorageOperation.java | 77 -- .../ObjectStorageOperationChecker.java | 154 --- .../objectstorage/ObjectStorageProvider.java | 3 +- .../objectstorage/ObjectStorageRecord.java | 77 -- .../ObjectStorageTableMetadata.java | 52 + .../ObjectStorageWrapperFactory.java | 1 + .../objectstorage/PartitionIdentifier.java | 45 - .../objectstorage/ResultInterpreter.java | 53 - .../db/storage/objectstorage/ScannerImpl.java | 59 -- .../objectstorage/SelectStatementHandler.java | 294 ------ .../objectstorage/StatementHandler.java | 131 --- .../objectstorage/blob/BlobProvider.java | 1 + .../MutateStatementHandlerTest.java | 992 ------------------ .../objectstorage/ObjectStorageAdminTest.java | 181 +++- .../ObjectStorageMutationTest.java | 114 -- .../ObjectStorageOperationCheckerTest.java | 669 ------------ .../ObjectStorageOperationTest.java | 109 -- .../objectstorage/ObjectStorageUtilsTest.java | 1 + 57 files changed, 232 insertions(+), 4260 deletions(-) delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java delete mode 100644 
core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitNullMetadataIntegrationTestWithObjectStorage.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCaseSensitivityIntegrationTest.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageColumnValueIntegrationTest.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageJapaneseIntegrationTest.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationAtomicityUnitIntegrationTest.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageTestUtils.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWithReservedKeywordIntegrationTest.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionIntegrationTestWithObjectStorage.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage.java delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java delete mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java delete mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ColumnValueMapper.java delete mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/MapVisitor.java delete mode 
100644 core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java delete mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java delete mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java delete mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java delete mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java delete mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java delete mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/PartitionIdentifier.java delete mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java delete mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ScannerImpl.java delete mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java delete mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java delete mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/MutateStatementHandlerTest.java delete mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java delete mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java delete mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationTest.java diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/BlobWrapperIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/BlobWrapperIntegrationTest.java index 843678f8f9..9983b05f88 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/BlobWrapperIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/BlobWrapperIntegrationTest.java @@ -3,6 +3,7 @@ import java.util.Properties; public class BlobWrapperIntegrationTest extends ObjectStorageWrapperIntegrationTestBase { + @Override protected Properties getProperties(String testName) { return ObjectStorageEnv.getProperties(testName); diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java index 6c31e7a1ff..bb16d913c2 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java @@ -7,6 +7,7 @@ public class ConsensusCommitAdminIntegrationTestWithObjectStorage extends ConsensusCommitAdminIntegrationTestBase { + @Override protected Properties getProps(String testName) { return ObjectStorageEnv.getProperties(testName); @@ -17,6 +18,10 @@ protected AdminTestUtils getAdminTestUtils(String testName) { return new ObjectStorageAdminTestUtils(getProperties(testName)); } + @Override + @Disabled("Temporary disable because it includes DML operations") + public void truncateTable_ShouldTruncateProperly() {} + @Override @Disabled("Object Storage does not support index-related operations") public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorrectly() {} diff --git 
a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java deleted file mode 100644 index 54d5c7d019..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java +++ /dev/null @@ -1,23 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.transaction.consensuscommit.ConsensusCommitConfig; -import com.scalar.db.transaction.consensuscommit.ConsensusCommitCrossPartitionScanIntegrationTestBase; -import java.util.Properties; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; - -public class ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage - extends ConsensusCommitCrossPartitionScanIntegrationTestBase { - - @Override - protected Properties getProps(String testName) { - Properties properties = ConsensusCommitObjectStorageEnv.getProperties(testName); - properties.setProperty(ConsensusCommitConfig.ISOLATION_LEVEL, "SERIALIZABLE"); - return properties; - } - - @Test - @Override - @Disabled("Cross partition scan with ordering is not supported in Object Storages") - public void scan_CrossPartitionScanWithOrderingGivenForCommittedRecord_ShouldReturnRecords() {} -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java deleted file mode 100644 index abb84e4a5d..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java +++ /dev/null @@ -1,31 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.transaction.consensuscommit.ConsensusCommitIntegrationTestBase; -import java.util.Properties; -import org.junit.jupiter.api.Disabled; - -public class ConsensusCommitIntegrationTestWithObjectStorage - extends ConsensusCommitIntegrationTestBase { - @Override - protected Properties getProps(String testName) { - return ConsensusCommitObjectStorageEnv.getProperties(testName); - } - - @Override - protected boolean isTimestampTypeSupported() { - return false; - } - - @Override - @Disabled("Object Storage does not support index-related operations") - public void get_GetGivenForIndexColumn_ShouldReturnRecords() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void scanOrGetScanner_ScanGivenForIndexColumn_ShouldReturnRecords(ScanType scanType) {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void scanOrGetScanner_ScanGivenForIndexColumnWithConjunctions_ShouldReturnRecords( - ScanType scanType) {} -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitNullMetadataIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitNullMetadataIntegrationTestWithObjectStorage.java deleted file mode 100644 index 440e753212..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitNullMetadataIntegrationTestWithObjectStorage.java +++ /dev/null @@ -1,13 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import 
com.scalar.db.transaction.consensuscommit.ConsensusCommitNullMetadataIntegrationTestBase; -import java.util.Properties; - -public class ConsensusCommitNullMetadataIntegrationTestWithObjectStorage - extends ConsensusCommitNullMetadataIntegrationTestBase { - - @Override - protected Properties getProperties(String testName) { - return ConsensusCommitObjectStorageEnv.getProperties(testName); - } -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java deleted file mode 100644 index 25d5c9a174..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java +++ /dev/null @@ -1,22 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.transaction.consensuscommit.ConsensusCommitTestUtils; -import java.util.Map; -import java.util.Properties; - -public class ConsensusCommitObjectStorageEnv { - private ConsensusCommitObjectStorageEnv() {} - - public static Properties getProperties(String testName) { - Properties properties = ObjectStorageEnv.getProperties(testName); - - // Add testName as a coordinator schema suffix - ConsensusCommitTestUtils.addSuffixToCoordinatorNamespace(properties, testName); - - return ConsensusCommitTestUtils.loadConsensusCommitProperties(properties); - } - - public static Map getCreationOptions() { - return ObjectStorageEnv.getCreationOptions(); - } -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java deleted file mode 100644 index 20f961cd6a..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java +++ /dev/null @@ -1,130 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.transaction.consensuscommit.ConsensusCommitSpecificIntegrationTestBase; -import com.scalar.db.transaction.consensuscommit.Isolation; -import java.util.Properties; -import org.junit.jupiter.api.Disabled; - -public class ConsensusCommitSpecificIntegrationTestWithObjectStorage - extends ConsensusCommitSpecificIntegrationTestBase { - - @Override - protected Properties getProperties(String testName) { - return ConsensusCommitObjectStorageEnv.getProperties(testName); - } - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - scanWithIndex_PutWithOverlappedIndexKeyAndNonOverlappedConjunctionsGivenBefore_ShouldScan( - Isolation isolation) {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - scanWithIndex_OverlappingPutWithNonIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( - Isolation isolation) {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - scanWithIndex_NonOverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( - Isolation isolation) {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - scanWithIndex_OverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( - Isolation isolation) {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public 
void - scanWithIndex_OverlappingPutWithIndexedColumnAndConjunctionsGivenBefore_ShouldThrowIllegalArgumentException( - Isolation isolation) {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void scan_ScanWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - scan_ScanWithIndexGiven_RecordUpdatedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - scan_ScanWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrowAnyException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - scan_ScanWithIndexGiven_RecordDeletedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - scan_ScanWithIndexGiven_RecordDeletedByMyself_WithSerializable_ShouldNotThrowAnyException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void scan_ScanWithIndexWithLimitGiven_WithSerializable_ShouldNotThrowAnyException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void get_GetWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - get_GetWithIndexGiven_RecordUpdatedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - get_GetWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrowAnyException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - get_GetWithIndexGiven_RecordDeletedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - get_GetWithIndexGiven_RecordDeletedByMyself_WithSerializable_ShouldNotThrowAnyException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - get_GetWithIndexGiven_NoRecordsInIndexRange_WithSerializable_ShouldNotThrowAnyException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - get_GetWithIndexGiven_RecordInsertedIntoIndexRangeByMyself_WithSerializable_ShouldNotThrowAnyException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - get_GetWithIndexGiven_RecordInsertedIntoIndexRangeByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - get_GetWithIndexGiven_NoRecordsInIndexRange_RecordInsertedIntoIndexRangeByMyself_WithSerializable_ShouldNotThrowAnyException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - get_GetWithIndexGiven_NoRecordsInIndexRange_RecordInsertedIntoIndexRangeByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void 
getAndUpdate_GetWithIndexGiven_ShouldUpdate(Isolation isolation) {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void scanAndUpdate_ScanWithIndexGiven_ShouldUpdate(Isolation isolation) {} -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java deleted file mode 100644 index 884e464008..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java +++ /dev/null @@ -1,13 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.transaction.consensuscommit.ConsensusCommitWithIncludeMetadataEnabledIntegrationTestBase; -import java.util.Properties; - -public class ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage - extends ConsensusCommitWithIncludeMetadataEnabledIntegrationTestBase { - - @Override - protected Properties getProperties(String testName) { - return ConsensusCommitObjectStorageEnv.getProperties(testName); - } -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java index 43108de72f..163660f131 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java @@ -24,6 +24,10 @@ protected AdminTestUtils getAdminTestUtils(String testName) { return new ObjectStorageAdminTestUtils(getProperties(testName)); } + @Override + @Disabled("Temporary disable because it includes DML operations") + public void truncateTable_ShouldTruncateProperly() {} + @Override @Disabled("Object Storage does not support index-related operations") public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorrectly() {} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java index 2223ea477c..f34d695010 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java @@ -22,6 +22,10 @@ protected AdminTestUtils getAdminTestUtils(String testName) { return new ObjectStorageAdminTestUtils(getProperties(testName)); } + @Override + @Disabled("Temporary disable because it includes DML operations") + public void truncateTable_ShouldTruncateProperly() {} + @Override @Disabled("Object Storage does not support index-related operations") public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorrectly() {} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminRepairIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminRepairIntegrationTest.java index 791cd20e53..8266848d4e 100644 --- 
a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminRepairIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminRepairIntegrationTest.java @@ -6,6 +6,7 @@ public class ObjectStorageAdminRepairIntegrationTest extends DistributedStorageAdminRepairIntegrationTestBase { + @Override protected Properties getProperties(String testName) { return ObjectStorageEnv.getProperties(testName); diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCaseSensitivityIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCaseSensitivityIntegrationTest.java deleted file mode 100644 index 8515cbe204..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCaseSensitivityIntegrationTest.java +++ /dev/null @@ -1,45 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.api.DistributedStorageCaseSensitivityIntegrationTestBase; -import java.util.Map; -import java.util.Properties; -import org.junit.jupiter.api.Disabled; - -public class ObjectStorageCaseSensitivityIntegrationTest - extends DistributedStorageCaseSensitivityIntegrationTestBase { - - @Override - protected Properties getProperties(String testName) { - return ObjectStorageEnv.getProperties(testName); - } - - @Override - protected Map getCreationOptions() { - return ObjectStorageEnv.getCreationOptions(); - } - - @Override - @Disabled("Object Storage does not support index-related operations") - public void get_GetGivenForIndexedColumn_ShouldGet() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void get_GetGivenForIndexedColumnWithMatchedConjunctions_ShouldGet() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void get_GetGivenForIndexedColumnWithUnmatchedConjunctions_ShouldReturnEmpty() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - get_GetGivenForIndexedColumnMatchingMultipleRecords_ShouldThrowIllegalArgumentException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void scan_ScanGivenForIndexedColumn_ShouldScan() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void scan_ScanGivenForNonIndexedColumn_ShouldThrowIllegalArgumentException() {} -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageColumnValueIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageColumnValueIntegrationTest.java deleted file mode 100644 index 1514c98f76..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageColumnValueIntegrationTest.java +++ /dev/null @@ -1,12 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.api.DistributedStorageColumnValueIntegrationTestBase; -import java.util.Properties; - -public class ObjectStorageColumnValueIntegrationTest - extends DistributedStorageColumnValueIntegrationTestBase { - @Override - protected Properties getProperties(String testName) { - return ObjectStorageEnv.getProperties(testName); - } -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java 
b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java deleted file mode 100644 index 759dd22507..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java +++ /dev/null @@ -1,30 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.api.ConditionalExpression; -import com.scalar.db.api.DistributedStorageConditionalMutationIntegrationTestBase; -import java.util.List; -import java.util.Properties; -import java.util.stream.Collectors; - -public class ObjectStorageConditionalMutationIntegrationTest - extends DistributedStorageConditionalMutationIntegrationTestBase { - @Override - protected Properties getProperties(String testName) { - return ObjectStorageEnv.getProperties(testName); - } - - @Override - protected int getThreadNum() { - return 3; - } - - @Override - protected List getOperatorAndDataTypeListForTest() { - return super.getOperatorAndDataTypeListForTest().stream() - .filter( - operatorAndDataType -> - operatorAndDataType.getOperator() == ConditionalExpression.Operator.EQ - || operatorAndDataType.getOperator() == ConditionalExpression.Operator.NE) - .collect(Collectors.toList()); - } -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java deleted file mode 100644 index e3761048db..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java +++ /dev/null @@ -1,30 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.api.DistributedStorageCrossPartitionScanIntegrationTestBase; -import java.util.Properties; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; - -public class ObjectStorageCrossPartitionScanIntegrationTest - extends DistributedStorageCrossPartitionScanIntegrationTestBase { - - @Override - protected Properties getProperties(String testName) { - return ObjectStorageEnv.getProperties(testName); - } - - @Override - protected int getThreadNum() { - return 3; - } - - @Override - protected boolean isParallelDdlSupported() { - return false; - } - - @Test - @Override - @Disabled("Cross partition scan with ordering is not supported in Object Storages") - public void scan_WithOrderingForNonPrimaryColumns_ShouldReturnProperResult() {} -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java deleted file mode 100644 index ced27160f3..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java +++ /dev/null @@ -1,43 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.api.DistributedStorageIntegrationTestBase; -import java.util.Map; -import java.util.Properties; -import org.junit.jupiter.api.Disabled; - -public class ObjectStorageIntegrationTest extends DistributedStorageIntegrationTestBase { - @Override - protected Properties getProperties(String testName) { - return ObjectStorageEnv.getProperties(testName); - } - - @Override - protected Map getCreationOptions() { - return ObjectStorageEnv.getCreationOptions(); - } - - @Override - @Disabled("Object Storage does 
not support index-related operations") - public void get_GetGivenForIndexedColumn_ShouldGet() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void get_GetGivenForIndexedColumnWithMatchedConjunctions_ShouldGet() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void get_GetGivenForIndexedColumnWithUnmatchedConjunctions_ShouldReturnEmpty() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - get_GetGivenForIndexedColumnMatchingMultipleRecords_ShouldThrowIllegalArgumentException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void scan_ScanGivenForIndexedColumn_ShouldScan() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void scan_ScanGivenForNonIndexedColumn_ShouldThrowIllegalArgumentException() {} -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageJapaneseIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageJapaneseIntegrationTest.java deleted file mode 100644 index 4610d84aed..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageJapaneseIntegrationTest.java +++ /dev/null @@ -1,13 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.api.DistributedStorageJapaneseIntegrationTestBase; -import java.util.Properties; - -public class ObjectStorageJapaneseIntegrationTest - extends DistributedStorageJapaneseIntegrationTestBase { - - @Override - protected Properties getProperties(String testName) { - return ObjectStorageEnv.getProperties(testName); - } -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java deleted file mode 100644 index e3a93e8ff6..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java +++ /dev/null @@ -1,51 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.api.DistributedStorageMultipleClusteringKeyScanIntegrationTestBase; -import com.scalar.db.io.Column; -import com.scalar.db.io.DataType; -import java.util.List; -import java.util.Properties; -import java.util.stream.Collectors; - -public class ObjectStorageMultipleClusteringKeyScanIntegrationTest - extends DistributedStorageMultipleClusteringKeyScanIntegrationTestBase { - - @Override - protected Properties getProperties(String testName) { - return ObjectStorageEnv.getProperties(testName); - } - - @Override - protected List getDataTypes() { - // Return types without BLOB because blob is not supported for clustering key for now - return super.getDataTypes().stream() - .filter(type -> type != DataType.BLOB) - .collect(Collectors.toList()); - } - - @Override - protected boolean isParallelDdlSupported() { - return false; - } - - @Override - protected int getThreadNum() { - return 3; - } - - @Override - protected Column getColumnWithMinValue(String columnName, DataType dataType) { - if (dataType == DataType.TEXT) { - return ObjectStorageTestUtils.getMinTextValue(columnName); - } - return super.getColumnWithMinValue(columnName, dataType); - } - - @Override - protected Column getColumnWithMaxValue(String 
columnName, DataType dataType) { - if (dataType == DataType.TEXT) { - return ObjectStorageTestUtils.getMaxTextValue(columnName); - } - return super.getColumnWithMaxValue(columnName, dataType); - } -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java deleted file mode 100644 index d3b077df18..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java +++ /dev/null @@ -1,40 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.api.DistributedStorageMultiplePartitionKeyIntegrationTestBase; -import com.scalar.db.io.Column; -import com.scalar.db.io.DataType; -import java.util.Properties; - -public class ObjectStorageMultiplePartitionKeyIntegrationTest - extends DistributedStorageMultiplePartitionKeyIntegrationTestBase { - @Override - protected Properties getProperties(String testName) { - return ObjectStorageEnv.getProperties(testName); - } - - @Override - protected int getThreadNum() { - return 3; - } - - @Override - protected boolean isParallelDdlSupported() { - return false; - } - - @Override - protected Column getColumnWithMinValue(String columnName, DataType dataType) { - if (dataType == DataType.TEXT) { - return ObjectStorageTestUtils.getMinTextValue(columnName); - } - return super.getColumnWithMinValue(columnName, dataType); - } - - @Override - protected Column getColumnWithMaxValue(String columnName, DataType dataType) { - if (dataType == DataType.TEXT) { - return ObjectStorageTestUtils.getMaxTextValue(columnName); - } - return super.getColumnWithMaxValue(columnName, dataType); - } -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationAtomicityUnitIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationAtomicityUnitIntegrationTest.java deleted file mode 100644 index 98c4ea857f..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationAtomicityUnitIntegrationTest.java +++ /dev/null @@ -1,19 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.api.DistributedStorageMutationAtomicityUnitIntegrationTestBase; -import java.util.Map; -import java.util.Properties; - -public class ObjectStorageMutationAtomicityUnitIntegrationTest - extends DistributedStorageMutationAtomicityUnitIntegrationTestBase { - - @Override - protected Properties getProperties(String testName) { - return ObjectStorageEnv.getProperties(testName); - } - - @Override - protected Map getCreationOptions() { - return ObjectStorageEnv.getCreationOptions(); - } -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java deleted file mode 100644 index 955b94330b..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java +++ /dev/null @@ -1,45 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.api.DistributedStorageSingleClusteringKeyScanIntegrationTestBase; -import com.scalar.db.io.Column; -import com.scalar.db.io.DataType; -import 
java.util.ArrayList; -import java.util.List; -import java.util.Properties; - -public class ObjectStorageSingleClusteringKeyScanIntegrationTest - extends DistributedStorageSingleClusteringKeyScanIntegrationTestBase { - @Override - protected Properties getProperties(String testName) { - return ObjectStorageEnv.getProperties(testName); - } - - @Override - protected List getClusteringKeyTypes() { - // Return types without BLOB because blob is not supported for clustering key for now - List clusteringKeyTypes = new ArrayList<>(); - for (DataType dataType : DataType.values()) { - if (dataType == DataType.BLOB) { - continue; - } - clusteringKeyTypes.add(dataType); - } - return clusteringKeyTypes; - } - - @Override - protected Column getColumnWithMinValue(String columnName, DataType dataType) { - if (dataType == DataType.TEXT) { - return ObjectStorageTestUtils.getMinTextValue(columnName); - } - return super.getColumnWithMinValue(columnName, dataType); - } - - @Override - protected Column getColumnWithMaxValue(String columnName, DataType dataType) { - if (dataType == DataType.TEXT) { - return ObjectStorageTestUtils.getMaxTextValue(columnName); - } - return super.getColumnWithMaxValue(columnName, dataType); - } -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java deleted file mode 100644 index 215993d078..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java +++ /dev/null @@ -1,30 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.api.DistributedStorageSinglePartitionKeyIntegrationTestBase; -import com.scalar.db.io.Column; -import com.scalar.db.io.DataType; -import java.util.Properties; - -public class ObjectStorageSinglePartitionKeyIntegrationTest - extends DistributedStorageSinglePartitionKeyIntegrationTestBase { - @Override - protected Properties getProperties(String testName) { - return ObjectStorageEnv.getProperties(testName); - } - - @Override - protected Column getColumnWithMinValue(String columnName, DataType dataType) { - if (dataType == DataType.TEXT) { - return ObjectStorageTestUtils.getMinTextValue(columnName); - } - return super.getColumnWithMinValue(columnName, dataType); - } - - @Override - protected Column getColumnWithMaxValue(String columnName, DataType dataType) { - if (dataType == DataType.TEXT) { - return ObjectStorageTestUtils.getMaxTextValue(columnName); - } - return super.getColumnWithMaxValue(columnName, dataType); - } -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageTestUtils.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageTestUtils.java deleted file mode 100644 index 0263043fed..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageTestUtils.java +++ /dev/null @@ -1,19 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.io.TextColumn; -import com.scalar.db.util.TestUtils; -import java.util.stream.IntStream; - -public class ObjectStorageTestUtils { - public static TextColumn getMinTextValue(String columnName) { - // Since ObjectStorage can't handle an empty string correctly, we use "0" as the min value - return TextColumn.of(columnName, "0"); - } - - public static TextColumn getMaxTextValue(String columnName) { - // 
Since ObjectStorage can't handle 0xFF character correctly, we use "ZZZ..." as the max value - StringBuilder builder = new StringBuilder(); - IntStream.range(0, TestUtils.MAX_TEXT_COUNT).forEach(i -> builder.append('Z')); - return TextColumn.of(columnName, builder.toString()); - } -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWithReservedKeywordIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWithReservedKeywordIntegrationTest.java deleted file mode 100644 index ce6a1ffc2e..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWithReservedKeywordIntegrationTest.java +++ /dev/null @@ -1,45 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.api.DistributedStorageWithReservedKeywordIntegrationTestBase; -import java.util.Map; -import java.util.Properties; -import org.junit.jupiter.api.Disabled; - -public class ObjectStorageWithReservedKeywordIntegrationTest - extends DistributedStorageWithReservedKeywordIntegrationTestBase { - - @Override - protected Properties getProperties(String testName) { - return ObjectStorageEnv.getProperties(testName); - } - - @Override - protected Map getCreationOptions() { - return ObjectStorageEnv.getCreationOptions(); - } - - @Override - @Disabled("Object Storage does not support index-related operations") - public void get_GetGivenForIndexedColumn_ShouldGet() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void get_GetGivenForIndexedColumnWithMatchedConjunctions_ShouldGet() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void get_GetGivenForIndexedColumnWithUnmatchedConjunctions_ShouldReturnEmpty() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void - get_GetGivenForIndexedColumnMatchingMultipleRecords_ShouldThrowIllegalArgumentException() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void scan_ScanGivenForIndexedColumn_ShouldScan() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void scan_ScanGivenForNonIndexedColumn_ShouldThrowIllegalArgumentException() {} -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java index beeae03f5b..9476edba7b 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java @@ -12,6 +12,10 @@ protected Properties getProps(String testName) { return ObjectStorageEnv.getProperties(testName); } + @Override + @Disabled("Temporary disable because it includes DML operations") + public void truncateTable_ShouldTruncateProperly() {} + @Override @Disabled("Object Storage does not support index-related operations") public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorrectly() {} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionIntegrationTestWithObjectStorage.java 
b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionIntegrationTestWithObjectStorage.java deleted file mode 100644 index 7405f7e829..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionIntegrationTestWithObjectStorage.java +++ /dev/null @@ -1,13 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.transaction.singlecrudoperation.SingleCrudOperationTransactionIntegrationTestBase; -import java.util.Properties; - -public class SingleCrudOperationTransactionIntegrationTestWithObjectStorage - extends SingleCrudOperationTransactionIntegrationTestBase { - - @Override - protected Properties getProps(String testName) { - return ObjectStorageEnv.getProperties(testName); - } -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java deleted file mode 100644 index d5ebdb1a82..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java +++ /dev/null @@ -1,23 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.transaction.consensuscommit.ConsensusCommitConfig; -import com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestBase; -import java.util.Properties; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; - -public class TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage - extends TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestBase { - - @Override - protected Properties getProps1(String testName) { - Properties properties = ConsensusCommitObjectStorageEnv.getProperties(testName); - properties.setProperty(ConsensusCommitConfig.ISOLATION_LEVEL, "SERIALIZABLE"); - return properties; - } - - @Test - @Override - @Disabled("Cross partition scan with ordering is not supported in Object Storages") - public void scan_ScanWithOrderingGivenForCommittedRecord_ShouldReturnRecords() {} -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java deleted file mode 100644 index 1d278f8f25..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java +++ /dev/null @@ -1,22 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommitIntegrationTestBase; -import java.util.Properties; -import org.junit.jupiter.api.Disabled; - -public class TwoPhaseConsensusCommitIntegrationTestWithObjectStorage - extends TwoPhaseConsensusCommitIntegrationTestBase { - - @Override - protected Properties getProps1(String testName) { - return ConsensusCommitObjectStorageEnv.getProperties(testName); - } - - @Override - @Disabled("Object Storage does not support index-related operations") - public void get_GetGivenForIndexColumn_ShouldReturnRecords() {} - - @Override - @Disabled("Object Storage does not support index-related operations") - public void 
scanOrGetScanner_ScanGivenForIndexColumn_ShouldReturnRecords(ScanType scanType) {} -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage.java deleted file mode 100644 index 1e4b66e32b..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage.java +++ /dev/null @@ -1,13 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommitSpecificIntegrationTestBase; -import java.util.Properties; - -public class TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage - extends TwoPhaseConsensusCommitSpecificIntegrationTestBase { - - @Override - protected Properties getProperties1(String testName) { - return ConsensusCommitObjectStorageEnv.getProperties(testName); - } -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java deleted file mode 100644 index 38a95fd99e..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java +++ /dev/null @@ -1,13 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestBase; -import java.util.Properties; - -public class TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage - extends TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestBase { - - @Override - protected Properties getProperties(String testName) { - return ConsensusCommitObjectStorageEnv.getProperties(testName); - } -} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java deleted file mode 100644 index 3a6df85542..0000000000 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java +++ /dev/null @@ -1,36 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.google.common.collect.Ordering; -import com.scalar.db.api.Scan; -import com.scalar.db.api.TableMetadata; -import com.scalar.db.io.Column; -import java.util.Comparator; -import java.util.Map; - -public class ClusteringKeyComparator implements Comparator> { - private final TableMetadata metadata; - - public ClusteringKeyComparator(TableMetadata metadata) { - this.metadata = metadata; - } - - @Override - public int compare(Map clusteringKey1, Map clusteringKey2) { - for (String columnName : metadata.getClusteringKeyNames()) { - Scan.Ordering.Order order = metadata.getClusteringOrder(columnName); - - Column column1 = - ColumnValueMapper.convert( - clusteringKey1.get(columnName), columnName, metadata.getColumnDataType(columnName)); - Column column2 = - ColumnValueMapper.convert( - clusteringKey2.get(columnName), columnName, metadata.getColumnDataType(columnName)); - - int cmp = Ordering.natural().compare(column1, column2); - if (cmp != 0) { - return order == 
Scan.Ordering.Order.ASC ? cmp : -cmp; - } - } - return 0; - } -} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ColumnValueMapper.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ColumnValueMapper.java deleted file mode 100644 index 34e2b2d780..0000000000 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ColumnValueMapper.java +++ /dev/null @@ -1,79 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.io.BigIntColumn; -import com.scalar.db.io.BlobColumn; -import com.scalar.db.io.BooleanColumn; -import com.scalar.db.io.Column; -import com.scalar.db.io.DataType; -import com.scalar.db.io.DateColumn; -import com.scalar.db.io.DoubleColumn; -import com.scalar.db.io.FloatColumn; -import com.scalar.db.io.IntColumn; -import com.scalar.db.io.TextColumn; -import com.scalar.db.io.TimeColumn; -import com.scalar.db.io.TimestampColumn; -import com.scalar.db.io.TimestampTZColumn; -import com.scalar.db.util.TimeRelatedColumnEncodingUtils; -import java.util.Base64; -import javax.annotation.Nullable; - -public class ColumnValueMapper { - public static Column convert(@Nullable Object recordValue, String name, DataType dataType) { - switch (dataType) { - case BOOLEAN: - return recordValue == null - ? BooleanColumn.ofNull(name) - : BooleanColumn.of(name, (boolean) recordValue); - case INT: - return recordValue == null - ? IntColumn.ofNull(name) - : IntColumn.of(name, ((Number) recordValue).intValue()); - case BIGINT: - return recordValue == null - ? BigIntColumn.ofNull(name) - : BigIntColumn.of(name, ((Number) recordValue).longValue()); - case FLOAT: - return recordValue == null - ? FloatColumn.ofNull(name) - : FloatColumn.of(name, ((Number) recordValue).floatValue()); - case DOUBLE: - return recordValue == null - ? DoubleColumn.ofNull(name) - : DoubleColumn.of(name, ((Number) recordValue).doubleValue()); - case TEXT: - return recordValue == null - ? TextColumn.ofNull(name) - : TextColumn.of(name, (String) recordValue); - case BLOB: - return recordValue == null - ? BlobColumn.ofNull(name) - : BlobColumn.of(name, Base64.getDecoder().decode((String) recordValue)); - case DATE: - return recordValue == null - ? DateColumn.ofNull(name) - : DateColumn.of( - name, TimeRelatedColumnEncodingUtils.decodeDate(((Number) recordValue).intValue())); - case TIME: - return recordValue == null - ? TimeColumn.ofNull(name) - : TimeColumn.of( - name, - TimeRelatedColumnEncodingUtils.decodeTime(((Number) recordValue).longValue())); - case TIMESTAMP: - return recordValue == null - ? TimestampColumn.ofNull(name) - : TimestampColumn.of( - name, - TimeRelatedColumnEncodingUtils.decodeTimestamp(((Number) recordValue).longValue())); - case TIMESTAMPTZ: - return recordValue == null - ? 
TimestampTZColumn.ofNull(name) - : TimestampTZColumn.of( - name, - TimeRelatedColumnEncodingUtils.decodeTimestampTZ( - ((Number) recordValue).longValue())); - default: - throw new AssertionError(); - } - } -} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/MapVisitor.java b/core/src/main/java/com/scalar/db/storage/objectstorage/MapVisitor.java deleted file mode 100644 index 6d9e2b4167..0000000000 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/MapVisitor.java +++ /dev/null @@ -1,92 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.io.BigIntColumn; -import com.scalar.db.io.BlobColumn; -import com.scalar.db.io.BooleanColumn; -import com.scalar.db.io.ColumnVisitor; -import com.scalar.db.io.DateColumn; -import com.scalar.db.io.DoubleColumn; -import com.scalar.db.io.FloatColumn; -import com.scalar.db.io.IntColumn; -import com.scalar.db.io.TextColumn; -import com.scalar.db.io.TimeColumn; -import com.scalar.db.io.TimestampColumn; -import com.scalar.db.io.TimestampTZColumn; -import com.scalar.db.util.TimeRelatedColumnEncodingUtils; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import java.util.HashMap; -import java.util.Map; -import javax.annotation.concurrent.NotThreadSafe; - -@NotThreadSafe -public class MapVisitor implements ColumnVisitor { - private final Map values = new HashMap<>(); - - @SuppressFBWarnings("EI_EXPOSE_REP") - public Map get() { - return values; - } - - @Override - public void visit(BooleanColumn column) { - values.put(column.getName(), column.hasNullValue() ? null : column.getBooleanValue()); - } - - @Override - public void visit(IntColumn column) { - values.put(column.getName(), column.hasNullValue() ? null : column.getIntValue()); - } - - @Override - public void visit(BigIntColumn column) { - values.put(column.getName(), column.hasNullValue() ? null : column.getBigIntValue()); - } - - @Override - public void visit(FloatColumn column) { - values.put(column.getName(), column.hasNullValue() ? null : column.getFloatValue()); - } - - @Override - public void visit(DoubleColumn column) { - values.put(column.getName(), column.hasNullValue() ? null : column.getDoubleValue()); - } - - @Override - public void visit(TextColumn column) { - values.put(column.getName(), column.hasNullValue() ? null : column.getTextValue()); - } - - @Override - public void visit(BlobColumn column) { - values.put(column.getName(), column.hasNullValue() ? null : column.getBlobValue()); - } - - @Override - public void visit(DateColumn column) { - values.put( - column.getName(), - column.hasNullValue() ? null : TimeRelatedColumnEncodingUtils.encode(column)); - } - - @Override - public void visit(TimeColumn column) { - values.put( - column.getName(), - column.hasNullValue() ? null : TimeRelatedColumnEncodingUtils.encode(column)); - } - - @Override - public void visit(TimestampColumn column) { - values.put( - column.getName(), - column.hasNullValue() ? null : TimeRelatedColumnEncodingUtils.encode(column)); - } - - @Override - public void visit(TimestampTZColumn column) { - values.put( - column.getName(), - column.hasNullValue() ? 
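The ColumnValueMapper and MapVisitor shown above are the two halves of one round trip: on write, columns are flattened into JSON-friendly values (time-related types via TimeRelatedColumnEncodingUtils, BLOBs as Base64 text); on read, the declared DataType is enough to rebuild a typed Column. A minimal sketch of that round trip, assuming it is compiled against the objectstorage package above (everything else is the public ScalarDB API):

import com.scalar.db.io.Column;
import com.scalar.db.io.DataType;
import com.scalar.db.io.IntColumn;

// Sketch only, not part of the patch; assumes MapVisitor and ColumnValueMapper are on the classpath.
public class RoundTripSketch {
  public static void main(String[] args) {
    MapVisitor visitor = new MapVisitor();
    IntColumn written = IntColumn.of("c1", 42);
    written.accept(visitor); // MapVisitor flattens the column into a JSON-friendly map
    Object stored = visitor.get().get("c1"); // the value as it would be serialized into the object

    // ColumnValueMapper rebuilds a typed column from the stored value plus the declared DataType.
    Column<?> restored = ColumnValueMapper.convert(stored, "c1", DataType.INT);
    System.out.println(restored.getIntValue()); // 42
  }
}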
null : TimeRelatedColumnEncodingUtils.encode(column)); - } -} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java b/core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java deleted file mode 100644 index e1497c6617..0000000000 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java +++ /dev/null @@ -1,308 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.fasterxml.jackson.core.type.TypeReference; -import com.scalar.db.api.Delete; -import com.scalar.db.api.DeleteIf; -import com.scalar.db.api.DeleteIfExists; -import com.scalar.db.api.Mutation; -import com.scalar.db.api.Put; -import com.scalar.db.api.PutIf; -import com.scalar.db.api.PutIfExists; -import com.scalar.db.api.PutIfNotExists; -import com.scalar.db.api.TableMetadata; -import com.scalar.db.common.CoreError; -import com.scalar.db.common.TableMetadataManager; -import com.scalar.db.exception.storage.ExecutionException; -import com.scalar.db.exception.storage.NoMutationException; -import com.scalar.db.exception.storage.RetriableExecutionException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; - -public class MutateStatementHandler extends StatementHandler { - public MutateStatementHandler( - ObjectStorageWrapper wrapper, TableMetadataManager metadataManager) { - super(wrapper, metadataManager); - } - - public void handle(Mutation mutation) throws ExecutionException { - TableMetadata tableMetadata = metadataManager.getTableMetadata(mutation); - ObjectStorageMutation objectStorageMutation = - new ObjectStorageMutation(mutation, tableMetadata); - mutate( - getNamespace(mutation), - getTable(mutation), - objectStorageMutation.getConcatenatedPartitionKey(), - Collections.singletonList(mutation)); - } - - public void handle(List mutations) throws ExecutionException { - Map> mutationPerPartition = new HashMap<>(); - for (Mutation mutation : mutations) { - TableMetadata tableMetadata = metadataManager.getTableMetadata(mutation); - ObjectStorageMutation objectStorageMutation = - new ObjectStorageMutation(mutation, tableMetadata); - String partitionKey = objectStorageMutation.getConcatenatedPartitionKey(); - PartitionIdentifier partitionIdentifier = - PartitionIdentifier.of(getNamespace(mutation), getTable(mutation), partitionKey); - mutationPerPartition - .computeIfAbsent(partitionIdentifier, k -> new ArrayList<>()) - .add(mutation); - } - for (Map.Entry> entry : mutationPerPartition.entrySet()) { - PartitionIdentifier partitionIdentifier = entry.getKey(); - mutate( - partitionIdentifier.getNamespaceName(), - partitionIdentifier.getTableName(), - partitionIdentifier.getPartitionName(), - entry.getValue()); - } - } - - private void mutate( - String namespaceName, String tableName, String partitionKey, List mutations) - throws ExecutionException { - Map readVersionMap = new HashMap<>(); - Map partition = - getPartition(namespaceName, tableName, partitionKey, readVersionMap); - for (Mutation mutation : mutations) { - if (mutation instanceof Put) { - putInternal(partition, (Put) mutation); - } else { - assert mutation instanceof Delete; - deleteInternal(partition, (Delete) mutation); - } - } - applyPartitionWrite(namespaceName, tableName, partitionKey, partition, readVersionMap); - } - - private void putInternal(Map partition, Put put) - throws ExecutionException { - TableMetadata tableMetadata = 
metadataManager.getTableMetadata(put); - ObjectStorageMutation mutation = new ObjectStorageMutation(put, tableMetadata); - if (!put.getCondition().isPresent()) { - ObjectStorageRecord existingRecord = partition.get(mutation.getRecordId()); - if (existingRecord == null) { - partition.put(mutation.getRecordId(), mutation.makeRecord()); - } else { - partition.put(mutation.getRecordId(), mutation.makeRecord(existingRecord)); - } - } else if (put.getCondition().get() instanceof PutIfNotExists) { - if (partition.containsKey(mutation.getRecordId())) { - throw new NoMutationException( - CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(put)); - } - partition.put(mutation.getRecordId(), mutation.makeRecord()); - } else if (put.getCondition().get() instanceof PutIfExists) { - ObjectStorageRecord existingRecord = partition.get(mutation.getRecordId()); - if (existingRecord == null) { - throw new NoMutationException( - CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(put)); - } - partition.put(mutation.getRecordId(), mutation.makeRecord(existingRecord)); - } else { - assert put.getCondition().get() instanceof PutIf; - ObjectStorageRecord existingRecord = partition.get(mutation.getRecordId()); - if (existingRecord == null) { - throw new NoMutationException( - CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(put)); - } - try { - validateConditions( - partition.get(mutation.getRecordId()), - put.getCondition().get().getExpressions(), - metadataManager.getTableMetadata(mutation.getOperation())); - } catch (ExecutionException e) { - throw new NoMutationException( - CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(put), e); - } - partition.put(mutation.getRecordId(), mutation.makeRecord(existingRecord)); - } - } - - private void deleteInternal(Map partition, Delete delete) - throws ExecutionException { - TableMetadata tableMetadata = metadataManager.getTableMetadata(delete); - ObjectStorageMutation mutation = new ObjectStorageMutation(delete, tableMetadata); - if (!delete.getCondition().isPresent()) { - partition.remove(mutation.getRecordId()); - } else if (delete.getCondition().get() instanceof DeleteIfExists) { - if (!partition.containsKey(mutation.getRecordId())) { - throw new NoMutationException( - CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(delete)); - } - partition.remove(mutation.getRecordId()); - } else { - assert delete.getCondition().get() instanceof DeleteIf; - if (!partition.containsKey(mutation.getRecordId())) { - throw new NoMutationException( - CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(delete)); - } - try { - validateConditions( - partition.get(mutation.getRecordId()), - delete.getCondition().get().getExpressions(), - metadataManager.getTableMetadata(mutation.getOperation())); - } catch (ExecutionException e) { - throw new NoMutationException( - CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(delete), e); - } - partition.remove(mutation.getRecordId()); - } - } - - /** - * Applies the partition write. 
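For context, the PutIfNotExists / PutIfExists / PutIf branches handled in putInternal above correspond to conditions that callers attach through the public builder API. A caller-side sketch (namespace, table, and column names are made up):

import com.scalar.db.api.ConditionBuilder;
import com.scalar.db.api.Put;
import com.scalar.db.io.Key;

// Illustrative only; with PutIfExists, putInternal above throws NoMutationException
// when no record with this key exists in the partition.
public class ConditionalPutSketch {
  public static Put putIfExistsExample() {
    return Put.newBuilder()
        .namespace("ns")
        .table("tbl")
        .partitionKey(Key.ofText("pk", "p1"))
        .clusteringKey(Key.ofText("ck", "c1"))
        .intValue("balance", 100)
        .condition(ConditionBuilder.putIfExists())
        .build();
  }
}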
- * - * @param namespaceName the namespace name - * @param tableName the table name - * @param partitionKey the partition key - * @param partition the partition to be written - * @param readVersionMap the map of read versions - * @throws ExecutionException if a failure occurs during the operation - */ - private void applyPartitionWrite( - String namespaceName, - String tableName, - String partitionKey, - Map partition, - Map readVersionMap) - throws ExecutionException { - if (readVersionMap.containsKey( - PartitionIdentifier.of(namespaceName, tableName, partitionKey))) { - String readVersion = - readVersionMap.get(PartitionIdentifier.of(namespaceName, tableName, partitionKey)); - if (!partition.isEmpty()) { - updatePartition(namespaceName, tableName, partitionKey, partition, readVersion); - } else { - deletePartition(namespaceName, tableName, partitionKey, readVersion); - } - } else { - if (!partition.isEmpty()) { - insertPartition(namespaceName, tableName, partitionKey, partition); - } - } - } - - /** - * Gets a partition from the object storage. - * - * @param namespaceName the namespace name - * @param tableName the table name - * @param partitionKey the partition key - * @param readVersionMap the map to store the read version - * @return the partition - * @throws ExecutionException if a failure occurs during the operation - */ - private Map getPartition( - String namespaceName, - String tableName, - String partitionKey, - Map readVersionMap) - throws ExecutionException { - String objectKey = ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey); - try { - Optional response = wrapper.get(objectKey); - if (!response.isPresent()) { - return new HashMap<>(); - } - readVersionMap.put( - PartitionIdentifier.of(namespaceName, tableName, partitionKey), - response.get().getVersion()); - return Serializer.deserialize( - response.get().getPayload(), new TypeReference>() {}); - } catch (ObjectStorageWrapperException e) { - throw new ExecutionException( - CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); - } - } - - /** - * Inserts a partition into the object storage. This method is called after confirming that the - * partition does not exist. - * - * @param namespaceName the namespace name - * @param tableName the table name - * @param partitionKey the partition key - * @param partition the partition to be inserted - * @throws ExecutionException if a failure occurs during the operation - */ - private void insertPartition( - String namespaceName, - String tableName, - String partitionKey, - Map partition) - throws ExecutionException { - try { - wrapper.insert( - ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey), - Serializer.serialize(partition)); - } catch (PreconditionFailedException e) { - throw new RetriableExecutionException( - CoreError.OBJECT_STORAGE_CONFLICT_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); - } catch (ObjectStorageWrapperException e) { - throw new ExecutionException( - CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); - } - } - - /** - * Updates a partition in the object storage. This method is called after confirming that the - * partition exists. 
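The read-version bookkeeping above is a standard optimistic-concurrency (compare-and-swap) pattern on object storage: the whole partition object is read together with its version, modified in memory, and written back only if the stored version is still the one that was read; insertPartition likewise relies on an if-not-exists precondition. A simplified sketch of that flow, using made-up stand-ins (SimpleWrapper, VersionedObject) for ObjectStorageWrapper and its response type:

// Sketch only: SimpleWrapper and VersionedObject are simplified stand-ins, not the real classes.
interface SimpleWrapper {
  VersionedObject get(String key);                          // null when the object does not exist
  void insert(String key, String payload);                  // must fail if the key already exists
  void update(String key, String payload, String version);  // must fail if the version has changed
}

final class VersionedObject {
  final String payload;
  final String version;

  VersionedObject(String payload, String version) {
    this.payload = payload;
    this.version = version;
  }
}

final class CasWriteSketch {
  static void writePartition(SimpleWrapper wrapper, String key, String newPayload) {
    VersionedObject current = wrapper.get(key);
    if (current == null) {
      // No read version recorded: the partition is new, so only an insert is allowed.
      wrapper.insert(key, newPayload);
    } else {
      // A read version was recorded: overwrite only if nobody else has written in between.
      wrapper.update(key, newPayload, current.version);
    }
    // In the handler above, a failed precondition surfaces as RetriableExecutionException,
    // telling the caller to redo the whole read-modify-write cycle.
  }
}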
- * - * @param namespaceName the namespace name - * @param tableName the table name - * @param partitionKey the partition key - * @param partition the partition to be updated - * @param readVersion the read version - * @throws ExecutionException if a failure occurs during the operation - */ - private void updatePartition( - String namespaceName, - String tableName, - String partitionKey, - Map partition, - String readVersion) - throws ExecutionException { - try { - wrapper.update( - ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey), - Serializer.serialize(partition), - readVersion); - } catch (PreconditionFailedException e) { - throw new RetriableExecutionException( - CoreError.OBJECT_STORAGE_CONFLICT_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); - } catch (ObjectStorageWrapperException e) { - throw new ExecutionException( - CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); - } - } - - /** - * Deletes a partition from the object storage. This method is called after confirming that the - * partition exists. - * - * @param namespaceName the namespace name - * @param tableName the table name - * @param partitionKey the partition key - * @param readVersion the read version - * @throws ExecutionException if a failure occurs during the operation - */ - private void deletePartition( - String namespaceName, String tableName, String partitionKey, String readVersion) - throws ExecutionException { - try { - wrapper.delete( - ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey), readVersion); - } catch (PreconditionFailedException e) { - throw new RetriableExecutionException( - CoreError.OBJECT_STORAGE_CONFLICT_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); - } catch (ObjectStorageWrapperException e) { - throw new ExecutionException( - CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); - } - } -} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java deleted file mode 100644 index 4cb7e28654..0000000000 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java +++ /dev/null @@ -1,142 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import static com.scalar.db.util.ScalarDbUtils.copyAndPrepareForDynamicFiltering; - -import com.scalar.db.api.Delete; -import com.scalar.db.api.Get; -import com.scalar.db.api.Mutation; -import com.scalar.db.api.Put; -import com.scalar.db.api.Result; -import com.scalar.db.api.Scan; -import com.scalar.db.api.Scanner; -import com.scalar.db.common.AbstractDistributedStorage; -import com.scalar.db.common.CoreError; -import com.scalar.db.common.FilterableScanner; -import com.scalar.db.common.StorageInfoProvider; -import com.scalar.db.common.TableMetadataManager; -import com.scalar.db.common.checker.OperationChecker; -import com.scalar.db.config.DatabaseConfig; -import com.scalar.db.exception.storage.ExecutionException; -import java.io.IOException; -import java.util.List; -import java.util.Optional; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class ObjectStorage extends AbstractDistributedStorage { - private static final Logger logger = LoggerFactory.getLogger(ObjectStorage.class); - - private final ObjectStorageWrapper wrapper; - private final SelectStatementHandler selectStatementHandler; - private final MutateStatementHandler mutateStatementHandler; - private final OperationChecker 
operationChecker; - - public ObjectStorage(DatabaseConfig databaseConfig) { - super(databaseConfig); - if (databaseConfig.isCrossPartitionScanOrderingEnabled()) { - throw new IllegalArgumentException( - CoreError.OBJECT_STORAGE_CROSS_PARTITION_SCAN_WITH_ORDERING_NOT_SUPPORTED.buildMessage()); - } - ObjectStorageConfig objectStorageConfig = - ObjectStorageUtils.getObjectStorageConfig(databaseConfig); - wrapper = ObjectStorageWrapperFactory.create(objectStorageConfig); - ObjectStorageAdmin admin = new ObjectStorageAdmin(wrapper, objectStorageConfig); - TableMetadataManager metadataManager = - new TableMetadataManager(admin, databaseConfig.getMetadataCacheExpirationTimeSecs()); - operationChecker = - new ObjectStorageOperationChecker( - databaseConfig, metadataManager, new StorageInfoProvider(admin)); - selectStatementHandler = new SelectStatementHandler(wrapper, metadataManager); - mutateStatementHandler = new MutateStatementHandler(wrapper, metadataManager); - logger.info("ObjectStorage object is created properly"); - } - - @Override - public Optional get(Get get) throws ExecutionException { - get = copyAndSetTargetToIfNot(get); - operationChecker.check(get); - Scanner scanner = null; - try { - if (get.getConjunctions().isEmpty()) { - scanner = selectStatementHandler.handle(get); - } else { - scanner = - new FilterableScanner( - get, selectStatementHandler.handle(copyAndPrepareForDynamicFiltering(get))); - } - Optional ret = scanner.one(); - if (!scanner.one().isPresent()) { - return ret; - } else { - throw new IllegalArgumentException( - CoreError.GET_OPERATION_USED_FOR_NON_EXACT_MATCH_SELECTION.buildMessage(get)); - } - } finally { - if (scanner != null) { - try { - scanner.close(); - } catch (IOException e) { - logger.warn("Failed to close the scanner", e); - } - } - } - } - - @Override - public Scanner scan(Scan scan) throws ExecutionException { - scan = copyAndSetTargetToIfNot(scan); - operationChecker.check(scan); - if (scan.getConjunctions().isEmpty()) { - return selectStatementHandler.handle(scan); - } else { - return new FilterableScanner( - scan, selectStatementHandler.handle(copyAndPrepareForDynamicFiltering(scan))); - } - } - - @Override - public void put(Put put) throws ExecutionException { - put = copyAndSetTargetToIfNot(put); - operationChecker.check(put); - mutateStatementHandler.handle(put); - } - - @Override - public void put(List puts) throws ExecutionException { - mutate(puts); - } - - @Override - public void delete(Delete delete) throws ExecutionException { - delete = copyAndSetTargetToIfNot(delete); - operationChecker.check(delete); - mutateStatementHandler.handle(delete); - } - - @Override - public void delete(List deletes) throws ExecutionException { - mutate(deletes); - } - - @Override - public void mutate(List mutations) throws ExecutionException { - if (mutations.size() == 1) { - Mutation mutation = mutations.get(0); - if (mutation instanceof Put) { - put((Put) mutation); - return; - } else if (mutation instanceof Delete) { - delete((Delete) mutation); - return; - } - } - mutations = copyAndSetTargetToIfNot(mutations); - operationChecker.check(mutations); - mutateStatementHandler.handle(mutations); - } - - @Override - public void close() { - wrapper.close(); - } -} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java index b7e56d460c..684bd958b9 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java +++ 
b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java @@ -540,7 +540,7 @@ private void deleteMetadataTable(String table, String readVersion) throws Execut private void deleteTableData(String namespace, String table) throws ExecutionException { try { - wrapper.deleteByPrefix(ObjectStorageUtils.getObjectKey(namespace, table)); + wrapper.deleteByPrefix(ObjectStorageUtils.getObjectKey(namespace, table, "")); } catch (Exception e) { throw new ExecutionException( String.format( diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageConfig.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageConfig.java index 875cce3bd1..adfd517208 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageConfig.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageConfig.java @@ -1,6 +1,7 @@ package com.scalar.db.storage.objectstorage; public interface ObjectStorageConfig { + /** * Returns the storage name. * diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java deleted file mode 100644 index d64355fb80..0000000000 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java +++ /dev/null @@ -1,61 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.api.Delete; -import com.scalar.db.api.Mutation; -import com.scalar.db.api.Put; -import com.scalar.db.api.TableMetadata; -import com.scalar.db.io.Column; -import java.util.Collection; -import java.util.Collections; -import java.util.Map; -import javax.annotation.Nonnull; -import javax.annotation.concurrent.Immutable; - -@Immutable -public class ObjectStorageMutation extends ObjectStorageOperation { - ObjectStorageMutation(Mutation mutation, TableMetadata metadata) { - super(mutation, metadata); - } - - @Nonnull - public ObjectStorageRecord makeRecord() { - Mutation mutation = (Mutation) getOperation(); - - if (mutation instanceof Delete) { - return new ObjectStorageRecord(); - } - Put put = (Put) mutation; - - return new ObjectStorageRecord( - getRecordId(), - toMap(put.getPartitionKey().getColumns()), - put.getClusteringKey().map(k -> toMap(k.getColumns())).orElse(Collections.emptyMap()), - toMapForPut(put)); - } - - @Nonnull - public ObjectStorageRecord makeRecord(ObjectStorageRecord existingRecord) { - Mutation mutation = (Mutation) getOperation(); - - if (mutation instanceof Delete) { - return new ObjectStorageRecord(); - } - Put put = (Put) mutation; - - ObjectStorageRecord newRecord = new ObjectStorageRecord(existingRecord); - toMapForPut(put).forEach((k, v) -> newRecord.getValues().put(k, v)); - return newRecord; - } - - private Map toMap(Collection> columns) { - MapVisitor visitor = new MapVisitor(); - columns.forEach(c -> c.accept(visitor)); - return visitor.get(); - } - - private Map toMapForPut(Put put) { - MapVisitor visitor = new MapVisitor(); - put.getColumns().values().forEach(c -> c.accept(visitor)); - return visitor.get(); - } -} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java index 9f85e3e65b..024a7c419b 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java @@ -9,6 
+9,7 @@ public class ObjectStorageNamespaceMetadata { private final String name; // The default constructor is required by Jackson to deserialize JSON object + @SuppressWarnings("unused") public ObjectStorageNamespaceMetadata() { this(null); } diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java deleted file mode 100644 index d632009ae0..0000000000 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java +++ /dev/null @@ -1,77 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.google.common.base.Joiner; -import com.scalar.db.api.Operation; -import com.scalar.db.api.TableMetadata; -import com.scalar.db.io.Column; -import java.util.HashMap; -import java.util.Map; -import javax.annotation.Nonnull; -import javax.annotation.concurrent.Immutable; - -@Immutable -public class ObjectStorageOperation { - private final Operation operation; - private final TableMetadata metadata; - - public ObjectStorageOperation(Operation operation, TableMetadata metadata) { - this.operation = operation; - this.metadata = metadata; - } - - @Nonnull - public Operation getOperation() { - return operation; - } - - @Nonnull - public String getConcatenatedPartitionKey() { - Map> keyMap = new HashMap<>(); - operation.getPartitionKey().getColumns().forEach(c -> keyMap.put(c.getName(), c)); - - ConcatenationVisitor visitor = new ConcatenationVisitor(); - metadata.getPartitionKeyNames().forEach(name -> keyMap.get(name).accept(visitor)); - - return visitor.build(); - } - - @Nonnull - public String getConcatenatedClusteringKey() { - Map> keyMap = new HashMap<>(); - operation - .getClusteringKey() - .ifPresent(k -> k.getColumns().forEach(c -> keyMap.put(c.getName(), c))); - - ConcatenationVisitor visitor = new ConcatenationVisitor(); - metadata.getClusteringKeyNames().forEach(name -> keyMap.get(name).accept(visitor)); - - return visitor.build(); - } - - @Nonnull - public String getRecordId() { - if (operation.getClusteringKey().isPresent()) { - return String.join( - String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), - getConcatenatedPartitionKey(), - getConcatenatedClusteringKey()); - } - return getConcatenatedPartitionKey(); - } - - @SafeVarargs - public final void checkArgument(Class... 
expected) { - for (Class e : expected) { - if (e.isInstance(operation)) { - return; - } - } - throw new IllegalArgumentException( - Joiner.on(" ") - .join( - new String[] { - operation.getClass().toString(), "is passed where something like", - expected[0].toString(), "is expected" - })); - } -} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java deleted file mode 100644 index c97b410f6a..0000000000 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java +++ /dev/null @@ -1,154 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.api.ConditionalExpression; -import com.scalar.db.api.Delete; -import com.scalar.db.api.Get; -import com.scalar.db.api.Mutation; -import com.scalar.db.api.Operation; -import com.scalar.db.api.Put; -import com.scalar.db.api.Scan; -import com.scalar.db.api.TableMetadata; -import com.scalar.db.common.CoreError; -import com.scalar.db.common.StorageInfoProvider; -import com.scalar.db.common.TableMetadataManager; -import com.scalar.db.common.checker.OperationChecker; -import com.scalar.db.config.DatabaseConfig; -import com.scalar.db.exception.storage.ExecutionException; -import com.scalar.db.io.BigIntColumn; -import com.scalar.db.io.BlobColumn; -import com.scalar.db.io.BooleanColumn; -import com.scalar.db.io.ColumnVisitor; -import com.scalar.db.io.DataType; -import com.scalar.db.io.DateColumn; -import com.scalar.db.io.DoubleColumn; -import com.scalar.db.io.FloatColumn; -import com.scalar.db.io.IntColumn; -import com.scalar.db.io.TextColumn; -import com.scalar.db.io.TimeColumn; -import com.scalar.db.io.TimestampColumn; -import com.scalar.db.io.TimestampTZColumn; - -public class ObjectStorageOperationChecker extends OperationChecker { - private static final char[] ILLEGAL_CHARACTERS_IN_PRIMARY_KEY = { - ObjectStorageUtils.OBJECT_KEY_DELIMITER, ObjectStorageUtils.CONCATENATED_KEY_DELIMITER, - }; - - private static final ColumnVisitor PRIMARY_KEY_COLUMN_CHECKER = - new ColumnVisitor() { - @Override - public void visit(BooleanColumn column) {} - - @Override - public void visit(IntColumn column) {} - - @Override - public void visit(BigIntColumn column) {} - - @Override - public void visit(FloatColumn column) {} - - @Override - public void visit(DoubleColumn column) {} - - @Override - public void visit(TextColumn column) { - String value = column.getTextValue(); - assert value != null; - - for (char illegalCharacter : ILLEGAL_CHARACTERS_IN_PRIMARY_KEY) { - if (value.indexOf(illegalCharacter) != -1) { - throw new IllegalArgumentException( - CoreError.OBJECT_STORAGE_PRIMARY_KEY_CONTAINS_ILLEGAL_CHARACTER.buildMessage( - column.getName(), value)); - } - } - } - - @Override - public void visit(BlobColumn column) {} - - @Override - public void visit(DateColumn column) {} - - @Override - public void visit(TimeColumn column) {} - - @Override - public void visit(TimestampColumn column) {} - - @Override - public void visit(TimestampTZColumn column) {} - }; - - public ObjectStorageOperationChecker( - DatabaseConfig databaseConfig, - TableMetadataManager metadataManager, - StorageInfoProvider storageInfoProvider) { - super(databaseConfig, metadataManager, storageInfoProvider); - } - - @Override - public void check(Get get) throws ExecutionException { - super.check(get); - checkPrimaryKey(get); - } - - @Override - public void check(Scan scan) throws ExecutionException { - 
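The delimiter check above matters because record ids and object keys are plain string concatenations of key values: if a TEXT key value could itself contain the delimiter, two distinct primary keys could collapse into the same id. A small illustration (the '|' below is only a stand-in for the real delimiter constants in ObjectStorageUtils):

// Illustration only; '|' stands in for ObjectStorageUtils.CONCATENATED_KEY_DELIMITER.
final class DelimiterCollisionSketch {
  static String recordId(String partitionKeyValue, String clusteringKeyValue) {
    return partitionKeyValue + "|" + clusteringKeyValue;
  }

  public static void main(String[] args) {
    // Without the checker, these two different keys would produce the same record id:
    System.out.println(recordId("a|b", "c")); // a|b|c
    System.out.println(recordId("a", "b|c")); // a|b|c
  }
}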
super.check(scan); - checkPrimaryKey(scan); - scan.getStartClusteringKey() - .ifPresent( - c -> c.getColumns().forEach(column -> column.accept(PRIMARY_KEY_COLUMN_CHECKER))); - scan.getEndClusteringKey() - .ifPresent( - c -> c.getColumns().forEach(column -> column.accept(PRIMARY_KEY_COLUMN_CHECKER))); - } - - @Override - public void check(Put put) throws ExecutionException { - super.check(put); - checkPrimaryKey(put); - - TableMetadata metadata = getTableMetadata(put); - checkCondition(put, metadata); - } - - @Override - public void check(Delete delete) throws ExecutionException { - super.check(delete); - checkPrimaryKey(delete); - - TableMetadata metadata = getTableMetadata(delete); - checkCondition(delete, metadata); - } - - private void checkPrimaryKey(Operation operation) { - operation - .getPartitionKey() - .getColumns() - .forEach(column -> column.accept(PRIMARY_KEY_COLUMN_CHECKER)); - operation - .getClusteringKey() - .ifPresent( - c -> c.getColumns().forEach(column -> column.accept(PRIMARY_KEY_COLUMN_CHECKER))); - } - - private void checkCondition(Mutation mutation, TableMetadata metadata) { - if (!mutation.getCondition().isPresent()) { - return; - } - for (ConditionalExpression expression : mutation.getCondition().get().getExpressions()) { - if (metadata.getColumnDataType(expression.getColumn().getName()) == DataType.BOOLEAN) { - if (expression.getOperator() != ConditionalExpression.Operator.EQ - && expression.getOperator() != ConditionalExpression.Operator.NE - && expression.getOperator() != ConditionalExpression.Operator.IS_NULL - && expression.getOperator() != ConditionalExpression.Operator.IS_NOT_NULL) { - throw new IllegalArgumentException( - CoreError.OBJECT_STORAGE_CONDITION_OPERATION_NOT_SUPPORTED_FOR_BOOLEAN_TYPE - .buildMessage(mutation)); - } - } - } - } -} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java index 756a06d517..1f7b94275b 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java @@ -7,9 +7,10 @@ import com.scalar.db.config.DatabaseConfig; public interface ObjectStorageProvider extends DistributedStorageProvider { + @Override default DistributedStorage createDistributedStorage(DatabaseConfig config) { - return new ObjectStorage(config); + return null; } @Override diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java deleted file mode 100644 index 4caad8906a..0000000000 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java +++ /dev/null @@ -1,77 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import java.util.Collections; -import java.util.Map; -import java.util.Objects; -import javax.annotation.Nullable; -import javax.annotation.concurrent.Immutable; - -@Immutable -public class ObjectStorageRecord { - private final String id; - private final Map partitionKey; - private final Map clusteringKey; - private final Map values; - - // The default constructor is required by Jackson to deserialize JSON object - public ObjectStorageRecord() { - this(null, null, null, null); - } - - public ObjectStorageRecord( - @Nullable String id, - @Nullable Map partitionKey, - @Nullable Map clusteringKey, - @Nullable Map values) { - this.id = id != null ? 
id : ""; - this.partitionKey = partitionKey != null ? partitionKey : Collections.emptyMap(); - this.clusteringKey = clusteringKey != null ? clusteringKey : Collections.emptyMap(); - this.values = values != null ? values : Collections.emptyMap(); - } - - public ObjectStorageRecord(ObjectStorageRecord record) { - this(record.getId(), record.getPartitionKey(), record.getClusteringKey(), record.getValues()); - } - - public String getId() { - return id; - } - - public Map getPartitionKey() { - return partitionKey; - } - - public Map getClusteringKey() { - return clusteringKey; - } - - public Map getValues() { - return values; - } - - @Override - public boolean equals(Object o) { - if (o == this) { - return true; - } - if (!(o instanceof ObjectStorageRecord)) { - return false; - } - ObjectStorageRecord other = (ObjectStorageRecord) o; - if (!other.getId().equals(id)) { - return false; - } - if (!other.getPartitionKey().equals(partitionKey)) { - return false; - } - if (!other.getClusteringKey().equals(clusteringKey)) { - return false; - } - return other.getValues().equals(values); - } - - @Override - public int hashCode() { - return Objects.hash(id, partitionKey, clusteringKey, values); - } -} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java index 23a684f665..18d978cd12 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java @@ -59,6 +59,15 @@ public ObjectStorageTableMetadata(TableMetadata tableMetadata) { this.columns = columnTypeByName; } + private ObjectStorageTableMetadata(Builder builder) { + this( + builder.partitionKeyNames, + builder.clusteringKeyNames, + builder.clusteringOrders, + builder.secondaryIndexNames, + builder.columns); + } + public LinkedHashSet getPartitionKeyNames() { return partitionKeyNames; } @@ -139,4 +148,47 @@ private DataType convertDataType(String columnType) { throw new AssertionError("Unknown column type: " + columnType); } } + + public static ObjectStorageTableMetadata.Builder newBuilder() { + return new ObjectStorageTableMetadata.Builder(); + } + + public static final class Builder { + private LinkedHashSet partitionKeyNames; + private LinkedHashSet clusteringKeyNames; + private Map clusteringOrders; + private Set secondaryIndexNames; + private Map columns; + + private Builder() {} + + public ObjectStorageTableMetadata.Builder partitionKeyNames(LinkedHashSet val) { + partitionKeyNames = val; + return this; + } + + public ObjectStorageTableMetadata.Builder clusteringKeyNames(LinkedHashSet val) { + clusteringKeyNames = val; + return this; + } + + public ObjectStorageTableMetadata.Builder clusteringOrders(Map val) { + clusteringOrders = val; + return this; + } + + public ObjectStorageTableMetadata.Builder secondaryIndexNames(Set val) { + secondaryIndexNames = val; + return this; + } + + public ObjectStorageTableMetadata.Builder columns(Map val) { + columns = val; + return this; + } + + public ObjectStorageTableMetadata build() { + return new ObjectStorageTableMetadata(this); + } + } } diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperFactory.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperFactory.java index 0160f27cf8..b207f75b2b 100644 --- 
a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperFactory.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperFactory.java @@ -8,6 +8,7 @@ import java.util.Objects; public class ObjectStorageWrapperFactory { + public static ObjectStorageWrapper create(ObjectStorageConfig objectStorageConfig) { if (Objects.equals(objectStorageConfig.getStorageName(), BlobConfig.STORAGE_NAME)) { assert objectStorageConfig instanceof BlobConfig; diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/PartitionIdentifier.java b/core/src/main/java/com/scalar/db/storage/objectstorage/PartitionIdentifier.java deleted file mode 100644 index 41d65deb90..0000000000 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/PartitionIdentifier.java +++ /dev/null @@ -1,45 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -public class PartitionIdentifier { - private final String namespaceName; - private final String tableName; - private final String partitionName; - - public PartitionIdentifier(String namespaceName, String tableName, String partitionName) { - this.namespaceName = namespaceName; - this.tableName = tableName; - this.partitionName = partitionName; - } - - public static PartitionIdentifier of( - String namespaceName, String tableName, String partitionName) { - return new PartitionIdentifier(namespaceName, tableName, partitionName); - } - - public String getNamespaceName() { - return namespaceName; - } - - public String getTableName() { - return tableName; - } - - public String getPartitionName() { - return partitionName; - } - - @Override - public int hashCode() { - return (namespaceName + tableName + partitionName).hashCode(); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (!(obj instanceof PartitionIdentifier)) return false; - PartitionIdentifier other = (PartitionIdentifier) obj; - return namespaceName.equals(other.namespaceName) - && tableName.equals(other.tableName) - && partitionName.equals(other.partitionName); - } -} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java deleted file mode 100644 index 19246231c0..0000000000 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java +++ /dev/null @@ -1,53 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.api.Result; -import com.scalar.db.api.TableMetadata; -import com.scalar.db.common.ResultImpl; -import com.scalar.db.io.Column; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import javax.annotation.concurrent.ThreadSafe; - -@ThreadSafe -public class ResultInterpreter { - private final List projections; - private final TableMetadata metadata; - - @SuppressFBWarnings("EI_EXPOSE_REP2") - public ResultInterpreter(List projections, TableMetadata metadata) { - this.projections = Objects.requireNonNull(projections); - this.metadata = Objects.requireNonNull(metadata); - } - - public Result interpret(ObjectStorageRecord record) { - Map> ret = new HashMap<>(); - - if (projections.isEmpty()) { - metadata.getColumnNames().forEach(name -> add(ret, name, record, metadata)); - } else { - projections.forEach(name -> add(ret, name, record, metadata)); - } - - return new ResultImpl(ret, metadata); - } - - private void add( - Map> columns, - String name, - 
ObjectStorageRecord record, - TableMetadata metadata) { - Object value; - if (record.getPartitionKey().containsKey(name)) { - value = record.getPartitionKey().get(name); - } else if (record.getClusteringKey().containsKey(name)) { - value = record.getClusteringKey().get(name); - } else { - value = record.getValues().get(name); - } - - columns.put(name, ColumnValueMapper.convert(value, name, metadata.getColumnDataType(name))); - } -} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ScannerImpl.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ScannerImpl.java deleted file mode 100644 index d7d14a39f0..0000000000 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ScannerImpl.java +++ /dev/null @@ -1,59 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.api.Result; -import com.scalar.db.common.AbstractScanner; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Optional; -import javax.annotation.Nonnull; -import javax.annotation.concurrent.NotThreadSafe; - -@NotThreadSafe -public class ScannerImpl extends AbstractScanner { - private final Iterator recordIterator; - private final ResultInterpreter resultInterpreter; - private final int recordCountLimit; - - private int recordCount; - - @SuppressFBWarnings("EI_EXPOSE_REP2") - public ScannerImpl( - Iterator recordIterator, - ResultInterpreter resultInterpreter, - int recordCountLimit) { - this.recordIterator = recordIterator; - this.resultInterpreter = resultInterpreter; - this.recordCountLimit = recordCountLimit; - this.recordCount = 0; - } - - @Override - @Nonnull - public Optional one() { - if (!recordIterator.hasNext()) { - return Optional.empty(); - } - if (recordCountLimit != 0 && recordCount >= recordCountLimit) { - return Optional.empty(); - } - recordCount++; - return Optional.of(resultInterpreter.interpret(recordIterator.next())); - } - - @Override - @Nonnull - public List all() { - List results = new ArrayList<>(); - Optional result; - while ((result = one()).isPresent()) { - results.add(result.get()); - } - return results; - } - - @Override - public void close() throws IOException {} -} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java b/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java deleted file mode 100644 index 8c14a278e4..0000000000 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java +++ /dev/null @@ -1,294 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.fasterxml.jackson.core.type.TypeReference; -import com.google.common.collect.Ordering; -import com.scalar.db.api.Get; -import com.scalar.db.api.Scan; -import com.scalar.db.api.ScanAll; -import com.scalar.db.api.Scanner; -import com.scalar.db.api.Selection; -import com.scalar.db.api.TableMetadata; -import com.scalar.db.common.CoreError; -import com.scalar.db.common.EmptyScanner; -import com.scalar.db.common.TableMetadataManager; -import com.scalar.db.exception.storage.ExecutionException; -import com.scalar.db.io.Column; -import com.scalar.db.io.Key; -import com.scalar.db.util.ScalarDbUtils; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; -import 
javax.annotation.Nonnull; -import javax.annotation.concurrent.ThreadSafe; - -@ThreadSafe -public class SelectStatementHandler extends StatementHandler { - public SelectStatementHandler( - ObjectStorageWrapper wrapper, TableMetadataManager metadataManager) { - super(wrapper, metadataManager); - } - - @Nonnull - public Scanner handle(Selection selection) throws ExecutionException { - TableMetadata tableMetadata = metadataManager.getTableMetadata(selection); - if (selection instanceof Get) { - if (ScalarDbUtils.isSecondaryIndexSpecified(selection, tableMetadata)) { - throw new UnsupportedOperationException( - CoreError.OBJECT_STORAGE_INDEX_NOT_SUPPORTED.buildMessage()); - } else { - return executeGet((Get) selection, tableMetadata); - } - } else { - if (selection instanceof ScanAll) { - return executeScanAll((ScanAll) selection, tableMetadata); - } else if (ScalarDbUtils.isSecondaryIndexSpecified(selection, tableMetadata)) { - throw new UnsupportedOperationException( - CoreError.OBJECT_STORAGE_INDEX_NOT_SUPPORTED.buildMessage()); - } else { - return executeScan((Scan) selection, tableMetadata); - } - } - } - - private Scanner executeGet(Get get, TableMetadata metadata) throws ExecutionException { - ObjectStorageOperation operation = new ObjectStorageOperation(get, metadata); - operation.checkArgument(Get.class); - Optional record = - getRecord( - getNamespace(get), - getTable(get), - operation.getConcatenatedPartitionKey(), - operation.getRecordId()); - if (!record.isPresent()) { - return new EmptyScanner(); - } - return new ScannerImpl( - Collections.singletonList(record.get()).iterator(), - new ResultInterpreter(get.getProjections(), metadata), - 1); - } - - private Scanner executeScan(Scan scan, TableMetadata metadata) throws ExecutionException { - ObjectStorageOperation operation = new ObjectStorageOperation(scan, metadata); - operation.checkArgument(Scan.class); - List records = - new ArrayList<>( - getRecordsInPartition( - getNamespace(scan), getTable(scan), operation.getConcatenatedPartitionKey())); - - records.sort( - (o1, o2) -> - new ClusteringKeyComparator(metadata) - .compare(o1.getClusteringKey(), o2.getClusteringKey())); - if (isReverseOrder(scan, metadata)) { - Collections.reverse(records); - } - - // If the scan is for DESC clustering order, use the end clustering key as a start key and the - // start clustering key as an end key - boolean scanForDescClusteringOrder = isScanForDescClusteringOrder(scan, metadata); - Optional startKey = - scanForDescClusteringOrder ? scan.getEndClusteringKey() : scan.getStartClusteringKey(); - boolean startInclusive = - scanForDescClusteringOrder ? scan.getEndInclusive() : scan.getStartInclusive(); - Optional endKey = - scanForDescClusteringOrder ? scan.getStartClusteringKey() : scan.getEndClusteringKey(); - boolean endInclusive = - scanForDescClusteringOrder ? 
scan.getStartInclusive() : scan.getEndInclusive(); - - if (startKey.isPresent()) { - records = - filterRecordsByClusteringKeyBoundary( - records, startKey.get(), true, startInclusive, metadata); - } - if (endKey.isPresent()) { - records = - filterRecordsByClusteringKeyBoundary( - records, endKey.get(), false, endInclusive, metadata); - } - - if (scan.getLimit() > 0) { - records = records.subList(0, Math.min(scan.getLimit(), records.size())); - } - - return new ScannerImpl( - records.iterator(), - new ResultInterpreter(scan.getProjections(), metadata), - scan.getLimit()); - } - - private Scanner executeScanAll(ScanAll scan, TableMetadata metadata) throws ExecutionException { - ObjectStorageOperation operation = new ObjectStorageOperation(scan, metadata); - operation.checkArgument(ScanAll.class); - Set records = getRecordsInTable(getNamespace(scan), getTable(scan)); - if (scan.getLimit() > 0) { - records = records.stream().limit(scan.getLimit()).collect(Collectors.toSet()); - } - return new ScannerImpl( - records.iterator(), - new ResultInterpreter(scan.getProjections(), metadata), - scan.getLimit()); - } - - private Map getPartition( - String namespace, String table, String partition) throws ObjectStorageWrapperException { - Optional response = - wrapper.get(ObjectStorageUtils.getObjectKey(namespace, table, partition)); - if (!response.isPresent()) { - return Collections.emptyMap(); - } - return Serializer.deserialize( - response.get().getPayload(), new TypeReference>() {}); - } - - private Optional getRecord( - String namespace, String table, String partition, String recordId) throws ExecutionException { - try { - Map recordsInPartition = - getPartition(namespace, table, partition); - if (recordsInPartition.containsKey(recordId)) { - return Optional.of(recordsInPartition.get(recordId)); - } else { - return Optional.empty(); - } - } catch (Exception e) { - throw new ExecutionException( - CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(e.getMessage()), e); - } - } - - private Set getRecordsInPartition( - String namespace, String table, String partition) throws ExecutionException { - try { - Map recordsInPartition = - getPartition(namespace, table, partition); - return new HashSet<>(recordsInPartition.values()); - } catch (Exception e) { - throw new ExecutionException( - CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(e.getMessage()), e); - } - } - - private Set getRecordsInTable(String namespace, String table) - throws ExecutionException { - try { - Set partitionNames = - wrapper.getKeys(ObjectStorageUtils.getObjectKey(namespace, table, "")).stream() - .map( - key -> - key.substring(key.lastIndexOf(ObjectStorageUtils.OBJECT_KEY_DELIMITER) + 1)) - .filter(partition -> !partition.isEmpty()) - .collect(Collectors.toSet()); - Set records = new HashSet<>(); - for (String key : partitionNames) { - records.addAll(getRecordsInPartition(namespace, table, key)); - } - return records; - } catch (Exception e) { - throw new ExecutionException( - CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(e.getMessage()), e); - } - } - - private boolean isReverseOrder(Scan scan, TableMetadata metadata) { - Boolean reverse = null; - Iterator iterator = metadata.getClusteringKeyNames().iterator(); - for (Scan.Ordering ordering : scan.getOrderings()) { - String clusteringKeyName = iterator.next(); - if (!ordering.getColumnName().equals(clusteringKeyName)) { - throw new IllegalArgumentException( - 
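The start/end swap above implements the caller-visible rule that clustering-key boundaries are interpreted against the table's clustering order, after the in-memory records have been sorted with ClusteringKeyComparator and before the limit is applied. A caller-side sketch of a scan with clustering-key boundaries and a limit, i.e. the inputs that executeScan filters and truncates (names are made up):

import com.scalar.db.api.Scan;
import com.scalar.db.io.Key;

// Illustrative only; namespace, table, and column names are assumptions for the example.
public class ScanRangeSketch {
  public static Scan boundedScan() {
    return Scan.newBuilder()
        .namespace("ns")
        .table("tbl")
        .partitionKey(Key.ofText("pk", "p1"))
        // Clustering-key boundaries; for a clustering key declared with DESC order the
        // handler above swaps start and end internally before filtering the sorted records.
        .start(Key.ofInt("ck", 10), true)  // inclusive boundary
        .end(Key.ofInt("ck", 20), false)   // exclusive boundary
        .limit(5)
        .build();
  }
}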
CoreError.OPERATION_CHECK_ERROR_ORDERING_NOT_PROPERLY_SPECIFIED.buildMessage(scan));
-      }
-      boolean rightOrder =
-          ordering.getOrder() != metadata.getClusteringOrder(ordering.getColumnName());
-      if (reverse == null) {
-        reverse = rightOrder;
-      } else {
-        if (reverse != rightOrder) {
-          throw new IllegalArgumentException(
-              CoreError.OPERATION_CHECK_ERROR_ORDERING_NOT_PROPERLY_SPECIFIED.buildMessage(scan));
-        }
-      }
-    }
-    return reverse != null && reverse;
-  }
-
-  private boolean isScanForDescClusteringOrder(Scan scan, TableMetadata tableMetadata) {
-    if (scan.getStartClusteringKey().isPresent()) {
-      Key startClusteringKey = scan.getStartClusteringKey().get();
-      String lastValueName =
-          startClusteringKey.getColumns().get(startClusteringKey.size() - 1).getName();
-      return tableMetadata.getClusteringOrder(lastValueName) == Scan.Ordering.Order.DESC;
-    }
-    if (scan.getEndClusteringKey().isPresent()) {
-      Key endClusteringKey = scan.getEndClusteringKey().get();
-      String lastValueName =
-          endClusteringKey.getColumns().get(endClusteringKey.size() - 1).getName();
-      return tableMetadata.getClusteringOrder(lastValueName) == Scan.Ordering.Order.DESC;
-    }
-    return false;
-  }
-
-  private List<ObjectStorageRecord> filterRecordsByClusteringKeyBoundary(
-      List<ObjectStorageRecord> records,
-      Key clusteringKey,
-      boolean isStart,
-      boolean isInclusive,
-      TableMetadata metadata) {
-    for (Column<?> column : clusteringKey.getColumns()) {
-      Scan.Ordering.Order order = metadata.getClusteringOrder(column.getName());
-      if (clusteringKey.getColumns().indexOf(column) == clusteringKey.size() - 1) {
-        return records.stream()
-            .filter(
-                record -> {
-                  Column<?> recordColumn =
-                      ColumnValueMapper.convert(
-                          record.getClusteringKey().get(column.getName()),
-                          column.getName(),
-                          column.getDataType());
-                  int cmp = Ordering.natural().compare(recordColumn, column);
-                  cmp = order == Scan.Ordering.Order.ASC ? cmp : -cmp;
-                  if (isStart) {
-                    if (isInclusive) {
-                      return cmp >= 0;
-                    } else {
-                      return cmp > 0;
-                    }
-                  } else {
-                    if (isInclusive) {
-                      return cmp <= 0;
-                    } else {
-                      return cmp < 0;
-                    }
-                  }
-                })
-            .collect(Collectors.toList());
-      } else {
-        List<ObjectStorageRecord> tmpRecords = new ArrayList<>();
-        records.forEach(
-            record -> {
-              Column<?> recordColumn =
-                  ColumnValueMapper.convert(
-                      record.getClusteringKey().get(column.getName()),
-                      column.getName(),
-                      column.getDataType());
-              int cmp = Ordering.natural().compare(recordColumn, column);
-              if (cmp == 0) {
-                tmpRecords.add(record);
-              }
-            });
-        if (tmpRecords.isEmpty()) {
-          return Collections.emptyList();
-        }
-        records = tmpRecords;
-      }
-    }
-    return records;
-  }
-}
diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java b/core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java
deleted file mode 100644
index 02739d4ec1..0000000000
--- a/core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java
+++ /dev/null
@@ -1,131 +0,0 @@
-package com.scalar.db.storage.objectstorage;
-
-import com.google.common.collect.Ordering;
-import com.scalar.db.api.ConditionalExpression;
-import com.scalar.db.api.Operation;
-import com.scalar.db.api.TableMetadata;
-import com.scalar.db.common.TableMetadataManager;
-import com.scalar.db.exception.storage.ExecutionException;
-import com.scalar.db.io.Column;
-import java.util.List;
-import javax.annotation.Nonnull;
-
-public class StatementHandler {
-  protected final ObjectStorageWrapper wrapper;
-  protected final TableMetadataManager metadataManager;
-
-  public StatementHandler(ObjectStorageWrapper wrapper, TableMetadataManager metadataManager) {
-    this.wrapper = wrapper;
-    this.metadataManager = metadataManager;
-  }
-
-  @Nonnull
-  protected String getNamespace(Operation operation) {
-    assert operation.forNamespace().isPresent();
-    return operation.forNamespace().get();
-  }
-
-  @Nonnull
-  protected String getTable(Operation operation) {
-    assert operation.forTable().isPresent();
-    return operation.forTable().get();
-  }
-
-  protected void validateConditions(
-      ObjectStorageRecord record, List<ConditionalExpression> expressions, TableMetadata metadata)
-      throws ExecutionException {
-    for (ConditionalExpression expression : expressions) {
-      Column<?> expectedColumn = expression.getColumn();
-      Column<?> actualColumn =
-          ColumnValueMapper.convert(
-              record.getValues().get(expectedColumn.getName()),
-              expectedColumn.getName(),
-              metadata.getColumnDataType(expectedColumn.getName()));
-      boolean validationFailed = false;
-      switch (expression.getOperator()) {
-        case EQ:
-          if (actualColumn.hasNullValue()) {
-            validationFailed = true;
-            break;
-          }
-          if (Ordering.natural().compare(actualColumn, expectedColumn) != 0) {
-            validationFailed = true;
-            break;
-          }
-          break;
-        case NE:
-          if (actualColumn.hasNullValue()) {
-            validationFailed = true;
-            break;
-          }
-          if (Ordering.natural().compare(actualColumn, expectedColumn) == 0) {
-            validationFailed = true;
-            break;
-          }
-          break;
-        case GT:
-          if (actualColumn.hasNullValue()) {
-            validationFailed = true;
-            break;
-          }
-          if (Ordering.natural().compare(actualColumn, expectedColumn) <= 0) {
-            validationFailed = true;
-            break;
-          }
-          break;
-        case GTE:
-          if (actualColumn.hasNullValue()) {
-            validationFailed = true;
-            break;
-          }
-          if (Ordering.natural().compare(actualColumn, expectedColumn) < 0) {
-            validationFailed = true;
-            break;
-          }
-          break;
-        case LT:
-          if (actualColumn.hasNullValue()) {
-            validationFailed = true;
-            break;
-          }
-          if (Ordering.natural().compare(actualColumn, expectedColumn) >= 0) {
-            validationFailed = true;
-            break;
-          }
-          break;
-        case LTE:
-          if (actualColumn.hasNullValue()) {
-            validationFailed = true;
-            break;
-          }
-          if (Ordering.natural().compare(actualColumn, expectedColumn) > 0) {
-            validationFailed = true;
-            break;
-          }
-          break;
-        case IS_NULL:
-          if (!actualColumn.hasNullValue()) {
-            validationFailed = true;
-            break;
-          }
-          break;
-        case IS_NOT_NULL:
-          if (actualColumn.hasNullValue()) {
-            validationFailed = true;
-            break;
-          }
-          break;
-        case LIKE:
-        case NOT_LIKE:
-        default:
-          throw new AssertionError("Unsupported operator");
-      }
-      if (validationFailed) {
-        throw new ExecutionException(
-            String.format(
-                "A condition failed. ConditionalExpression: %s, Column: %s",
-                expectedColumn, actualColumn));
-      }
-    }
-  }
-}
diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobProvider.java b/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobProvider.java
index ab1ffc4f35..e4676bebfb 100644
--- a/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobProvider.java
+++ b/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobProvider.java
@@ -3,6 +3,7 @@
 import com.scalar.db.storage.objectstorage.ObjectStorageProvider;
 
 public class BlobProvider implements ObjectStorageProvider {
+  @Override
   public String getName() {
     return BlobConfig.STORAGE_NAME;
diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/MutateStatementHandlerTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/MutateStatementHandlerTest.java
deleted file mode 100644
index c4f3ed455e..0000000000
--- a/core/src/test/java/com/scalar/db/storage/objectstorage/MutateStatementHandlerTest.java
+++ /dev/null
@@ -1,992 +0,0 @@
-package com.scalar.db.storage.objectstorage;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatThrownBy;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import com.fasterxml.jackson.core.type.TypeReference;
-import com.scalar.db.api.ConditionBuilder;
-import com.scalar.db.api.Delete;
-import com.scalar.db.api.Operation;
-import com.scalar.db.api.Put;
-import com.scalar.db.api.TableMetadata;
-import com.scalar.db.common.TableMetadataManager;
-import com.scalar.db.exception.storage.ExecutionException;
-import com.scalar.db.exception.storage.NoMutationException;
-import com.scalar.db.io.DataType;
-import com.scalar.db.io.Key;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Captor;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-
-public class MutateStatementHandlerTest {
-  private static final String ANY_NAMESPACE_NAME = "namespace";
-  private static final String ANY_TABLE_NAME = "table";
-  private static final String ANY_NAME_1 = "name1";
-  private static final String ANY_NAME_2 = "name2";
-  private static final String ANY_NAME_3 = "name3";
-  private static final String ANY_NAME_4 = "name4";
-  private static final String ANY_TEXT_1 = "text1";
-  private static final String ANY_TEXT_2 = "text2";
-  private static
final int ANY_INT_1 = 1; - private static final int ANY_INT_2 = 2; - private static final String VERSION = "version1"; - - private MutateStatementHandler handler; - @Mock private ObjectStorageWrapper wrapper; - @Mock private TableMetadataManager metadataManager; - @Mock private TableMetadata metadata; - - @Captor private ArgumentCaptor objectKeyCaptor; - @Captor private ArgumentCaptor payloadCaptor; - @Captor private ArgumentCaptor versionCaptor; - - @BeforeEach - public void setUp() throws Exception { - MockitoAnnotations.openMocks(this).close(); - - handler = new MutateStatementHandler(wrapper, metadataManager); - - when(metadataManager.getTableMetadata(any(Operation.class))).thenReturn(metadata); - when(metadata.getPartitionKeyNames()) - .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_1))); - when(metadata.getClusteringKeyNames()) - .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_2))); - when(metadata.getColumnDataType(ANY_NAME_3)).thenReturn(DataType.INT); - when(metadata.getColumnDataType(ANY_NAME_4)).thenReturn(DataType.INT); - } - - private Put preparePut() { - Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); - Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); - return Put.newBuilder() - .namespace(ANY_NAMESPACE_NAME) - .table(ANY_TABLE_NAME) - .partitionKey(partitionKey) - .clusteringKey(clusteringKey) - .intValue(ANY_NAME_3, ANY_INT_1) - .intValue(ANY_NAME_4, ANY_INT_2) - .build(); - } - - private Put preparePutWithoutClusteringKey() { - Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); - return Put.newBuilder() - .namespace(ANY_NAMESPACE_NAME) - .table(ANY_TABLE_NAME) - .partitionKey(partitionKey) - .intValue(ANY_NAME_3, ANY_INT_1) - .intValue(ANY_NAME_4, ANY_INT_2) - .build(); - } - - private Delete prepareDelete() { - Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); - Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); - return Delete.newBuilder() - .namespace(ANY_NAMESPACE_NAME) - .table(ANY_TABLE_NAME) - .partitionKey(partitionKey) - .clusteringKey(clusteringKey) - .build(); - } - - private Delete prepareDeleteWithoutClusteringKey() { - Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); - return Delete.newBuilder() - .namespace(ANY_NAMESPACE_NAME) - .table(ANY_TABLE_NAME) - .partitionKey(partitionKey) - .build(); - } - - private ObjectStorageRecord prepareExistingRecord() { - Map values = new HashMap<>(); - values.put(ANY_NAME_3, ANY_INT_1); - values.put(ANY_NAME_4, ANY_INT_2); - return new ObjectStorageRecord("concat_key", null, null, values); - } - - @Test - public void handle_PutWithoutConditionsGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() - throws Exception { - // Arrange - Put put = preparePut(); - ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); - String expectedObjectKey = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); - when(wrapper.get(anyString())).thenReturn(Optional.empty()); - - // Act - handler.handle(put); - - // Assert - assert_Put_WhenPartitionDoesNotExist_ShouldCallWrapperInsert( - expectedObjectKey, mutation.getRecordId()); - } - - @Test - public void handle_PutWithoutConditionsGiven_WhenPartitionExists_ShouldCallWrapperUpdate() - throws Exception { - // Arrange - Put put = preparePut(); - ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); - String expectedObjectKey = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, 
mutation.getConcatenatedPartitionKey()); - - Map partition = new HashMap<>(); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act - handler.handle(put); - - // Assert - assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( - expectedObjectKey, mutation.getRecordId()); - } - - @Test - public void - handle_PutWithoutClusteringKeyGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() - throws Exception { - // Arrange - Put put = preparePutWithoutClusteringKey(); - ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); - String expectedObjectKey = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); - when(wrapper.get(anyString())).thenReturn(Optional.empty()); - - // Act - handler.handle(put); - - // Assert - assert_Put_WhenPartitionDoesNotExist_ShouldCallWrapperInsert( - expectedObjectKey, mutation.getRecordId()); - } - - @Test - public void handle_PutWithoutClusteringKeyGiven_WhenPartitionExists_ShouldCallWrapperUpdate() - throws Exception { - // Arrange - Put put = preparePutWithoutClusteringKey(); - ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); - String expectedObjectKey = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); - - Map partition = new HashMap<>(); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act - handler.handle(put); - - // Assert - verify(wrapper).get(objectKeyCaptor.capture()); - assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); - - assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( - expectedObjectKey, mutation.getRecordId()); - } - - @Test - public void handle_PutWithoutConditionsWrapperExceptionThrown_ShouldThrowExecutionException() - throws Exception { - // Arrange - Put put = preparePut(); - ObjectStorageWrapperException exception = new ObjectStorageWrapperException("Test error"); - when(wrapper.get(anyString())).thenThrow(exception); - - // Act & Assert - assertThatThrownBy(() -> handler.handle(put)) - .isInstanceOf(ExecutionException.class) - .hasCause(exception); - } - - @Test - public void handle_PutIfNotExistsGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() - throws Exception { - // Arrange - Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfNotExists()).build(); - ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); - String expectedObjectKey = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); - - when(wrapper.get(anyString())).thenReturn(Optional.empty()); - - // Act - handler.handle(put); - - // Assert - assert_Put_WhenPartitionDoesNotExist_ShouldCallWrapperInsert( - expectedObjectKey, mutation.getRecordId()); - } - - @Test - public void - handle_PutIfNotExistsGiven_WhenPartitionExistsButRecordDoesNotExist_ShouldCallWrapperUpdate() - throws Exception { - // Arrange - Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfNotExists()).build(); - ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); - String 
expectedObjectKey = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); - - Map partition = new HashMap<>(); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act - handler.handle(put); - - // Assert - assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( - expectedObjectKey, mutation.getRecordId()); - } - - @Test - public void - handle_PutIfNotExistsGiven_WhenPartitionAndRecordExist_ShouldThrowNoMutationException() - throws Exception { - // Arrange - Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfNotExists()).build(); - Map partition = new HashMap<>(); - ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); - partition.put(mutation.getRecordId(), prepareExistingRecord()); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act & Assert - assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); - } - - @Test - public void handle_PutIfExistsGiven_WhenPartitionDoesNotExist_ShouldThrowNoMutationException() - throws Exception { - // Arrange - Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfExists()).build(); - when(wrapper.get(anyString())).thenReturn(Optional.empty()); - - // Act & Assert - assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); - } - - @Test - public void - handle_PutIfExistsGiven_WhenPartitionExistsButRecordDoesNotExist_ShouldThrowNoMutationException() - throws Exception { - // Arrange - Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfExists()).build(); - Map partition = new HashMap<>(); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act & Assert - assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); - } - - @Test - public void handle_PutIfExistsGiven_WhenPartitionAndRecordExist_ShouldCallWrapperUpdate() - throws Exception { - // Arrange - Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfExists()).build(); - ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); - String expectedObjectKey = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); - - Map partition = new HashMap<>(); - partition.put(mutation.getRecordId(), prepareExistingRecord()); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act - handler.handle(put); - - // Assert - assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( - expectedObjectKey, mutation.getRecordId()); - } - - @Test - public void - handle_PutIfGiven_WhenConditionMatchesAndPartitionDoesNotExist_ShouldThrowNoMutationException() - throws Exception { - // Arrange - Put put = - Put.newBuilder(preparePut()) - .condition( - 
ConditionBuilder.putIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) - .build()) - .build(); - when(wrapper.get(anyString())).thenReturn(Optional.empty()); - - // Act & Assert - assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); - } - - @Test - public void - handle_PutIfGiven_WhenConditionMatchesAndPartitionExistsButRecordDoesNotExist_ShouldThrowNoMutationException() - throws Exception { - // Arrange - Put put = - Put.newBuilder(preparePut()) - .condition( - ConditionBuilder.putIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) - .build()) - .build(); - Map partition = new HashMap<>(); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act & Assert - assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); - } - - @Test - public void - handle_PutIfGiven_WhenConditionMatchesAndPartitionAndRecordExist_ShouldCallWrapperUpdate() - throws Exception { - // Arrange - Put put = - Put.newBuilder(preparePut()) - .condition( - ConditionBuilder.putIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) - .build()) - .build(); - ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); - String expectedObjectKey = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); - - Map partition = new HashMap<>(); - partition.put(mutation.getRecordId(), prepareExistingRecord()); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act - handler.handle(put); - - // Assert - assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( - expectedObjectKey, mutation.getRecordId()); - } - - @Test - public void handle_PutIfGiven_WhenConditionDoesNotMatch_ShouldThrowNoMutationException() - throws Exception { - // Arrange - Put put = - Put.newBuilder(preparePut()) - .condition( - ConditionBuilder.putIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(999)) - .build()) - .build(); - Map partition = new HashMap<>(); - ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); - partition.put(mutation.getRecordId(), prepareExistingRecord()); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act & Assert - assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); - } - - private void assert_Put_WhenPartitionDoesNotExist_ShouldCallWrapperInsert( - String expectedObjectKey, String expectedConcatenatedKey) - throws ObjectStorageWrapperException { - verify(wrapper).get(objectKeyCaptor.capture()); - assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); - - verify(wrapper).insert(objectKeyCaptor.capture(), payloadCaptor.capture()); - assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); - - Map insertedPartition = - Serializer.deserialize( - payloadCaptor.getValue(), new TypeReference>() {}); - assertThat(insertedPartition).containsKey(expectedConcatenatedKey); - 
assertThat(insertedPartition.get(expectedConcatenatedKey).getValues()) - .containsEntry(ANY_NAME_3, ANY_INT_1) - .containsEntry(ANY_NAME_4, ANY_INT_2); - } - - private void assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( - String expectedObjectKey, String expectedConcatenatedKey) - throws ObjectStorageWrapperException { - verify(wrapper) - .update(objectKeyCaptor.capture(), payloadCaptor.capture(), versionCaptor.capture()); - assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); - - Map updatedPartition = - Serializer.deserialize( - payloadCaptor.getValue(), new TypeReference>() {}); - assertThat(updatedPartition).containsKey(expectedConcatenatedKey); - assertThat(updatedPartition.get(expectedConcatenatedKey).getValues()) - .containsEntry(ANY_NAME_3, ANY_INT_1) - .containsEntry(ANY_NAME_4, ANY_INT_2); - assertThat(versionCaptor.getValue()).isEqualTo(VERSION); - } - - @Test - public void - handle_DeleteWithoutConditionsGiven_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate() - throws Exception { - // Arrange - Delete delete = prepareDelete(); - ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); - String expectedObjectKey = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); - - Map partition = new HashMap<>(); - partition.put(mutation.getRecordId(), prepareExistingRecord()); - String expectedExistingRecordKey = "existing_record_key"; - partition.put(expectedExistingRecordKey, prepareExistingRecord()); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act - handler.handle(delete); - - // Assert - assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( - expectedObjectKey, mutation.getConcatenatedPartitionKey(), expectedExistingRecordKey); - } - - @Test - public void handle_DeleteWithoutConditionsGiven_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete() - throws Exception { - // Arrange - Delete delete = prepareDelete(); - ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); - String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); - String expectedObjectKey = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); - - Map partition = new HashMap<>(); - partition.put(mutation.getRecordId(), prepareExistingRecord()); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act - handler.handle(delete); - - // Assert - assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete(expectedObjectKey); - } - - @Test - public void - handle_DeleteWithoutClusteringKeyGiven_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate() - throws Exception { - // Arrange - Delete delete = prepareDeleteWithoutClusteringKey(); - ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); - String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); - String expectedObjectKey = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); - - Map partition = new HashMap<>(); - partition.put(mutation.getRecordId(), prepareExistingRecord()); - String 
expectedExistingRecordKey = "existing_record_key"; - partition.put(expectedExistingRecordKey, prepareExistingRecord()); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act - handler.handle(delete); - - // Assert - assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( - expectedObjectKey, mutation.getRecordId(), expectedExistingRecordKey); - } - - @Test - public void - handle_DeleteWithoutClusteringKeyGiven_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete() - throws Exception { - // Arrange - Delete delete = prepareDeleteWithoutClusteringKey(); - ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); - String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); - String expectedObjectKey = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); - - Map partition = new HashMap<>(); - partition.put(mutation.getRecordId(), prepareExistingRecord()); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act - handler.handle(delete); - - // Assert - assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete(expectedObjectKey); - } - - @Test - public void handle_DeleteWithoutConditionsWrapperExceptionThrown_ShouldThrowExecutionException() - throws Exception { - // Arrange - Delete delete = prepareDelete(); - ObjectStorageWrapperException exception = new ObjectStorageWrapperException("Test error"); - when(wrapper.get(anyString())).thenThrow(exception); - - // Act & Assert - assertThatThrownBy(() -> handler.handle(delete)) - .isInstanceOf(ExecutionException.class) - .hasCause(exception); - } - - @Test - public void handle_DeleteIfExistsGiven_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate() - throws Exception { - // Arrange - Delete delete = - Delete.newBuilder(prepareDelete()).condition(ConditionBuilder.deleteIfExists()).build(); - ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); - String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); - String expectedObjectKey = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); - - Map partition = new HashMap<>(); - partition.put(mutation.getRecordId(), prepareExistingRecord()); - String expectedExistingRecordKey = "existing_record_key"; - partition.put(expectedExistingRecordKey, prepareExistingRecord()); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act - handler.handle(delete); - - // Assert - assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( - expectedObjectKey, mutation.getRecordId(), expectedExistingRecordKey); - } - - @Test - public void handle_DeleteIfExistsGiven_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete() - throws Exception { - // Arrange - Delete delete = - Delete.newBuilder(prepareDelete()).condition(ConditionBuilder.deleteIfExists()).build(); - ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); - String concatenatedPartitionKey = 
mutation.getConcatenatedPartitionKey(); - String expectedObjectKey = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); - - Map partition = new HashMap<>(); - partition.put(mutation.getRecordId(), prepareExistingRecord()); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act - handler.handle(delete); - - // Assert - assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete(expectedObjectKey); - } - - @Test - public void handle_DeleteIfExistsGiven_WhenPartitionDoesNotExist_ShouldThrowNoMutationException() - throws Exception { - // Arrange - Delete delete = - Delete.newBuilder(prepareDelete()).condition(ConditionBuilder.deleteIfExists()).build(); - when(wrapper.get(anyString())).thenReturn(Optional.empty()); - - // Act & Assert - assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); - } - - @Test - public void - handle_DeleteIfExistsGiven_WhenPartitionExistsButRecordDoesNotExist_ShouldThrowNoMutationException() - throws Exception { - // Arrange - Delete delete = - Delete.newBuilder(prepareDelete()).condition(ConditionBuilder.deleteIfExists()).build(); - Map partition = new HashMap<>(); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act & Assert - assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); - } - - @Test - public void - handle_DeleteIfGiven_WhenConditionMatchesAndPartitionAndRecordExistAndNewPartitionIsNotEmpty_ShouldCallWrapperUpdate() - throws Exception { - // Arrange - Delete delete = - Delete.newBuilder(prepareDelete()) - .condition( - ConditionBuilder.deleteIf( - ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) - .build()) - .build(); - ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); - String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); - String expectedObjectKey = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); - - Map partition = new HashMap<>(); - partition.put(mutation.getRecordId(), prepareExistingRecord()); - String expectedExistingRecordKey = "existing_record_key"; - partition.put(expectedExistingRecordKey, prepareExistingRecord()); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act - handler.handle(delete); - - // Assert - assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( - expectedObjectKey, mutation.getRecordId(), expectedExistingRecordKey); - } - - @Test - public void - handle_DeleteIfGiven_WhenConditionMatchesAndPartitionAndRecordExistAndPartitionIsEmpty_ShouldCallWrapperDelete() - throws Exception { - // Arrange - Delete delete = - Delete.newBuilder(prepareDelete()) - .condition( - ConditionBuilder.deleteIf( - ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) - .build()) - .build(); - ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); - String concatenatedPartitionKey = 
mutation.getConcatenatedPartitionKey(); - String expectedObjectKey = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); - Map partition = new HashMap<>(); - partition.put(mutation.getRecordId(), prepareExistingRecord()); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act - handler.handle(delete); - - // Assert - assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete(expectedObjectKey); - } - - @Test - public void - handle_DeleteIfGiven_WhenConditionMatchesAndPartitionExistsButRecordDoesNotExist_ShouldThrowNoMutationException() - throws Exception { - // Arrange - Delete delete = - Delete.newBuilder(prepareDelete()) - .condition( - ConditionBuilder.deleteIf( - ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) - .build()) - .build(); - Map partition = new HashMap<>(); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act & Assert - assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); - } - - @Test - public void - handle_DeleteIfGiven_WhenConditionMatchesAndPartitionDoesNotExist_ShouldThrowNoMutationException() - throws Exception { - // Arrange - Delete delete = - Delete.newBuilder(prepareDelete()) - .condition( - ConditionBuilder.deleteIf( - ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) - .build()) - .build(); - when(wrapper.get(anyString())).thenReturn(Optional.empty()); - - // Act & Assert - assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); - } - - @Test - public void handle_DeleteIfGiven_WhenConditionDoesNotMatch_ShouldThrowNoMutationException() - throws Exception { - // Arrange - Delete delete = - Delete.newBuilder(prepareDelete()) - .condition( - ConditionBuilder.deleteIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(999)) - .build()) - .build(); - ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); - Map partition = new HashMap<>(); - partition.put(mutation.getRecordId(), prepareExistingRecord()); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act & Assert - assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); - } - - private void assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( - String expectedObjectKey, String expectedConcatenatedKey, String expectedExistingRecordKey) - throws ObjectStorageWrapperException { - verify(wrapper) - .update(objectKeyCaptor.capture(), payloadCaptor.capture(), versionCaptor.capture()); - assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); - - Map updatedPartition = - Serializer.deserialize( - payloadCaptor.getValue(), new TypeReference>() {}); - assertThat(updatedPartition).doesNotContainKey(expectedConcatenatedKey); - assertThat(updatedPartition).containsKey(expectedExistingRecordKey); - } - - private void assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete( - String expectedObjectKey) throws 
ObjectStorageWrapperException { - verify(wrapper).delete(objectKeyCaptor.capture(), versionCaptor.capture()); - assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); - assertThat(versionCaptor.getValue()).isEqualTo(VERSION); - } - - @Test - public void - handle_MultipleMutationsForSinglePartitionGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() - throws Exception { - // Arrange - Put put1 = preparePut(); - Put put2 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put2")).build(); - Put put3 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put3")).build(); - Put put4 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put4")).build(); - ObjectStorageMutation mutation1 = new ObjectStorageMutation(put1, metadata); - ObjectStorageMutation mutation2 = new ObjectStorageMutation(put2, metadata); - ObjectStorageMutation mutation3 = new ObjectStorageMutation(put3, metadata); - ObjectStorageMutation mutation4 = new ObjectStorageMutation(put4, metadata); - String expectedObjectKey = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation1.getConcatenatedPartitionKey()); - when(wrapper.get(anyString())).thenReturn(Optional.empty()); - - // Act - handler.handle(Arrays.asList(put1, put2, put3, put4)); - - // Assert - verify(wrapper).get(objectKeyCaptor.capture()); - assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); - - verify(wrapper).insert(objectKeyCaptor.capture(), payloadCaptor.capture()); - assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); - - Map insertedPartition = - Serializer.deserialize( - payloadCaptor.getValue(), new TypeReference>() {}); - assertThat(insertedPartition).containsKey(mutation1.getRecordId()); - assertThat(insertedPartition.get(mutation1.getRecordId()).getValues()) - .containsEntry(ANY_NAME_3, ANY_INT_1) - .containsEntry(ANY_NAME_4, ANY_INT_2); - assertThat(insertedPartition).containsKey(mutation2.getRecordId()); - assertThat(insertedPartition.get(mutation2.getRecordId()).getValues()) - .containsEntry(ANY_NAME_3, ANY_INT_1) - .containsEntry(ANY_NAME_4, ANY_INT_2); - assertThat(insertedPartition).containsKey(mutation3.getRecordId()); - assertThat(insertedPartition.get(mutation3.getRecordId()).getValues()) - .containsEntry(ANY_NAME_3, ANY_INT_1) - .containsEntry(ANY_NAME_4, ANY_INT_2); - assertThat(insertedPartition).containsKey(mutation4.getRecordId()); - assertThat(insertedPartition.get(mutation4.getRecordId()).getValues()) - .containsEntry(ANY_NAME_3, ANY_INT_1) - .containsEntry(ANY_NAME_4, ANY_INT_2); - } - - @Test - public void - handle_MultipleMutationsForSinglePartitionGiven_WhenPartitionExists_ShouldCallWrapperUpdate() - throws Exception { - // Arrange - Put put1 = preparePut(); - Put put2 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put2")).build(); - Put put3 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put3")).build(); - Put put4 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put4")).build(); - ObjectStorageMutation mutation1 = new ObjectStorageMutation(put1, metadata); - ObjectStorageMutation mutation2 = new ObjectStorageMutation(put2, metadata); - ObjectStorageMutation mutation3 = new ObjectStorageMutation(put3, metadata); - ObjectStorageMutation mutation4 = new ObjectStorageMutation(put4, metadata); - String expectedObjectKey = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation1.getConcatenatedPartitionKey()); - Map partition 
= new HashMap<>(); - String serializedPartition = Serializer.serialize(partition); - ObjectStorageWrapperResponse response = - new ObjectStorageWrapperResponse(serializedPartition, VERSION); - when(wrapper.get(anyString())).thenReturn(Optional.of(response)); - - // Act - handler.handle(Arrays.asList(put1, put2, put3, put4)); - - // Assert - verify(wrapper) - .update(objectKeyCaptor.capture(), payloadCaptor.capture(), versionCaptor.capture()); - assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); - Map updatedPartition = - Serializer.deserialize( - payloadCaptor.getValue(), new TypeReference>() {}); - assertThat(updatedPartition).containsKey(mutation1.getRecordId()); - assertThat(updatedPartition.get(mutation1.getRecordId()).getValues()) - .containsEntry(ANY_NAME_3, ANY_INT_1) - .containsEntry(ANY_NAME_4, ANY_INT_2); - assertThat(updatedPartition).containsKey(mutation2.getRecordId()); - assertThat(updatedPartition.get(mutation2.getRecordId()).getValues()) - .containsEntry(ANY_NAME_3, ANY_INT_1) - .containsEntry(ANY_NAME_4, ANY_INT_2); - assertThat(updatedPartition).containsKey(mutation3.getRecordId()); - assertThat(updatedPartition.get(mutation3.getRecordId()).getValues()) - .containsEntry(ANY_NAME_3, ANY_INT_1) - .containsEntry(ANY_NAME_4, ANY_INT_2); - assertThat(updatedPartition).containsKey(mutation4.getRecordId()); - assertThat(updatedPartition.get(mutation4.getRecordId()).getValues()) - .containsEntry(ANY_NAME_3, ANY_INT_1) - .containsEntry(ANY_NAME_4, ANY_INT_2); - assertThat(versionCaptor.getValue()).isEqualTo(VERSION); - } - - @Test - public void - handle_MultipleMutationsForDifferentPartitionGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() - throws Exception { - // Arrange - Put put1 = preparePut(); - Put put2 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put2")).build(); - Put put3 = - Put.newBuilder(preparePut()) - .partitionKey(Key.ofText(ANY_NAME_1, ANY_TEXT_2)) - .clusteringKey(Key.ofText(ANY_NAME_2, "put3")) - .build(); - Put put4 = - Put.newBuilder(preparePut()) - .partitionKey(Key.ofText(ANY_NAME_1, ANY_TEXT_2)) - .clusteringKey(Key.ofText(ANY_NAME_2, "put4")) - .build(); - ObjectStorageMutation mutation1 = new ObjectStorageMutation(put1, metadata); - ObjectStorageMutation mutation2 = new ObjectStorageMutation(put2, metadata); - ObjectStorageMutation mutation3 = new ObjectStorageMutation(put3, metadata); - ObjectStorageMutation mutation4 = new ObjectStorageMutation(put4, metadata); - String expectedObjectKey1 = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation1.getConcatenatedPartitionKey()); - String expectedObjectKey2 = - ObjectStorageUtils.getObjectKey( - ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation3.getConcatenatedPartitionKey()); - when(wrapper.get(anyString())).thenReturn(Optional.empty()); - - // Act - handler.handle(Arrays.asList(put1, put2, put3, put4)); - - // Assert - verify(wrapper, times(2)).get(objectKeyCaptor.capture()); - List capturedObjectKeys = objectKeyCaptor.getAllValues(); - assertThat(capturedObjectKeys) - .containsExactlyInAnyOrder(expectedObjectKey1, expectedObjectKey2); - verify(wrapper, times(2)).insert(objectKeyCaptor.capture(), payloadCaptor.capture()); - List insertedObjectKeys = objectKeyCaptor.getAllValues().subList(2, 4); - assertThat(insertedObjectKeys) - .containsExactlyInAnyOrder(expectedObjectKey1, expectedObjectKey2); - - List insertedPayloads = payloadCaptor.getAllValues(); - for (int i = 0; i < insertedPayloads.size(); i++) { - Map insertedPartition = - 
Serializer.deserialize( - insertedPayloads.get(i), new TypeReference>() {}); - if (insertedObjectKeys.get(i).equals(expectedObjectKey1)) { - assertThat(insertedPartition).containsKey(mutation1.getRecordId()); - assertThat(insertedPartition.get(mutation1.getRecordId()).getValues()) - .containsEntry(ANY_NAME_3, ANY_INT_1) - .containsEntry(ANY_NAME_4, ANY_INT_2); - assertThat(insertedPartition).containsKey(mutation2.getRecordId()); - assertThat(insertedPartition.get(mutation2.getRecordId()).getValues()) - .containsEntry(ANY_NAME_3, ANY_INT_1) - .containsEntry(ANY_NAME_4, ANY_INT_2); - } else if (insertedObjectKeys.get(i).equals(expectedObjectKey2)) { - assertThat(insertedPartition).containsKey(mutation3.getRecordId()); - assertThat(insertedPartition.get(mutation3.getRecordId()).getValues()) - .containsEntry(ANY_NAME_3, ANY_INT_1) - .containsEntry(ANY_NAME_4, ANY_INT_2); - assertThat(insertedPartition).containsKey(mutation4.getRecordId()); - assertThat(insertedPartition.get(mutation4.getRecordId()).getValues()) - .containsEntry(ANY_NAME_3, ANY_INT_1) - .containsEntry(ANY_NAME_4, ANY_INT_2); - } - } - } -} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java index f2bdd1069e..956ecd24c3 100644 --- a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java @@ -1,8 +1,10 @@ package com.scalar.db.storage.objectstorage; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatCode; import static org.assertj.core.api.Assertions.catchThrowable; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -73,12 +75,13 @@ public void getTableMetadata_ShouldReturnCorrectTableMetadata() throws Exception Map clusteringOrders = ImmutableMap.of("c2", "ASC", "c3", "DESC"); ObjectStorageTableMetadata objectStorageTableMetadata = - new ObjectStorageTableMetadata( - partitionKeyNames, - clusteringKeyNames, - clusteringOrders, - Collections.emptySet(), - columnsMap); + ObjectStorageTableMetadata.newBuilder() + .partitionKeyNames(partitionKeyNames) + .clusteringKeyNames(clusteringKeyNames) + .clusteringOrders(clusteringOrders) + .secondaryIndexNames(Collections.emptySet()) + .columns(columnsMap) + .build(); Map metadataTable = new HashMap<>(); metadataTable.put(tableMetadataKey, objectStorageTableMetadata); @@ -296,7 +299,8 @@ public void createTable_ShouldInsertTableMetadata() throws Exception { } @Test - public void dropNamespace_ShouldDeleteNamespaceMetadata() throws Exception { + public void dropNamespace_ShouldDeleteNamespaceMetadataAndDeleteMetadataTableIfEmpty() + throws Exception { // Arrange String namespace = "ns"; Map metadataTable = new HashMap<>(); @@ -317,6 +321,132 @@ public void dropNamespace_ShouldDeleteNamespaceMetadata() throws Exception { verify(wrapper).delete(eq(expectedObjectKey), eq("version1")); } + @Test + public void dropNamespace_ShouldDeleteNamespaceMetadataAndUpdateMetadataTableIfNotEmpty() + throws Exception { + // Arrange + String namespace = "ns"; + String anotherNamespace = "other_ns"; + Map metadataTable = new HashMap<>(); + metadataTable.put(namespace, new ObjectStorageNamespaceMetadata(namespace)); + metadataTable.put(anotherNamespace, new 
ObjectStorageNamespaceMetadata(anotherNamespace)); + String serializedMetadata = Serializer.serialize(metadataTable); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedMetadata, "version1"); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE); + + when(wrapper.get(expectedObjectKey)).thenReturn(Optional.of(response)); + + // Act + admin.dropNamespace(namespace); + + // Assert + verify(wrapper).update(eq(expectedObjectKey), payloadCaptor.capture(), eq("version1")); + Map updatedMetadata = + Serializer.deserialize( + payloadCaptor.getValue(), + new TypeReference>() {}); + assertThat(updatedMetadata).doesNotContainKey(namespace); + assertThat(updatedMetadata).containsKey(anotherNamespace); + } + + @Test + public void dropTable_ShouldDeleteTableMetadataAndDropMetadataTableIfEmpty() throws Exception { + // Arrange + String namespace = "ns"; + String table = "table"; + String tableMetadataKey = namespace + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + table; + + Map metadataTable = new HashMap<>(); + metadataTable.put(tableMetadataKey, ObjectStorageTableMetadata.newBuilder().build()); + String serializedMetadata = Serializer.serialize(metadataTable); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedMetadata, "version1"); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE); + + when(wrapper.get(expectedObjectKey)).thenReturn(Optional.of(response)); + + // Act + admin.dropTable(namespace, table); + + // Assert + verify(wrapper).delete(eq(expectedObjectKey), eq("version1")); + } + + @Test + public void dropTable_ShouldDeleteTableMetadataAndUpdateMetadataTableIfNotEmpty() + throws Exception { + // Arrange + String namespace = "ns"; + String table = "table"; + String anotherTable = "tbl2"; + String tableMetadataKey = namespace + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + table; + String anotherTableMetadataKey = + namespace + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + anotherTable; + + Map metadataTable = new HashMap<>(); + metadataTable.put(tableMetadataKey, ObjectStorageTableMetadata.newBuilder().build()); + metadataTable.put(anotherTableMetadataKey, ObjectStorageTableMetadata.newBuilder().build()); + String serializedMetadata = Serializer.serialize(metadataTable); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedMetadata, "version1"); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE); + + when(wrapper.get(expectedObjectKey)).thenReturn(Optional.of(response)); + + // Act + admin.dropTable(namespace, table); + + // Assert + verify(wrapper).update(eq(expectedObjectKey), payloadCaptor.capture(), eq("version1")); + Map updatedMetadata = + Serializer.deserialize( + payloadCaptor.getValue(), + new TypeReference>() {}); + assertThat(updatedMetadata).doesNotContainKey(tableMetadataKey); + assertThat(updatedMetadata).containsKey(anotherTableMetadataKey); + } + + @Test + public void truncateTable_ShouldDeleteTableData() throws Exception { + // Arrange + String namespace = "ns"; + String table = "table"; + String tableDataPrefix = ObjectStorageUtils.getObjectKey(namespace, table, ""); + + // Act + admin.truncateTable(namespace, table); + + // Assert + verify(wrapper).deleteByPrefix(tableDataPrefix); + verify(wrapper, never()) + .delete( + 
ObjectStorageUtils.getObjectKey( + METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE)); + } + + @Test + public void truncateTable_WithMetadataOnlyTable_ShouldNotThrowException() throws Exception { + // Arrange + String namespace = "ns"; + String table = "table"; + String tableDataPrefix = ObjectStorageUtils.getObjectKey(namespace, table, ""); + + // Act Assert + assertThatCode(() -> admin.truncateTable(namespace, table)).doesNotThrowAnyException(); + verify(wrapper).deleteByPrefix(tableDataPrefix); + verify(wrapper, never()) + .delete( + ObjectStorageUtils.getObjectKey( + METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE)); + } + @Test public void getNamespaceTableNames_ShouldReturnTableNamesProperly() throws Exception { // Arrange @@ -326,12 +456,9 @@ public void getNamespaceTableNames_ShouldReturnTableNamesProperly() throws Excep String tableMetadataKey3 = "other_ns" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + "t3"; Map metadataTable = new HashMap<>(); - metadataTable.put( - tableMetadataKey1, new ObjectStorageTableMetadata(null, null, null, null, null)); - metadataTable.put( - tableMetadataKey2, new ObjectStorageTableMetadata(null, null, null, null, null)); - metadataTable.put( - tableMetadataKey3, new ObjectStorageTableMetadata(null, null, null, null, null)); + metadataTable.put(tableMetadataKey1, ObjectStorageTableMetadata.newBuilder().build()); + metadataTable.put(tableMetadataKey2, ObjectStorageTableMetadata.newBuilder().build()); + metadataTable.put(tableMetadataKey3, ObjectStorageTableMetadata.newBuilder().build()); String serializedTableMetadata = Serializer.serialize(metadataTable); ObjectStorageWrapperResponse tableMetadataResponse = @@ -372,8 +499,11 @@ public void addNewColumnToTable_ShouldWorkProperly() throws Exception { LinkedHashSet partitionKeyNames = Sets.newLinkedHashSet(currentColumn); Map columns = ImmutableMap.of(currentColumn, "text"); ObjectStorageTableMetadata existingTableMetadata = - new ObjectStorageTableMetadata( - partitionKeyNames, null, null, Collections.emptySet(), columns); + ObjectStorageTableMetadata.newBuilder() + .partitionKeyNames(partitionKeyNames) + .secondaryIndexNames(Collections.emptySet()) + .columns(columns) + .build(); Map metadataTable = new HashMap<>(); metadataTable.put(tableMetadataKey, existingTableMetadata); @@ -484,17 +614,6 @@ public void upgrade_WithExistingTables_ShouldUpsertNamespaces() throws Exception String tableMetadataKey2 = "ns1" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + "tbl2"; String tableMetadataKey3 = "ns2" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + "tbl3"; - Map tableMetadataMap = new HashMap<>(); - tableMetadataMap.put( - tableMetadataKey1, new ObjectStorageTableMetadata(null, null, null, null, null)); - tableMetadataMap.put( - tableMetadataKey2, new ObjectStorageTableMetadata(null, null, null, null, null)); - tableMetadataMap.put( - tableMetadataKey3, new ObjectStorageTableMetadata(null, null, null, null, null)); - String serializedTableMetadata = Serializer.serialize(tableMetadataMap); - ObjectStorageWrapperResponse tableMetadataResponse = - new ObjectStorageWrapperResponse(serializedTableMetadata, "version1"); - Map namespaceMetadataMap = new HashMap<>(); String serializedNamespaceMetadata = Serializer.serialize(namespaceMetadataMap); ObjectStorageWrapperResponse namespaceMetadataResponse = @@ -508,6 +627,13 @@ public void upgrade_WithExistingTables_ShouldUpsertNamespaces() throws Exception METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE); // Mock table 
metadata to return existing tables + Map tableMetadataMap = new HashMap<>(); + tableMetadataMap.put(tableMetadataKey1, ObjectStorageTableMetadata.newBuilder().build()); + tableMetadataMap.put(tableMetadataKey2, ObjectStorageTableMetadata.newBuilder().build()); + tableMetadataMap.put(tableMetadataKey3, ObjectStorageTableMetadata.newBuilder().build()); + String serializedTableMetadata = Serializer.serialize(tableMetadataMap); + ObjectStorageWrapperResponse tableMetadataResponse = + new ObjectStorageWrapperResponse(serializedTableMetadata, "version1"); when(wrapper.get(tableMetadataObjectKey)).thenReturn(Optional.of(tableMetadataResponse)); // First call returns empty namespace metadata, second call returns metadata with ns1 @@ -517,7 +643,6 @@ public void upgrade_WithExistingTables_ShouldUpsertNamespaces() throws Exception Serializer.serialize(namespaceMetadataMapAfterInsert); ObjectStorageWrapperResponse namespaceMetadataResponseAfterInsert = new ObjectStorageWrapperResponse(serializedNamespaceMetadataAfterInsert, "version3"); - when(wrapper.get(namespaceMetadataObjectKey)) .thenReturn(Optional.of(namespaceMetadataResponse)) .thenReturn(Optional.of(namespaceMetadataResponseAfterInsert)); @@ -526,7 +651,7 @@ public void upgrade_WithExistingTables_ShouldUpsertNamespaces() throws Exception admin.upgrade(Collections.emptyMap()); // Assert - // First namespace should trigger insert (when metadata table is empty) + // First namespace should trigger insert when metadata table is empty verify(wrapper).insert(objectKeyCaptor.capture(), payloadCaptor.capture()); Map insertedMetadata = diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java deleted file mode 100644 index 4f8cd0a2c4..0000000000 --- a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java +++ /dev/null @@ -1,114 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import static org.assertj.core.api.AssertionsForClassTypes.assertThat; -import static org.mockito.Mockito.when; - -import com.scalar.db.api.Delete; -import com.scalar.db.api.Put; -import com.scalar.db.api.TableMetadata; -import com.scalar.db.io.Key; -import java.util.Collections; -import java.util.LinkedHashSet; -import org.assertj.core.api.Assertions; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class ObjectStorageMutationTest { - private static final String ANY_NAMESPACE_NAME = "namespace"; - private static final String ANY_TABLE_NAME = "table"; - private static final String ANY_NAME_1 = "name1"; - private static final String ANY_NAME_2 = "name2"; - private static final String ANY_NAME_3 = "name3"; - private static final String ANY_NAME_4 = "name4"; - private static final String ANY_TEXT_1 = "text1"; - private static final String ANY_TEXT_2 = "text2"; - private static final int ANY_INT_1 = 1; - private static final int ANY_INT_2 = 2; - - @Mock private TableMetadata metadata; - - @BeforeEach - public void setUp() throws Exception { - MockitoAnnotations.openMocks(this).close(); - - when(metadata.getPartitionKeyNames()) - .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_1))); - } - - private Put preparePut() { - Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); - Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); - return Put.newBuilder() - .namespace(ANY_NAMESPACE_NAME) - 
.table(ANY_TABLE_NAME) - .partitionKey(partitionKey) - .clusteringKey(clusteringKey) - .intValue(ANY_NAME_3, ANY_INT_1) - .intValue(ANY_NAME_4, ANY_INT_2) - .build(); - } - - private Delete prepareDelete() { - Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); - Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); - return Delete.newBuilder() - .namespace(ANY_NAMESPACE_NAME) - .table(ANY_TABLE_NAME) - .partitionKey(partitionKey) - .clusteringKey(clusteringKey) - .build(); - } - - @Test - public void makeRecord_PutGiven_ShouldReturnWithValues() { - // Arrange - Put put = preparePut(); - ObjectStorageMutation objectStorageMutation = new ObjectStorageMutation(put, metadata); - String concatenatedKey = objectStorageMutation.getRecordId(); - - // Act - ObjectStorageRecord actual = objectStorageMutation.makeRecord(); - - // Assert - assertThat(actual.getId()).isEqualTo(concatenatedKey); - Assertions.assertThat(actual.getPartitionKey().get(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); - Assertions.assertThat(actual.getClusteringKey().get(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); - Assertions.assertThat(actual.getValues().get(ANY_NAME_3)).isEqualTo(ANY_INT_1); - Assertions.assertThat(actual.getValues().get(ANY_NAME_4)).isEqualTo(ANY_INT_2); - } - - @Test - public void makeRecord_PutWithNullValueGiven_ShouldReturnWithValues() { - // Arrange - Put put = preparePut(); - put = Put.newBuilder(put).intValue(ANY_NAME_3, null).build(); - ObjectStorageMutation objectStorageMutation = new ObjectStorageMutation(put, metadata); - String concatenatedKey = objectStorageMutation.getRecordId(); - - // Act - ObjectStorageRecord actual = objectStorageMutation.makeRecord(); - - // Assert - assertThat(actual.getId()).isEqualTo(concatenatedKey); - Assertions.assertThat(actual.getPartitionKey().get(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); - Assertions.assertThat(actual.getClusteringKey().get(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); - Assertions.assertThat(actual.getValues().containsKey(ANY_NAME_3)).isTrue(); - Assertions.assertThat(actual.getValues().get(ANY_NAME_3)).isNull(); - Assertions.assertThat(actual.getValues().get(ANY_NAME_4)).isEqualTo(ANY_INT_2); - } - - @Test - public void makeRecord_DeleteGiven_ShouldReturnEmpty() { - // Arrange - Delete delete = prepareDelete(); - ObjectStorageMutation objectStorageMutation = new ObjectStorageMutation(delete, metadata); - - // Act - ObjectStorageRecord actual = objectStorageMutation.makeRecord(); - - // Assert - assertThat(actual.getId()).isEqualTo(""); - } -} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java deleted file mode 100644 index bef7822d98..0000000000 --- a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java +++ /dev/null @@ -1,669 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import static com.scalar.db.api.ConditionBuilder.column; -import static com.scalar.db.api.ConditionBuilder.deleteIf; -import static com.scalar.db.api.ConditionBuilder.deleteIfExists; -import static com.scalar.db.api.ConditionBuilder.putIf; -import static com.scalar.db.api.ConditionBuilder.putIfExists; -import static com.scalar.db.api.ConditionBuilder.putIfNotExists; -import static org.assertj.core.api.Assertions.assertThatCode; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.when; -import static 
org.mockito.MockitoAnnotations.openMocks; - -import com.scalar.db.api.Delete; -import com.scalar.db.api.Get; -import com.scalar.db.api.MutationCondition; -import com.scalar.db.api.Put; -import com.scalar.db.api.Scan; -import com.scalar.db.api.StorageInfo; -import com.scalar.db.api.TableMetadata; -import com.scalar.db.common.StorageInfoImpl; -import com.scalar.db.common.StorageInfoProvider; -import com.scalar.db.common.TableMetadataManager; -import com.scalar.db.config.DatabaseConfig; -import com.scalar.db.exception.storage.ExecutionException; -import com.scalar.db.io.DataType; -import com.scalar.db.io.Key; -import java.util.Arrays; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.mockito.Mock; - -public class ObjectStorageOperationCheckerTest { - private static final String NAMESPACE_NAME = "n1"; - private static final String TABLE_NAME = "t1"; - private static final String PKEY1 = "p1"; - private static final String CKEY1 = "c1"; - private static final String COL1 = "v1"; - private static final String COL2 = "v2"; - private static final String COL3 = "v3"; - private static final String COL4 = "v4"; - private static final StorageInfo STORAGE_INFO = - new StorageInfoImpl("ObjectStorage", StorageInfo.MutationAtomicityUnit.STORAGE, 100); - - private static final TableMetadata TABLE_METADATA1 = - TableMetadata.newBuilder() - .addColumn(PKEY1, DataType.INT) - .addColumn(CKEY1, DataType.INT) - .addColumn(COL1, DataType.INT) - .addColumn(COL2, DataType.BOOLEAN) - .addColumn(COL3, DataType.TEXT) - .addColumn(COL4, DataType.BLOB) - .addPartitionKey(PKEY1) - .addClusteringKey(CKEY1) - .build(); - - private static final TableMetadata TABLE_METADATA2 = - TableMetadata.newBuilder() - .addColumn(PKEY1, DataType.TEXT) - .addColumn(CKEY1, DataType.TEXT) - .addPartitionKey(PKEY1) - .addClusteringKey(CKEY1) - .build(); - - @Mock private DatabaseConfig databaseConfig; - @Mock private TableMetadataManager metadataManager; - @Mock private StorageInfoProvider storageInfoProvider; - private ObjectStorageOperationChecker operationChecker; - - @BeforeEach - public void setUp() throws Exception { - openMocks(this).close(); - when(storageInfoProvider.getStorageInfo(any())).thenReturn(STORAGE_INFO); - operationChecker = - new ObjectStorageOperationChecker(databaseConfig, metadataManager, storageInfoProvider); - } - - @Test - public void check_ForMutationsWithPut_ShouldDoNothing() throws ExecutionException { - // Arrange - when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); - Put putWithoutSettingIndex = - Put.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofInt(PKEY1, 0)) - .clusteringKey(Key.ofInt(CKEY1, 0)) - .build(); - Put put = - Put.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofInt(PKEY1, 0)) - .clusteringKey(Key.ofInt(CKEY1, 1)) - .intValue(COL1, 1) - .build(); - - // Act Assert - assertThatCode(() -> operationChecker.check(Arrays.asList(putWithoutSettingIndex, put))) - .doesNotThrowAnyException(); - } - - @Test - public void check_ForMutationsWithDelete_ShouldDoNothing() throws ExecutionException { - // Arrange - when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); - Delete deleteWithoutSettingIndex = - Delete.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofInt(PKEY1, 0)) - .clusteringKey(Key.ofInt(CKEY1, 0)) - .build(); - Delete delete = - Delete.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - 
.partitionKey(Key.ofInt(PKEY1, 0)) - .clusteringKey(Key.ofInt(CKEY1, 1)) - .build(); - - // Act Assert - assertThatCode(() -> operationChecker.check(Arrays.asList(deleteWithoutSettingIndex, delete))) - .doesNotThrowAnyException(); - } - - @Test - public void - check_GetGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() - throws ExecutionException { - // Arrange - when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); - - Get get1 = - Get.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofText(PKEY1, "ab")) - .clusteringKey(Key.ofText(CKEY1, "ab")) - .build(); - Get get2 = - Get.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) - .clusteringKey(Key.ofText(CKEY1, "ab")) - .build(); - Get get3 = - Get.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofText(PKEY1, "ab")) - .clusteringKey(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) - .build(); - Get get4 = - Get.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) - .clusteringKey(Key.ofText(CKEY1, "ab")) - .build(); - Get get5 = - Get.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofText(PKEY1, "ab")) - .clusteringKey(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) - .build(); - - // Act Assert - assertThatCode(() -> operationChecker.check(get1)).doesNotThrowAnyException(); - assertThatThrownBy(() -> operationChecker.check(get2)) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> operationChecker.check(get3)) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> operationChecker.check(get4)) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> operationChecker.check(get5)) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void - check_ScanGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() - throws ExecutionException { - // Arrange - when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); - - Scan scan1 = - Scan.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofText(PKEY1, "ab")) - .start(Key.ofText(CKEY1, "ab")) - .end(Key.ofText(CKEY1, "ab")) - .build(); - Scan scan2 = - Scan.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) - .start(Key.ofText(CKEY1, "ab")) - .end(Key.ofText(CKEY1, "ab")) - .build(); - Scan scan3 = - Scan.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofText(PKEY1, "ab")) - .start(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) - .end(Key.ofText(CKEY1, "ab")) - .build(); - Scan scan4 = - Scan.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofText(PKEY1, "ab")) - .start(Key.ofText(CKEY1, "ab")) - .end(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) - .build(); - - // Act Assert - assertThatCode(() -> operationChecker.check(scan1)).doesNotThrowAnyException(); - assertThatThrownBy(() -> operationChecker.check(scan2)) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> operationChecker.check(scan3)) - 
.isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> operationChecker.check(scan4)) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void - check_PutGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() - throws ExecutionException { - // Arrange - when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); - - Put put1 = - Put.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofText(PKEY1, "ab")) - .clusteringKey(Key.ofText(CKEY1, "ab")) - .build(); - Put put2 = - Put.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) - .clusteringKey(Key.ofText(CKEY1, "ab")) - .build(); - Put put3 = - Put.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofText(PKEY1, "ab")) - .clusteringKey(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) - .build(); - - // Act Assert - assertThatCode(() -> operationChecker.check(put1)).doesNotThrowAnyException(); - assertThatThrownBy(() -> operationChecker.check(put2)) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> operationChecker.check(put3)) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void - check_DeleteGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() - throws ExecutionException { - // Arrange - when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); - - Delete delete1 = - Delete.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofText(PKEY1, "ab")) - .clusteringKey(Key.ofText(CKEY1, "ab")) - .build(); - Delete delete2 = - Delete.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) - .clusteringKey(Key.ofText(CKEY1, "ab")) - .build(); - Delete delete3 = - Delete.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofText(PKEY1, "ab")) - .clusteringKey(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) - .build(); - - // Act Assert - assertThatCode(() -> operationChecker.check(delete1)).doesNotThrowAnyException(); - assertThatThrownBy(() -> operationChecker.check(delete2)) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> operationChecker.check(delete3)) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void - check_MutationsGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() - throws ExecutionException { - // Arrange - when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); - when(storageInfoProvider.getStorageInfo(any())).thenReturn(STORAGE_INFO); - - Put put1 = - Put.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofText(PKEY1, "ab")) - .clusteringKey(Key.ofText(CKEY1, "ab")) - .build(); - Put put2 = - Put.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) - .clusteringKey(Key.ofText(CKEY1, "ab")) - .build(); - Delete delete1 = - Delete.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofText(PKEY1, "ab")) - .clusteringKey(Key.ofText(CKEY1, "ab")) - .build(); - Delete delete2 = - Delete.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - 
.partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) - .clusteringKey(Key.ofText(CKEY1, "ab")) - .build(); - - // Act Assert - assertThatCode(() -> operationChecker.check(Arrays.asList(put1, delete1))) - .doesNotThrowAnyException(); - assertThatThrownBy(() -> operationChecker.check(Arrays.asList(put2, delete1))) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> operationChecker.check(Arrays.asList(put1, delete2))) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void check_ForPutWithCondition_ShouldBehaveProperly() throws ExecutionException { - // Arrange - when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); - - // Act Assert - assertThatCode(() -> operationChecker.check(buildPutWithCondition(putIfExists()))) - .doesNotThrowAnyException(); - assertThatCode(() -> operationChecker.check(buildPutWithCondition(putIfNotExists()))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - buildPutWithCondition(putIf(column(COL1).isEqualToInt(1)).build()))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - buildPutWithCondition( - putIf(column(COL1).isGreaterThanOrEqualToInt(1)).build()))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - buildPutWithCondition(putIf(column(COL1).isNullInt()).build()))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - buildPutWithCondition(putIf(column(COL2).isEqualToBoolean(true)).build()))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - buildPutWithCondition(putIf(column(COL2).isNotEqualToBoolean(true)).build()))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - buildPutWithCondition(putIf(column(COL2).isNullBoolean()).build()))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - buildPutWithCondition(putIf(column(COL2).isNotNullBoolean()).build()))) - .doesNotThrowAnyException(); - assertThatThrownBy( - () -> - operationChecker.check( - buildPutWithCondition(putIf(column(COL2).isGreaterThanBoolean(false)).build()))) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy( - () -> - operationChecker.check( - buildPutWithCondition( - putIf(column(COL2).isLessThanOrEqualToBoolean(true)).build()))) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void check_ForDeleteWithCondition_ShouldBehaveProperly() throws ExecutionException { - // Arrange - when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); - - // Act Assert - assertThatCode(() -> operationChecker.check(buildDeleteWithCondition(deleteIfExists()))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - buildDeleteWithCondition(deleteIf(column(COL1).isEqualToInt(1)).build()))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - buildDeleteWithCondition( - deleteIf(column(COL1).isGreaterThanOrEqualToInt(1)).build()))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - buildDeleteWithCondition(deleteIf(column(COL1).isNullInt()).build()))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - buildDeleteWithCondition( - deleteIf(column(COL2).isEqualToBoolean(true)).build()))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - buildDeleteWithCondition( - 
deleteIf(column(COL2).isNotEqualToBoolean(true)).build()))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - buildDeleteWithCondition(deleteIf(column(COL2).isNullBoolean()).build()))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - buildDeleteWithCondition(deleteIf(column(COL2).isNotNullBoolean()).build()))) - .doesNotThrowAnyException(); - assertThatThrownBy( - () -> - operationChecker.check( - buildDeleteWithCondition( - deleteIf(column(COL2).isGreaterThanBoolean(false)).build()))) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy( - () -> - operationChecker.check( - buildDeleteWithCondition( - deleteIf(column(COL2).isLessThanOrEqualToBoolean(true)).build()))) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void check_ForMutationsWithPutWithCondition_ShouldBehaveProperly() - throws ExecutionException { - // Arrange - when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); - Put put = - Put.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofInt(PKEY1, 0)) - .clusteringKey(Key.ofInt(CKEY1, 1)) - .intValue(COL1, 1) - .build(); - - // Act Assert - assertThatCode( - () -> operationChecker.check(Arrays.asList(buildPutWithCondition(putIfExists()), put))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check(Arrays.asList(buildPutWithCondition(putIfNotExists()), put))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - Arrays.asList( - buildPutWithCondition(putIf(column(COL1).isEqualToInt(1)).build()), put))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - Arrays.asList( - buildPutWithCondition( - putIf(column(COL1).isGreaterThanOrEqualToInt(1)).build()), - put))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - Arrays.asList( - buildPutWithCondition(putIf(column(COL1).isNullInt()).build()), put))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - Arrays.asList( - buildPutWithCondition(putIf(column(COL2).isEqualToBoolean(true)).build()), - put))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - Arrays.asList( - buildPutWithCondition( - putIf(column(COL2).isNotEqualToBoolean(true)).build()), - put))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - Arrays.asList( - buildPutWithCondition(putIf(column(COL2).isNullBoolean()).build()), put))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - Arrays.asList( - buildPutWithCondition(putIf(column(COL2).isNotNullBoolean()).build()), - put))) - .doesNotThrowAnyException(); - assertThatThrownBy( - () -> - operationChecker.check( - Arrays.asList( - buildPutWithCondition( - putIf(column(COL2).isGreaterThanBoolean(false)).build()), - put))) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy( - () -> - operationChecker.check( - Arrays.asList( - buildPutWithCondition( - putIf(column(COL2).isLessThanOrEqualToBoolean(true)).build()), - put))) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void check_ForMutationsWithDeleteWithCondition_ShouldBehaveProperly() - throws ExecutionException { - // Arrange - when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); - Delete delete = - Delete.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - 
.partitionKey(Key.ofInt(PKEY1, 0)) - .clusteringKey(Key.ofInt(CKEY1, 1)) - .build(); - - // Act Assert - assertThatCode( - () -> - operationChecker.check( - Arrays.asList(buildDeleteWithCondition(deleteIfExists()), delete))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - Arrays.asList( - buildDeleteWithCondition(deleteIf(column(COL1).isEqualToInt(1)).build()), - delete))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - Arrays.asList( - buildDeleteWithCondition( - deleteIf(column(COL1).isGreaterThanOrEqualToInt(1)).build()), - delete))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - Arrays.asList( - buildDeleteWithCondition(deleteIf(column(COL1).isNullInt()).build()), - delete))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - Arrays.asList( - buildDeleteWithCondition( - deleteIf(column(COL2).isEqualToBoolean(true)).build()), - delete))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - Arrays.asList( - buildDeleteWithCondition( - deleteIf(column(COL2).isNotEqualToBoolean(true)).build()), - delete))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - Arrays.asList( - buildDeleteWithCondition(deleteIf(column(COL2).isNullBoolean()).build()), - delete))) - .doesNotThrowAnyException(); - assertThatCode( - () -> - operationChecker.check( - Arrays.asList( - buildDeleteWithCondition(deleteIf(column(COL2).isNotNullBoolean()).build()), - delete))) - .doesNotThrowAnyException(); - assertThatThrownBy( - () -> - operationChecker.check( - Arrays.asList( - buildDeleteWithCondition( - deleteIf(column(COL2).isGreaterThanBoolean(false)).build()), - delete))) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy( - () -> - operationChecker.check( - Arrays.asList( - buildDeleteWithCondition( - deleteIf(column(COL2).isLessThanOrEqualToBoolean(true)).build()), - delete))) - .isInstanceOf(IllegalArgumentException.class); - } - - private Put buildPutWithCondition(MutationCondition condition) { - return Put.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofInt(PKEY1, 0)) - .clusteringKey(Key.ofInt(CKEY1, 1)) - .intValue(COL1, 1) - .condition(condition) - .build(); - } - - private Delete buildDeleteWithCondition(MutationCondition condition) { - return Delete.newBuilder() - .namespace(NAMESPACE_NAME) - .table(TABLE_NAME) - .partitionKey(Key.ofInt(PKEY1, 0)) - .clusteringKey(Key.ofInt(CKEY1, 1)) - .condition(condition) - .build(); - } -} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationTest.java deleted file mode 100644 index 6bdb1347d4..0000000000 --- a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationTest.java +++ /dev/null @@ -1,109 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.scalar.db.api.Get; -import com.scalar.db.api.Operation; -import com.scalar.db.api.Put; -import com.scalar.db.api.TableMetadata; -import com.scalar.db.io.Key; -import java.util.Arrays; -import java.util.Collections; -import java.util.LinkedHashSet; -import org.junit.jupiter.api.BeforeEach; 
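A minimal sketch of the concatenated-key behavior that the operation tests below assert, assuming a stand-in delimiter; the real character is ObjectStorageUtils.CONCATENATED_KEY_DELIMITER, which is defined elsewhere in the adapter and not reproduced here, and the class and method names in the sketch are illustrative only:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

// Illustrative standalone class, not part of the adapter or its tests.
class ConcatenatedKeySketch {
  // Stand-in for ObjectStorageUtils.CONCATENATED_KEY_DELIMITER; the actual character differs.
  static final char DELIMITER = '|';

  // Joins the textual form of each key value in declaration order, which is the
  // shape of the values that getConcatenatedPartitionKey() and getRecordId() are
  // expected to return in the tests that follow.
  static String concatenate(List<Object> keyValues) {
    return keyValues.stream()
        .map(String::valueOf)
        .collect(Collectors.joining(String.valueOf(DELIMITER)));
  }

  public static void main(String[] args) {
    // For the values "text1", "text2", 1 this prints "text1|text2|1" with the stand-in delimiter.
    System.out.println(concatenate(Arrays.asList("text1", "text2", 1)));
  }
}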
-import org.junit.jupiter.api.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class ObjectStorageOperationTest { - private static final String ANY_NAMESPACE_NAME = "namespace"; - private static final String ANY_TABLE_NAME = "table"; - private static final String ANY_NAME_1 = "name1"; - private static final String ANY_NAME_2 = "name2"; - private static final String ANY_NAME_3 = "name3"; - private static final String ANY_TEXT_1 = "text1"; - private static final String ANY_TEXT_2 = "text2"; - private static final int ANY_INT_1 = 1; - - @Mock private TableMetadata metadata; - - @BeforeEach - public void setUp() throws Exception { - MockitoAnnotations.openMocks(this).close(); - } - - @Test - public void checkArgument_WrongOperationGiven_ShouldThrowIllegalArgumentException() { - // Arrange - Operation operation = mock(Put.class); - ObjectStorageOperation objectStorageOperation = new ObjectStorageOperation(operation, metadata); - - // Act Assert - assertThatThrownBy(() -> objectStorageOperation.checkArgument(Get.class)) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void getConcatenatedPartitionKey_MultipleKeysGiven_ShouldReturnConcatenatedPartitionKey() { - // Arrange - when(metadata.getPartitionKeyNames()) - .thenReturn(new LinkedHashSet<>(Arrays.asList(ANY_NAME_1, ANY_NAME_2, ANY_NAME_3))); - - Key partitionKey = - Key.of(ANY_NAME_1, ANY_TEXT_1, ANY_NAME_2, ANY_TEXT_2, ANY_NAME_3, ANY_INT_1); - Get get = - Get.newBuilder() - .namespace(ANY_NAMESPACE_NAME) - .table(ANY_TABLE_NAME) - .partitionKey(partitionKey) - .build(); - ObjectStorageOperation objectStorageOperation = new ObjectStorageOperation(get, metadata); - - // Act - String actual = objectStorageOperation.getConcatenatedPartitionKey(); - - // Assert - assertThat(actual) - .isEqualTo( - String.join( - String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), - ANY_TEXT_1, - ANY_TEXT_2, - String.valueOf(ANY_INT_1))); - } - - @Test - public void getId_MultipleKeysGiven_ShouldReturnConcatenatedPartitionKey() { - // Arrange - when(metadata.getPartitionKeyNames()) - .thenReturn(new LinkedHashSet<>(Arrays.asList(ANY_NAME_1, ANY_NAME_3))); - when(metadata.getClusteringKeyNames()) - .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_2))); - - Key partitionKey = Key.of(ANY_NAME_1, ANY_TEXT_1, ANY_NAME_3, ANY_INT_1); - Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); - Get get = - Get.newBuilder() - .namespace(ANY_NAMESPACE_NAME) - .table(ANY_TABLE_NAME) - .partitionKey(partitionKey) - .clusteringKey(clusteringKey) - .build(); - ObjectStorageOperation objectStorageOperation = new ObjectStorageOperation(get, metadata); - - // Act - String actual = objectStorageOperation.getRecordId(); - - // Assert - assertThat(actual) - .isEqualTo( - String.join( - String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), - ANY_TEXT_1, - String.valueOf(ANY_INT_1), - ANY_TEXT_2)); - } -} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageUtilsTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageUtilsTest.java index 0c57b5f583..b6ae057928 100644 --- a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageUtilsTest.java +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageUtilsTest.java @@ -5,6 +5,7 @@ import org.junit.jupiter.api.Test; public class ObjectStorageUtilsTest { + @Test public void getObjectKey_GivenAllNames_ShouldReturnExpectedObjectKey() { // Arrange From 
d5bf8ce04d83f46a7e6fc63d4b7e8121f4655d9e Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Fri, 31 Oct 2025 11:02:42 +0900 Subject: [PATCH 05/20] Discard the unnecessary change --- .github/workflows/ci.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index cb435e026f..a13aa68691 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -1149,7 +1149,8 @@ jobs: integration-test-for-jdbc-sqlserver-2017: name: SQL Server 2017 integration test (${{ matrix.mode.label }}) runs-on: windows-latest - env: # Official Oracle JDK images that are windows compatible and publicly available through direct download do not exist for JDK 8 and 11 so we use instead cached versions hosted on the Scalar container registry. + env: + # Official Oracle JDK images that are windows compatible and publicly available through direct download do not exist for JDK 8 and 11 so we use instead cached versions hosted on the Scalar container registry. # This variable evaluates to: if {!(Temurin JDK 8) && !(Oracle JDK 8 or 11)} then {true} else {false} SET_UP_INT_TEST_RUNTIME_JDK_WHEN_NOT_ORACLE_8_OR_11: "${{ (github.event_name == 'workflow_dispatch' && !(inputs.INT_TEST_JAVA_RUNTIME_VERSION == '8' && inputs.INT_TEST_JAVA_RUNTIME_VENDOR == 'temurin') && !(inputs.INT_TEST_JAVA_RUNTIME_VENDOR == 'oracle' && (inputs.INT_TEST_JAVA_RUNTIME_VERSION == '8' || inputs.INT_TEST_JAVA_RUNTIME_VERSION == '11'))) && 'true' || 'false' }}" SET_UP_INT_TEST_RUNTIME_ORACLE_JDK_8_OR_11: "${{ ((inputs.INT_TEST_JAVA_RUNTIME_VERSION == '8' || inputs.INT_TEST_JAVA_RUNTIME_VERSION == '11') && inputs.INT_TEST_JAVA_RUNTIME_VENDOR == 'oracle') && 'true' || 'false' }}" From e4cfa8659a18f5e28721ec6b97753c102870fb85 Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Fri, 31 Oct 2025 11:44:57 +0900 Subject: [PATCH 06/20] Fix workflow --- .github/workflows/ci.yaml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index a13aa68691..64b6af8ed2 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -2126,6 +2126,15 @@ jobs: name: Azure Blob Storage integration test (${{ matrix.mode.label }}) runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + mode: + - label: default + group_commit_enabled: false + - label: with_group_commit + group_commit_enabled: true + services: postgres: image: mcr.microsoft.com/azure-storage/azurite @@ -2174,7 +2183,7 @@ jobs: --connection-string "DefaultEndpointsProtocol=http;AccountName=test;AccountKey=test;BlobEndpoint=http://localhost:10000/test;" - name: Execute Gradle 'integrationTestObjectStorage' task - run: ./gradlew integrationTestObjectStorage -Dscalardb.jdbc.url=http://localhost:5432/ -Dscalardb.jdbc.username=test -Dscalardb.jdbc.password=test ${{ matrix.mode.group_commit_enabled && env.INT_TEST_GRADLE_OPTIONS_FOR_GROUP_COMMIT || '' }} + run: ./gradlew integrationTestObjectStorage -Dscalardb.object_storage.endpoint=http://localhost:10000/test/test-container -Dscalardb.object_storage.username=test -Dscalardb.object_storage.password=test ${{ matrix.mode.group_commit_enabled && env.INT_TEST_GRADLE_OPTIONS_FOR_GROUP_COMMIT || '' }} - name: Upload Gradle test reports if: always() From 02a67088f03f936fa2acd6aa30a16738c410b385 Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Fri, 31 Oct 2025 13:30:20 +0900 Subject: [PATCH 07/20] Apply suggestions --- .../ObjectStorageAdminTestUtils.java | 20 +++++++++++++------ 
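The Blob Storage integration test job above talks to Azurite through the connection string shown in the workflow. For a local sanity check against the same emulator, a container client can be built roughly as follows; this is a sketch only, assuming the azure-storage-blob BlobContainerClientBuilder API and reusing the non-production account name, key, endpoint, and container name from the CI job (Azurite must be started with a matching custom account), and the class name is illustrative only:

import com.azure.storage.blob.BlobContainerClient;
import com.azure.storage.blob.BlobContainerClientBuilder;

// Illustrative standalone class, not part of the adapter or the CI job.
public class AzuriteConnectionSketch {
  public static void main(String[] args) {
    // Connection string and container name mirror the CI job; they are test-only values.
    String connectionString =
        "DefaultEndpointsProtocol=http;AccountName=test;AccountKey=test;"
            + "BlobEndpoint=http://localhost:10000/test;";

    BlobContainerClient container =
        new BlobContainerClientBuilder()
            .connectionString(connectionString)
            .containerName("test-container")
            .buildClient();

    // Simple liveness check against the emulator.
    System.out.println("container exists: " + container.exists());
  }
}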
.../ObjectStorageTableMetadata.java | 15 +++++++++----- .../objectstorage/blob/BlobWrapper.java | 3 +-- 3 files changed, 25 insertions(+), 13 deletions(-) diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java index 63fc24f813..ff52a25b70 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java @@ -31,16 +31,24 @@ public void dropMetadataTable() { @Override public void truncateNamespacesTable() throws Exception { - wrapper.delete( - ObjectStorageUtils.getObjectKey( - metadataNamespace, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE)); + try { + wrapper.delete( + ObjectStorageUtils.getObjectKey( + metadataNamespace, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE)); + } catch (PreconditionFailedException e) { + // The namespace metadata table object does not exist, so do nothing + } } @Override public void truncateMetadataTable() throws Exception { - wrapper.delete( - ObjectStorageUtils.getObjectKey( - metadataNamespace, ObjectStorageAdmin.TABLE_METADATA_TABLE)); + try { + wrapper.delete( + ObjectStorageUtils.getObjectKey( + metadataNamespace, ObjectStorageAdmin.TABLE_METADATA_TABLE)); + } catch (PreconditionFailedException e) { + // The table metadata table object does not exist, so do nothing + } } @Override diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java index 03ae458f45..1a909f9c41 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java @@ -6,6 +6,7 @@ import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashSet; import java.util.Map; import java.util.Objects; @@ -34,13 +35,17 @@ public ObjectStorageTableMetadata( @Nullable Map clusteringOrders, @Nullable Set secondaryIndexNames, @Nullable Map columns) { - this.partitionKeyNames = partitionKeyNames != null ? partitionKeyNames : new LinkedHashSet<>(); + this.partitionKeyNames = + partitionKeyNames != null ? new LinkedHashSet<>(partitionKeyNames) : new LinkedHashSet<>(); this.clusteringKeyNames = - clusteringKeyNames != null ? clusteringKeyNames : new LinkedHashSet<>(); - this.clusteringOrders = clusteringOrders != null ? clusteringOrders : Collections.emptyMap(); + clusteringKeyNames != null + ? new LinkedHashSet<>(clusteringKeyNames) + : new LinkedHashSet<>(); + this.clusteringOrders = + clusteringOrders != null ? new HashMap<>(clusteringOrders) : Collections.emptyMap(); this.secondaryIndexNames = - secondaryIndexNames != null ? secondaryIndexNames : Collections.emptySet(); - this.columns = columns != null ? columns : Collections.emptyMap(); + secondaryIndexNames != null ? new HashSet<>(secondaryIndexNames) : Collections.emptySet(); + this.columns = columns != null ? 
new HashMap<>(columns) : Collections.emptyMap(); } public ObjectStorageTableMetadata(TableMetadata tableMetadata) { diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobWrapper.java b/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobWrapper.java index 816409df86..23c3d5a3b6 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobWrapper.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobWrapper.java @@ -119,8 +119,7 @@ public void update(String key, String object, String version) || e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw new PreconditionFailedException( String.format( - String.format( - "Failed to update the object with key '%s' due to precondition failure", key)), + "Failed to update the object with key '%s' due to precondition failure", key), e); } throw new ObjectStorageWrapperException( From d2fb32f035dd32c337fc572d86d57160dce9ff5c Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Fri, 31 Oct 2025 13:41:22 +0900 Subject: [PATCH 08/20] Apply suggestion --- .../db/storage/objectstorage/blob/BlobWrapper.java | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobWrapper.java b/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobWrapper.java index 23c3d5a3b6..775a91ce9b 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobWrapper.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobWrapper.java @@ -177,7 +177,16 @@ public void deleteByPrefix(String prefix) throws ObjectStorageWrapperException { try { client .listBlobs(new ListBlobsOptions().setPrefix(prefix), requestTimeoutInSeconds) - .forEach(blobItem -> client.getBlobClient(blobItem.getName()).delete()); + .forEach( + blobItem -> { + try { + client.getBlobClient(blobItem.getName()).delete(); + } catch (BlobStorageException e) { + if (!e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { + throw e; + } + } + }); } catch (Exception e) { throw new ObjectStorageWrapperException( String.format("Failed to delete the objects with prefix '%s'", prefix), e); From ca8b0b679f5de6de1bd79f2690664439c4efd550 Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Fri, 31 Oct 2025 13:52:48 +0900 Subject: [PATCH 09/20] Apply suggestions --- .../objectstorage/ObjectStorageAdmin.java | 38 +++++-------------- 1 file changed, 10 insertions(+), 28 deletions(-) diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java index f2aa9743c8..479b39eabf 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java @@ -443,29 +443,20 @@ private void deleteTableMetadata(String namespace, String table) throws Executio private Map getNamespaceMetadataTable() throws ExecutionException { - try { - Optional response = - wrapper.get(ObjectStorageUtils.getObjectKey(metadataNamespace, NAMESPACE_METADATA_TABLE)); - if (!response.isPresent()) { - return Collections.emptyMap(); - } - return Serializer.deserialize( - response.get().getPayload(), - new TypeReference>() {}); - } catch (ObjectStorageWrapperException e) { - throw new ExecutionException("Failed to get the metadata table.", e); - } + return getNamespaceMetadataTable(null); } private Map getNamespaceMetadataTable( - Map readVersionMap) throws 
ExecutionException { + @Nullable Map readVersionMap) throws ExecutionException { try { Optional response = wrapper.get(ObjectStorageUtils.getObjectKey(metadataNamespace, NAMESPACE_METADATA_TABLE)); if (!response.isPresent()) { return Collections.emptyMap(); } - readVersionMap.put(NAMESPACE_METADATA_TABLE, response.get().getVersion()); + if (readVersionMap != null) { + readVersionMap.put(NAMESPACE_METADATA_TABLE, response.get().getVersion()); + } return Serializer.deserialize( response.get().getPayload(), new TypeReference>() {}); @@ -476,29 +467,20 @@ private Map getNamespaceMetadataTable( private Map getTableMetadataTable() throws ExecutionException { - try { - Optional response = - wrapper.get(ObjectStorageUtils.getObjectKey(metadataNamespace, TABLE_METADATA_TABLE)); - if (!response.isPresent()) { - return Collections.emptyMap(); - } - return Serializer.deserialize( - response.get().getPayload(), - new TypeReference>() {}); - } catch (ObjectStorageWrapperException e) { - throw new ExecutionException("Failed to get the metadata table.", e); - } + return getTableMetadataTable(null); } private Map getTableMetadataTable( - Map readVersionMap) throws ExecutionException { + @Nullable Map readVersionMap) throws ExecutionException { try { Optional response = wrapper.get(ObjectStorageUtils.getObjectKey(metadataNamespace, TABLE_METADATA_TABLE)); if (!response.isPresent()) { return Collections.emptyMap(); } - readVersionMap.put(TABLE_METADATA_TABLE, response.get().getVersion()); + if (readVersionMap != null) { + readVersionMap.put(TABLE_METADATA_TABLE, response.get().getVersion()); + } return Serializer.deserialize( response.get().getPayload(), new TypeReference>() {}); From 6fba352e78f77c22cb5a4b58d2618913a0aac541 Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Fri, 31 Oct 2025 19:53:39 +0900 Subject: [PATCH 10/20] Apply suggestions --- .../db/storage/objectstorage/ObjectStorageTableMetadata.java | 5 ++++- .../scalar/db/storage/objectstorage/blob/BlobWrapper.java | 2 -- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java index 1a909f9c41..afe04f2b94 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java @@ -11,6 +11,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.function.Function; import java.util.stream.Collectors; import javax.annotation.Nullable; import javax.annotation.concurrent.Immutable; @@ -51,7 +52,9 @@ public ObjectStorageTableMetadata( public ObjectStorageTableMetadata(TableMetadata tableMetadata) { Map clusteringOrders = tableMetadata.getClusteringKeyNames().stream() - .collect(Collectors.toMap(c -> c, c -> tableMetadata.getClusteringOrder(c).name())); + .collect( + Collectors.toMap( + Function.identity(), c -> tableMetadata.getClusteringOrder(c).name())); Map columnTypeByName = new HashMap<>(); tableMetadata .getColumnNames() diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobWrapper.java b/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobWrapper.java index 775a91ce9b..71b42d1f49 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobWrapper.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobWrapper.java @@ -24,8 +24,6 @@ import 
java.util.stream.Collectors; public class BlobWrapper implements ObjectStorageWrapper { - public static final String STORAGE_NAME = "blob"; - private final BlobContainerClient client; private final Duration requestTimeoutInSeconds; private final ParallelTransferOptions parallelTransferOptions; From 2b7b8e0ee3f74aedf83ab296b69385a1b3fda05f Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Sat, 1 Nov 2025 16:22:30 +0900 Subject: [PATCH 11/20] Refactor ObjectStorageAdmin --- .../objectstorage/ObjectStorageAdmin.java | 242 ++++++++---------- .../objectstorage/ObjectStorageAdminTest.java | 34 +-- 2 files changed, 104 insertions(+), 172 deletions(-) diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java index 479b39eabf..0804762e4d 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java @@ -78,7 +78,20 @@ public void close() { public void createNamespace(String namespace, Map options) throws ExecutionException { try { - insertNamespaceMetadata(namespace); + // Insert the namespace metadata + Map readVersionMap = new HashMap<>(); + Map metadataTable = + getNamespaceMetadataTable(readVersionMap); + assert !metadataTable.containsKey(namespace); + if (metadataTable.isEmpty()) { + Map newMetadataTable = + Collections.singletonMap(namespace, new ObjectStorageNamespaceMetadata(namespace)); + insertMetadataTable(NAMESPACE_METADATA_TABLE, newMetadataTable); + } else { + metadataTable.put(namespace, new ObjectStorageNamespaceMetadata(namespace)); + updateMetadataTable( + NAMESPACE_METADATA_TABLE, metadataTable, readVersionMap.get(NAMESPACE_METADATA_TABLE)); + } } catch (Exception e) { throw new ExecutionException( String.format("Failed to create the namespace %s", namespace), e); @@ -90,7 +103,20 @@ public void createTable( String namespace, String table, TableMetadata metadata, Map options) throws ExecutionException { try { - insertTableMetadata(namespace, table, metadata); + // Insert the table metadata + String tableMetadataKey = getTableMetadataKey(namespace, table); + Map readVersionMap = new HashMap<>(); + Map metadataTable = getTableMetadataTable(readVersionMap); + assert !metadataTable.containsKey(tableMetadataKey); + if (metadataTable.isEmpty()) { + Map newMetadataTable = + Collections.singletonMap(tableMetadataKey, new ObjectStorageTableMetadata(metadata)); + insertMetadataTable(TABLE_METADATA_TABLE, newMetadataTable); + } else { + metadataTable.put(tableMetadataKey, new ObjectStorageTableMetadata(metadata)); + updateMetadataTable( + TABLE_METADATA_TABLE, metadataTable, readVersionMap.get(TABLE_METADATA_TABLE)); + } } catch (Exception e) { throw new ExecutionException( String.format( @@ -103,7 +129,18 @@ public void createTable( public void dropTable(String namespace, String table) throws ExecutionException { try { deleteTableData(namespace, table); - deleteTableMetadata(namespace, table); + // Delete the table metadata + String tableMetadataKey = getTableMetadataKey(namespace, table); + Map readVersionMap = new HashMap<>(); + Map metadataTable = getTableMetadataTable(readVersionMap); + assert metadataTable.containsKey(tableMetadataKey); + metadataTable.remove(tableMetadataKey); + String readVersion = readVersionMap.get(TABLE_METADATA_TABLE); + if (metadataTable.isEmpty()) { + deleteMetadataTable(TABLE_METADATA_TABLE, readVersion); + } else { + 
updateMetadataTable(TABLE_METADATA_TABLE, metadataTable, readVersion); + } } catch (Exception e) { throw new ExecutionException( String.format( @@ -130,7 +167,18 @@ public void dropIndex(String namespace, String table, String columnName) @Override public void dropNamespace(String namespace) throws ExecutionException { try { - deleteNamespaceMetadata(namespace); + // Delete the namespace metadata + Map readVersionMap = new HashMap<>(); + Map metadataTable = + getNamespaceMetadataTable(readVersionMap); + assert metadataTable.containsKey(namespace); + metadataTable.remove(namespace); + String readVersion = readVersionMap.get(NAMESPACE_METADATA_TABLE); + if (metadataTable.isEmpty()) { + deleteMetadataTable(NAMESPACE_METADATA_TABLE, readVersion); + } else { + updateMetadataTable(NAMESPACE_METADATA_TABLE, metadataTable, readVersion); + } } catch (Exception e) { throw new ExecutionException(String.format("Failed to drop the namespace %s", namespace), e); } @@ -201,7 +249,19 @@ public boolean namespaceExists(String namespace) throws ExecutionException { public void repairNamespace(String namespace, Map options) throws ExecutionException { try { - upsertNamespaceMetadata(namespace); + // Upsert the namespace metadata + Map readVersionMap = new HashMap<>(); + Map metadataTable = + getNamespaceMetadataTable(readVersionMap); + if (metadataTable.isEmpty()) { + insertMetadataTable( + NAMESPACE_METADATA_TABLE, + Collections.singletonMap(namespace, new ObjectStorageNamespaceMetadata(namespace))); + } else { + metadataTable.put(namespace, new ObjectStorageNamespaceMetadata(namespace)); + updateMetadataTable( + NAMESPACE_METADATA_TABLE, metadataTable, readVersionMap.get(NAMESPACE_METADATA_TABLE)); + } } catch (Exception e) { throw new ExecutionException( String.format("Failed to repair the namespace %s", namespace), e); @@ -224,7 +284,19 @@ public void repairTable( String namespace, String table, TableMetadata metadata, Map options) throws ExecutionException { try { - upsertTableMetadata(namespace, table, metadata); + // Upsert the table metadata + String tableMetadataKey = getTableMetadataKey(namespace, table); + Map readVersionMap = new HashMap<>(); + Map metadataTable = getTableMetadataTable(readVersionMap); + if (metadataTable.isEmpty()) { + insertMetadataTable( + TABLE_METADATA_TABLE, + Collections.singletonMap(tableMetadataKey, new ObjectStorageTableMetadata(metadata))); + } else { + metadataTable.put(tableMetadataKey, new ObjectStorageTableMetadata(metadata)); + updateMetadataTable( + TABLE_METADATA_TABLE, metadataTable, readVersionMap.get(TABLE_METADATA_TABLE)); + } } catch (Exception e) { throw new ExecutionException( String.format( @@ -238,10 +310,16 @@ public void addNewColumnToTable( String namespace, String table, String columnName, DataType columnType) throws ExecutionException { try { - TableMetadata currentTableMetadata = getTableMetadata(namespace, table); + // Update the table metadata + String tableMetadataKey = getTableMetadataKey(namespace, table); + Map readVersionMap = new HashMap<>(); + Map metadataTable = getTableMetadataTable(readVersionMap); + TableMetadata currentTableMetadata = metadataTable.get(tableMetadataKey).toTableMetadata(); TableMetadata updatedTableMetadata = TableMetadata.newBuilder(currentTableMetadata).addColumn(columnName, columnType).build(); - upsertTableMetadata(namespace, table, updatedTableMetadata); + metadataTable.put(tableMetadataKey, new ObjectStorageTableMetadata(updatedTableMetadata)); + updateMetadataTable( + TABLE_METADATA_TABLE, metadataTable, 
readVersionMap.get(TABLE_METADATA_TABLE)); } catch (Exception e) { throw new ExecutionException( String.format( @@ -293,151 +371,31 @@ public Set getNamespaceNames() throws ExecutionException { @Override public void upgrade(Map options) throws ExecutionException { try { - Map metadataTable = getTableMetadataTable(); + // Get all namespace names from the table metadata table + Map tableMetadataTable = getTableMetadataTable(); List namespaceNames = - metadataTable.keySet().stream() + tableMetadataTable.keySet().stream() .map(ObjectStorageAdmin::getNamespaceNameFromTableMetadataKey) .distinct() .collect(Collectors.toList()); - for (String namespaceName : namespaceNames) { - upsertNamespaceMetadata(namespaceName); - } - } catch (Exception e) { - throw new ExecutionException("Failed to upgrade", e); - } - } - - private void insertNamespaceMetadata(String namespace) throws ExecutionException { - try { + // Upsert the namespace metadata table Map readVersionMap = new HashMap<>(); - Map metadataTable = + Map namespaceMetadataTable = getNamespaceMetadataTable(readVersionMap); - if (metadataTable.containsKey(namespace)) { - throw new ExecutionException( - String.format("The namespace metadata already exists: %s", namespace)); - } - if (metadataTable.isEmpty()) { - insertMetadataTable( - NAMESPACE_METADATA_TABLE, - Collections.singletonMap(namespace, new ObjectStorageNamespaceMetadata(namespace))); - } else { - metadataTable.put(namespace, new ObjectStorageNamespaceMetadata(namespace)); - updateMetadataTable( - NAMESPACE_METADATA_TABLE, metadataTable, readVersionMap.get(NAMESPACE_METADATA_TABLE)); - } - } catch (Exception e) { - throw new ExecutionException( - String.format("Failed to insert the namespace metadata: %s", namespace), e); - } - } - - private void insertTableMetadata(String namespace, String table, TableMetadata metadata) - throws ExecutionException { - String tableMetadataKey = getTableMetadataKey(namespace, table); - try { - Map readVersionMap = new HashMap<>(); - Map metadataTable = getTableMetadataTable(readVersionMap); - if (metadataTable.containsKey(tableMetadataKey)) { - throw new ExecutionException( - String.format("The table metadata already exists: %s", tableMetadataKey)); - } - if (metadataTable.isEmpty()) { - insertMetadataTable( - TABLE_METADATA_TABLE, - Collections.singletonMap(tableMetadataKey, new ObjectStorageTableMetadata(metadata))); + Map newNamespaceMetadataTable = + namespaceNames.stream() + .collect( + Collectors.toMap(namespace -> namespace, ObjectStorageNamespaceMetadata::new)); + if (namespaceMetadataTable.isEmpty()) { + insertMetadataTable(NAMESPACE_METADATA_TABLE, newNamespaceMetadataTable); } else { - metadataTable.put(tableMetadataKey, new ObjectStorageTableMetadata(metadata)); updateMetadataTable( - TABLE_METADATA_TABLE, metadataTable, readVersionMap.get(TABLE_METADATA_TABLE)); - } - } catch (Exception e) { - throw new ExecutionException( - String.format("Failed to insert the table metadata: %s", tableMetadataKey), e); - } - } - - private void upsertNamespaceMetadata(String namespace) throws ExecutionException { - try { - Map readVersionMap = new HashMap<>(); - Map metadataTable = - getNamespaceMetadataTable(readVersionMap); - if (metadataTable.isEmpty()) { - insertMetadataTable( NAMESPACE_METADATA_TABLE, - Collections.singletonMap(namespace, new ObjectStorageNamespaceMetadata(namespace))); - } else { - metadataTable.put(namespace, new ObjectStorageNamespaceMetadata(namespace)); - updateMetadataTable( - NAMESPACE_METADATA_TABLE, metadataTable, 
readVersionMap.get(NAMESPACE_METADATA_TABLE)); + newNamespaceMetadataTable, + readVersionMap.get(NAMESPACE_METADATA_TABLE)); } } catch (Exception e) { - throw new ExecutionException( - String.format("Failed to upsert the namespace metadata: %s", namespace), e); - } - } - - private void upsertTableMetadata(String namespace, String table, TableMetadata metadata) - throws ExecutionException { - String tableMetadataKey = getTableMetadataKey(namespace, table); - try { - Map readVersionMap = new HashMap<>(); - Map metadataTable = getTableMetadataTable(readVersionMap); - if (metadataTable.isEmpty()) { - insertMetadataTable( - TABLE_METADATA_TABLE, - Collections.singletonMap(tableMetadataKey, new ObjectStorageTableMetadata(metadata))); - } else { - metadataTable.put(tableMetadataKey, new ObjectStorageTableMetadata(metadata)); - updateMetadataTable( - TABLE_METADATA_TABLE, metadataTable, readVersionMap.get(TABLE_METADATA_TABLE)); - } - } catch (Exception e) { - throw new ExecutionException( - String.format("Failed to upsert the table metadata: %s", tableMetadataKey), e); - } - } - - private void deleteNamespaceMetadata(String namespace) throws ExecutionException { - try { - Map readVersionMap = new HashMap<>(); - Map metadataTable = - getNamespaceMetadataTable(readVersionMap); - if (metadataTable.isEmpty() || !metadataTable.containsKey(namespace)) { - throw new ExecutionException( - String.format("The namespace metadata does not exist: %s", namespace)); - } - metadataTable.remove(namespace); - String readVersion = readVersionMap.get(NAMESPACE_METADATA_TABLE); - if (metadataTable.isEmpty()) { - deleteMetadataTable(NAMESPACE_METADATA_TABLE, readVersion); - } else { - updateMetadataTable(NAMESPACE_METADATA_TABLE, metadataTable, readVersion); - } - } catch (Exception e) { - throw new ExecutionException( - String.format("Failed to delete the namespace metadata: %s", namespace), e); - } - } - - private void deleteTableMetadata(String namespace, String table) throws ExecutionException { - String tableMetadataKey = getTableMetadataKey(namespace, table); - try { - Map readVersionMap = new HashMap<>(); - Map metadataTable = getTableMetadataTable(readVersionMap); - if (metadataTable.isEmpty() || !metadataTable.containsKey(tableMetadataKey)) { - throw new ExecutionException( - String.format("The table metadata does not exist: %s", tableMetadataKey)); - } - metadataTable.remove(tableMetadataKey); - String readVersion = readVersionMap.get(TABLE_METADATA_TABLE); - if (metadataTable.isEmpty()) { - deleteMetadataTable(TABLE_METADATA_TABLE, readVersion); - } else { - updateMetadataTable(TABLE_METADATA_TABLE, metadataTable, readVersion); - } - } catch (Exception e) { - throw new ExecutionException( - String.format("Failed to delete the table metadata: %s", tableMetadataKey), e); + throw new ExecutionException("Failed to upgrade", e); } } diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java index 956ecd24c3..b5672195b4 100644 --- a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java @@ -614,11 +614,6 @@ public void upgrade_WithExistingTables_ShouldUpsertNamespaces() throws Exception String tableMetadataKey2 = "ns1" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + "tbl2"; String tableMetadataKey3 = "ns2" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + "tbl3"; - Map 
namespaceMetadataMap = new HashMap<>(); - String serializedNamespaceMetadata = Serializer.serialize(namespaceMetadataMap); - ObjectStorageWrapperResponse namespaceMetadataResponse = - new ObjectStorageWrapperResponse(serializedNamespaceMetadata, "version2"); - String tableMetadataObjectKey = ObjectStorageUtils.getObjectKey( METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE); @@ -636,41 +631,20 @@ public void upgrade_WithExistingTables_ShouldUpsertNamespaces() throws Exception new ObjectStorageWrapperResponse(serializedTableMetadata, "version1"); when(wrapper.get(tableMetadataObjectKey)).thenReturn(Optional.of(tableMetadataResponse)); - // First call returns empty namespace metadata, second call returns metadata with ns1 - Map namespaceMetadataMapAfterInsert = new HashMap<>(); - namespaceMetadataMapAfterInsert.put("ns1", new ObjectStorageNamespaceMetadata("ns1")); - String serializedNamespaceMetadataAfterInsert = - Serializer.serialize(namespaceMetadataMapAfterInsert); - ObjectStorageWrapperResponse namespaceMetadataResponseAfterInsert = - new ObjectStorageWrapperResponse(serializedNamespaceMetadataAfterInsert, "version3"); - when(wrapper.get(namespaceMetadataObjectKey)) - .thenReturn(Optional.of(namespaceMetadataResponse)) - .thenReturn(Optional.of(namespaceMetadataResponseAfterInsert)); + // Mock non-existing namespace metadata + when(wrapper.get(namespaceMetadataObjectKey)).thenReturn(Optional.empty()); // Act admin.upgrade(Collections.emptyMap()); // Assert - // First namespace should trigger insert when metadata table is empty + verify(wrapper).get(tableMetadataObjectKey); verify(wrapper).insert(objectKeyCaptor.capture(), payloadCaptor.capture()); Map insertedMetadata = Serializer.deserialize( payloadCaptor.getValue(), new TypeReference>() {}); - assertThat(insertedMetadata).containsKey("ns1"); - - // Second namespace should trigger update (when metadata table is not empty) - verify(wrapper) - .update( - eq(namespaceMetadataObjectKey), - payloadCaptor.capture(), - eq(namespaceMetadataResponseAfterInsert.getVersion())); - - Map updatedMetadata = - Serializer.deserialize( - payloadCaptor.getValue(), - new TypeReference>() {}); - assertThat(updatedMetadata).containsKeys("ns1", "ns2"); + assertThat(insertedMetadata).containsKeys("ns1", "ns2"); } } From 2ae5145392988e7767a77ca26df260f3274a4363 Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Tue, 4 Nov 2025 08:51:22 +0900 Subject: [PATCH 12/20] Fix to version metadata --- .../ObjectStorageNamespaceMetadata.java | 19 ++++++++-- .../ObjectStorageTableMetadata.java | 38 ++++++++++++++++--- 2 files changed, 47 insertions(+), 10 deletions(-) diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java index 024a7c419b..b75247815f 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java @@ -6,22 +6,33 @@ @Immutable public class ObjectStorageNamespaceMetadata { + public static final Integer DEFAULT_VERSION = 1; private final String name; + private final Integer version; // The default constructor is required by Jackson to deserialize JSON object @SuppressWarnings("unused") public ObjectStorageNamespaceMetadata() { - this(null); + this(null, null); } - public ObjectStorageNamespaceMetadata(@Nullable String name) { + public ObjectStorageNamespaceMetadata(@Nullable 
String name, @Nullable Integer version) { this.name = name != null ? name : ""; + this.version = version != null ? version : DEFAULT_VERSION; + } + + public ObjectStorageNamespaceMetadata(@Nullable String name) { + this(name, DEFAULT_VERSION); } public String getName() { return name; } + public Integer getVersion() { + return version; + } + @Override public boolean equals(Object o) { if (this == o) { @@ -32,11 +43,11 @@ public boolean equals(Object o) { } ObjectStorageNamespaceMetadata that = (ObjectStorageNamespaceMetadata) o; - return name.equals(that.name); + return name.equals(that.name) && version.equals(that.version); } @Override public int hashCode() { - return Objects.hash(name); + return Objects.hash(name, version); } } diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java index afe04f2b94..8242368726 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java @@ -19,15 +19,17 @@ @SuppressFBWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"}) @Immutable public class ObjectStorageTableMetadata { + public static final Integer DEFAULT_VERSION = 1; private final LinkedHashSet partitionKeyNames; private final LinkedHashSet clusteringKeyNames; private final Map clusteringOrders; private final Set secondaryIndexNames; private final Map columns; + private final Integer version; // The default constructor is required by Jackson to deserialize JSON object public ObjectStorageTableMetadata() { - this(null, null, null, null, null); + this(null, null, null, null, null, null); } public ObjectStorageTableMetadata( @@ -35,7 +37,8 @@ public ObjectStorageTableMetadata( @Nullable LinkedHashSet clusteringKeyNames, @Nullable Map clusteringOrders, @Nullable Set secondaryIndexNames, - @Nullable Map columns) { + @Nullable Map columns, + @Nullable Integer version) { this.partitionKeyNames = partitionKeyNames != null ? new LinkedHashSet<>(partitionKeyNames) : new LinkedHashSet<>(); this.clusteringKeyNames = @@ -47,9 +50,10 @@ public ObjectStorageTableMetadata( this.secondaryIndexNames = secondaryIndexNames != null ? new HashSet<>(secondaryIndexNames) : Collections.emptySet(); this.columns = columns != null ? new HashMap<>(columns) : Collections.emptyMap(); + this.version = version != null ? 
version : DEFAULT_VERSION; } - public ObjectStorageTableMetadata(TableMetadata tableMetadata) { + public ObjectStorageTableMetadata(TableMetadata tableMetadata, Integer version) { Map clusteringOrders = tableMetadata.getClusteringKeyNames().stream() .collect( @@ -67,6 +71,11 @@ public ObjectStorageTableMetadata(TableMetadata tableMetadata) { this.clusteringOrders = clusteringOrders; this.secondaryIndexNames = tableMetadata.getSecondaryIndexNames(); this.columns = columnTypeByName; + this.version = version; + } + + public ObjectStorageTableMetadata(TableMetadata tableMetadata) { + this(tableMetadata, DEFAULT_VERSION); } private ObjectStorageTableMetadata(Builder builder) { @@ -75,7 +84,8 @@ private ObjectStorageTableMetadata(Builder builder) { builder.clusteringKeyNames, builder.clusteringOrders, builder.secondaryIndexNames, - builder.columns); + builder.columns, + builder.version); } public LinkedHashSet getPartitionKeyNames() { @@ -98,6 +108,10 @@ public Map getColumns() { return columns; } + public Integer getVersion() { + return version; + } + @Override public boolean equals(Object o) { if (this == o) { @@ -111,13 +125,19 @@ public boolean equals(Object o) { && Objects.equals(clusteringKeyNames, that.clusteringKeyNames) && Objects.equals(clusteringOrders, that.clusteringOrders) && Objects.equals(secondaryIndexNames, that.secondaryIndexNames) - && Objects.equals(columns, that.columns); + && Objects.equals(columns, that.columns) + && Objects.equals(version, that.version); } @Override public int hashCode() { return Objects.hash( - partitionKeyNames, clusteringKeyNames, clusteringOrders, secondaryIndexNames, columns); + partitionKeyNames, + clusteringKeyNames, + clusteringOrders, + secondaryIndexNames, + columns, + version); } public TableMetadata toTableMetadata() { @@ -169,6 +189,7 @@ public static final class Builder { private Map clusteringOrders; private Set secondaryIndexNames; private Map columns; + private Integer version; private Builder() {} @@ -197,6 +218,11 @@ public ObjectStorageTableMetadata.Builder columns(Map val) { return this; } + public ObjectStorageTableMetadata.Builder version(Integer val) { + version = val; + return this; + } + public ObjectStorageTableMetadata build() { return new ObjectStorageTableMetadata(this); } From c885ff813defd7db1e7f7c7931a9949562bc5a55 Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Tue, 4 Nov 2025 15:45:21 +0900 Subject: [PATCH 13/20] Apply suggestion --- .../ObjectStorageAdminTestUtils.java | 8 +++++++- .../ObjectStorageNamespaceMetadata.java | 13 ++++++------ .../ObjectStorageTableMetadata.java | 20 +++++++++---------- 3 files changed, 22 insertions(+), 19 deletions(-) diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java index ff52a25b70..49325fde31 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java @@ -3,6 +3,8 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.scalar.db.config.DatabaseConfig; import com.scalar.db.util.AdminTestUtils; +import java.util.Collections; +import java.util.LinkedHashSet; import java.util.Map; import java.util.Optional; import java.util.Properties; @@ -67,7 +69,11 @@ public void corruptMetadata(String namespace, String table) throws Exception { String tableMetadataKey = 
String.join( String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), namespace, table); - metadataTable.put(tableMetadataKey, new ObjectStorageTableMetadata()); + metadataTable.put( + tableMetadataKey, + ObjectStorageTableMetadata.newBuilder() + .partitionKeyNames(new LinkedHashSet<>(Collections.singletonList("corrupted"))) + .build()); wrapper.update(objectKey, Serializer.serialize(metadataTable), response.get().getVersion()); } diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java index b75247815f..b80b9c4ff7 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java @@ -1,5 +1,7 @@ package com.scalar.db.storage.objectstorage; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; import java.util.Objects; import javax.annotation.Nullable; import javax.annotation.concurrent.Immutable; @@ -10,13 +12,10 @@ public class ObjectStorageNamespaceMetadata { private final String name; private final Integer version; - // The default constructor is required by Jackson to deserialize JSON object - @SuppressWarnings("unused") - public ObjectStorageNamespaceMetadata() { - this(null, null); - } - - public ObjectStorageNamespaceMetadata(@Nullable String name, @Nullable Integer version) { + @JsonCreator + public ObjectStorageNamespaceMetadata( + @JsonProperty("name") @Nullable String name, + @JsonProperty("version") @Nullable Integer version) { this.name = name != null ? name : ""; this.version = version != null ? version : DEFAULT_VERSION; } diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java index 8242368726..528279066f 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java @@ -1,5 +1,7 @@ package com.scalar.db.storage.objectstorage; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; import com.scalar.db.api.Scan; import com.scalar.db.api.TableMetadata; import com.scalar.db.io.DataType; @@ -27,18 +29,14 @@ public class ObjectStorageTableMetadata { private final Map columns; private final Integer version; - // The default constructor is required by Jackson to deserialize JSON object - public ObjectStorageTableMetadata() { - this(null, null, null, null, null, null); - } - + @JsonCreator public ObjectStorageTableMetadata( - @Nullable LinkedHashSet partitionKeyNames, - @Nullable LinkedHashSet clusteringKeyNames, - @Nullable Map clusteringOrders, - @Nullable Set secondaryIndexNames, - @Nullable Map columns, - @Nullable Integer version) { + @JsonProperty("partitionKeyNames") @Nullable LinkedHashSet partitionKeyNames, + @JsonProperty("clusteringKeyNames") @Nullable LinkedHashSet clusteringKeyNames, + @JsonProperty("clusteringOrders") @Nullable Map clusteringOrders, + @JsonProperty("secondaryIndexNames") @Nullable Set secondaryIndexNames, + @JsonProperty("columns") @Nullable Map columns, + @JsonProperty("version") @Nullable Integer version) { this.partitionKeyNames = partitionKeyNames != null ? 
new LinkedHashSet<>(partitionKeyNames) : new LinkedHashSet<>(); this.clusteringKeyNames = From ed603726c356f0887ea892c2ba2b8bcde2b5b01c Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Tue, 4 Nov 2025 21:01:46 +0900 Subject: [PATCH 14/20] Apply suggestions --- .github/workflows/ci.yaml | 4 +- build.gradle | 2 +- core/build.gradle | 2 +- .../BlobWrapperIntegrationTest.java | 11 ----- .../objectstorage/ObjectStorageEnv.java | 4 +- ...jectStorageWrapperIntegrationTestBase.java | 6 ++- .../objectstorage/ObjectStorageUtils.java | 6 +-- .../ObjectStorageWrapperFactory.java | 10 ++--- .../objectstorage/blob/BlobProvider.java | 11 ----- .../BlobStorageConfig.java} | 12 +++--- .../blobstorage/BlobStorageProvider.java | 11 +++++ .../BlobStorageWrapper.java} | 6 +-- ...m.scalar.db.api.DistributedStorageProvider | 2 +- ...igTest.java => BlobStorageConfigTest.java} | 42 ++++++++++--------- 14 files changed, 61 insertions(+), 68 deletions(-) delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/BlobWrapperIntegrationTest.java delete mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobProvider.java rename core/src/main/java/com/scalar/db/storage/objectstorage/{blob/BlobConfig.java => blobstorage/BlobStorageConfig.java} (93%) create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageProvider.java rename core/src/main/java/com/scalar/db/storage/objectstorage/{blob/BlobWrapper.java => blobstorage/BlobStorageWrapper.java} (97%) rename core/src/test/java/com/scalar/db/storage/objectstorage/{BlobConfigTest.java => BlobStorageConfigTest.java} (76%) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 64b6af8ed2..07b0c9a5ba 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -2122,8 +2122,8 @@ jobs: name: tidb_8_5_integration_test_reports_${{ matrix.mode.label }} path: core/build/reports/tests/integrationTestJdbc - integration-test-for-blob: - name: Azure Blob Storage integration test (${{ matrix.mode.label }}) + integration-test-for-blob-storage: + name: Blob Storage integration test (${{ matrix.mode.label }}) runs-on: ubuntu-latest strategy: diff --git a/build.gradle b/build.gradle index 3d9c44c594..3ea43f4b29 100644 --- a/build.gradle +++ b/build.gradle @@ -28,7 +28,7 @@ subprojects { slf4jVersion = '1.7.36' cassandraDriverVersion = '3.11.5' azureCosmosVersion = '4.75.0' - azureBlobVersion = '12.31.3' + azureBlobStorageVersion = '12.31.3' jooqVersion = '3.14.16' awssdkVersion = '2.36.2' commonsDbcp2Version = '2.13.0' diff --git a/core/build.gradle b/core/build.gradle index 36e2285699..1b86bff0df 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -169,7 +169,7 @@ dependencies { implementation "org.slf4j:slf4j-api:${slf4jVersion}" implementation "com.datastax.cassandra:cassandra-driver-core:${cassandraDriverVersion}" implementation "com.azure:azure-cosmos:${azureCosmosVersion}" - implementation "com.azure:azure-storage-blob:${azureBlobVersion}" + implementation "com.azure:azure-storage-blob:${azureBlobStorageVersion}" implementation "org.jooq:jooq:${jooqVersion}" implementation platform("software.amazon.awssdk:bom:${awssdkVersion}") implementation 'software.amazon.awssdk:applicationautoscaling' diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/BlobWrapperIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/BlobWrapperIntegrationTest.java deleted file mode 100644 index 9983b05f88..0000000000 --- 
a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/BlobWrapperIntegrationTest.java +++ /dev/null @@ -1,11 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import java.util.Properties; - -public class BlobWrapperIntegrationTest extends ObjectStorageWrapperIntegrationTestBase { - - @Override - protected Properties getProperties(String testName) { - return ObjectStorageEnv.getProperties(testName); - } -} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageEnv.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageEnv.java index f4bd3f0fed..ccee56e5a1 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageEnv.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageEnv.java @@ -1,7 +1,7 @@ package com.scalar.db.storage.objectstorage; import com.scalar.db.config.DatabaseConfig; -import com.scalar.db.storage.objectstorage.blob.BlobConfig; +import com.scalar.db.storage.objectstorage.blobstorage.BlobStorageConfig; import java.util.Collections; import java.util.Map; import java.util.Properties; @@ -30,7 +30,7 @@ public static Properties getProperties(String testName) { properties.setProperty(DatabaseConfig.CONTACT_POINTS, endpoint); properties.setProperty(DatabaseConfig.USERNAME, accountName); properties.setProperty(DatabaseConfig.PASSWORD, accountKey); - properties.setProperty(DatabaseConfig.STORAGE, BlobConfig.STORAGE_NAME); + properties.setProperty(DatabaseConfig.STORAGE, BlobStorageConfig.STORAGE_NAME); properties.setProperty(DatabaseConfig.CROSS_PARTITION_SCAN, "true"); properties.setProperty(DatabaseConfig.CROSS_PARTITION_SCAN_FILTERING, "true"); properties.setProperty(DatabaseConfig.CROSS_PARTITION_SCAN_ORDERING, "false"); diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTestBase.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTestBase.java index 4add81ec7e..25c2b82cdf 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTestBase.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTestBase.java @@ -15,7 +15,7 @@ import org.slf4j.LoggerFactory; @TestInstance(TestInstance.Lifecycle.PER_CLASS) -public abstract class ObjectStorageWrapperIntegrationTestBase { +public class ObjectStorageWrapperIntegrationTestBase { private static final Logger logger = LoggerFactory.getLogger(ObjectStorageWrapperIntegrationTestBase.class); @@ -56,7 +56,9 @@ public void afterAll() { } } - protected abstract Properties getProperties(String testName); + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } private void createObjects() throws ObjectStorageWrapperException { wrapper.insert(TEST_KEY1, TEST_OBJECT1); diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java index cfb62dd444..ac04b7df21 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java @@ -1,7 +1,7 @@ package com.scalar.db.storage.objectstorage; import com.scalar.db.config.DatabaseConfig; -import com.scalar.db.storage.objectstorage.blob.BlobConfig; +import 
com.scalar.db.storage.objectstorage.blobstorage.BlobStorageConfig; import java.util.Objects; public class ObjectStorageUtils { @@ -17,8 +17,8 @@ public static String getObjectKey(String namespace, String table) { } public static ObjectStorageConfig getObjectStorageConfig(DatabaseConfig databaseConfig) { - if (Objects.equals(databaseConfig.getStorage(), BlobConfig.STORAGE_NAME)) { - return new BlobConfig(databaseConfig); + if (Objects.equals(databaseConfig.getStorage(), BlobStorageConfig.STORAGE_NAME)) { + return new BlobStorageConfig(databaseConfig); } else { throw new IllegalArgumentException( "Unsupported Object Storage: " + databaseConfig.getStorage()); diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperFactory.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperFactory.java index 46199c90e5..2fb1eda076 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperFactory.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperFactory.java @@ -1,15 +1,15 @@ package com.scalar.db.storage.objectstorage; -import com.scalar.db.storage.objectstorage.blob.BlobConfig; -import com.scalar.db.storage.objectstorage.blob.BlobWrapper; +import com.scalar.db.storage.objectstorage.blobstorage.BlobStorageConfig; +import com.scalar.db.storage.objectstorage.blobstorage.BlobStorageWrapper; import java.util.Objects; public class ObjectStorageWrapperFactory { public static ObjectStorageWrapper create(ObjectStorageConfig objectStorageConfig) { - if (Objects.equals(objectStorageConfig.getStorageName(), BlobConfig.STORAGE_NAME)) { - assert objectStorageConfig instanceof BlobConfig; - return new BlobWrapper((BlobConfig) objectStorageConfig); + if (Objects.equals(objectStorageConfig.getStorageName(), BlobStorageConfig.STORAGE_NAME)) { + assert objectStorageConfig instanceof BlobStorageConfig; + return new BlobStorageWrapper((BlobStorageConfig) objectStorageConfig); } else { throw new IllegalArgumentException( "Unsupported Object Storage: " + objectStorageConfig.getStorageName()); diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobProvider.java b/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobProvider.java deleted file mode 100644 index e4676bebfb..0000000000 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobProvider.java +++ /dev/null @@ -1,11 +0,0 @@ -package com.scalar.db.storage.objectstorage.blob; - -import com.scalar.db.storage.objectstorage.ObjectStorageProvider; - -public class BlobProvider implements ObjectStorageProvider { - - @Override - public String getName() { - return BlobConfig.STORAGE_NAME; - } -} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobConfig.java b/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageConfig.java similarity index 93% rename from core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobConfig.java rename to core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageConfig.java index 6ba78e24ca..50311787e9 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobConfig.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageConfig.java @@ -1,4 +1,4 @@ -package com.scalar.db.storage.objectstorage.blob; +package com.scalar.db.storage.objectstorage.blobstorage; import static com.scalar.db.config.ConfigUtils.getInt; import static 
com.scalar.db.config.ConfigUtils.getLong; @@ -10,8 +10,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class BlobConfig implements ObjectStorageConfig { - public static final String STORAGE_NAME = "blob"; +public class BlobStorageConfig implements ObjectStorageConfig { + public static final String STORAGE_NAME = "blob-storage"; public static final String PREFIX = DatabaseConfig.PREFIX + STORAGE_NAME + "."; public static final String PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES = @@ -31,7 +31,7 @@ public class BlobConfig implements ObjectStorageConfig { public static final long DEFAULT_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES = 4 * 1024 * 1024; // 4MB public static final int DEFAULT_REQUEST_TIMEOUT_IN_SECONDS = 15; - private static final Logger logger = LoggerFactory.getLogger(BlobConfig.class); + private static final Logger logger = LoggerFactory.getLogger(BlobStorageConfig.class); private final String endpoint; private final String username; private final String password; @@ -43,7 +43,7 @@ public class BlobConfig implements ObjectStorageConfig { private final long parallelUploadThresholdInBytes; private final int requestTimeoutInSeconds; - public BlobConfig(DatabaseConfig databaseConfig) { + public BlobStorageConfig(DatabaseConfig databaseConfig) { String storage = databaseConfig.getStorage(); if (!storage.equals(STORAGE_NAME)) { throw new IllegalArgumentException( @@ -82,7 +82,7 @@ public BlobConfig(DatabaseConfig databaseConfig) { logger.warn( "The configuration property \"" + DatabaseConfig.SCAN_FETCH_SIZE - + "\" is not applicable to Blob storage and will be ignored."); + + "\" is not applicable to Blob Storage and will be ignored."); } parallelUploadBlockSizeInBytes = diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageProvider.java b/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageProvider.java new file mode 100644 index 0000000000..dcf1242d3d --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageProvider.java @@ -0,0 +1,11 @@ +package com.scalar.db.storage.objectstorage.blobstorage; + +import com.scalar.db.storage.objectstorage.ObjectStorageProvider; + +public class BlobStorageProvider implements ObjectStorageProvider { + + @Override + public String getName() { + return BlobStorageConfig.STORAGE_NAME; + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobWrapper.java b/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageWrapper.java similarity index 97% rename from core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobWrapper.java rename to core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageWrapper.java index 71b42d1f49..b69f11cec1 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/blob/BlobWrapper.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageWrapper.java @@ -1,4 +1,4 @@ -package com.scalar.db.storage.objectstorage.blob; +package com.scalar.db.storage.objectstorage.blobstorage; import com.azure.core.http.HttpHeaderName; import com.azure.core.util.BinaryData; @@ -23,12 +23,12 @@ import java.util.Set; import java.util.stream.Collectors; -public class BlobWrapper implements ObjectStorageWrapper { +public class BlobStorageWrapper implements ObjectStorageWrapper { private final BlobContainerClient client; private final Duration requestTimeoutInSeconds; private final ParallelTransferOptions parallelTransferOptions; - 
public BlobWrapper(BlobConfig config) { + public BlobStorageWrapper(BlobStorageConfig config) { this.client = new BlobServiceClientBuilder() .endpoint(config.getEndpoint()) diff --git a/core/src/main/resources/META-INF/services/com.scalar.db.api.DistributedStorageProvider b/core/src/main/resources/META-INF/services/com.scalar.db.api.DistributedStorageProvider index 1995926861..e9ffeef06e 100644 --- a/core/src/main/resources/META-INF/services/com.scalar.db.api.DistributedStorageProvider +++ b/core/src/main/resources/META-INF/services/com.scalar.db.api.DistributedStorageProvider @@ -2,5 +2,5 @@ com.scalar.db.storage.cassandra.CassandraProvider com.scalar.db.storage.cosmos.CosmosProvider com.scalar.db.storage.dynamo.DynamoProvider com.scalar.db.storage.jdbc.JdbcProvider -com.scalar.db.storage.objectstorage.blob.BlobProvider +com.scalar.db.storage.objectstorage.blobstorage.BlobStorageProvider com.scalar.db.storage.multistorage.MultiStorageProvider diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/BlobConfigTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/BlobStorageConfigTest.java similarity index 76% rename from core/src/test/java/com/scalar/db/storage/objectstorage/BlobConfigTest.java rename to core/src/test/java/com/scalar/db/storage/objectstorage/BlobStorageConfigTest.java index c9c7dcbb59..5a4a561ae8 100644 --- a/core/src/test/java/com/scalar/db/storage/objectstorage/BlobConfigTest.java +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/BlobStorageConfigTest.java @@ -4,17 +4,17 @@ import static org.assertj.core.api.Assertions.assertThatThrownBy; import com.scalar.db.config.DatabaseConfig; -import com.scalar.db.storage.objectstorage.blob.BlobConfig; +import com.scalar.db.storage.objectstorage.blobstorage.BlobStorageConfig; import java.util.Properties; import org.junit.jupiter.api.Test; -public class BlobConfigTest { +public class BlobStorageConfigTest { private static final String ANY_USERNAME = "any_user"; private static final String ANY_PASSWORD = "any_password"; private static final String ANY_BUCKET = "bucket"; private static final String ANY_ENDPOINT = "http://localhost:10000/" + ANY_USERNAME; private static final String ANY_CONTACT_POINT = ANY_ENDPOINT + "/" + ANY_BUCKET; - private static final String BLOB_STORAGE = "blob"; + private static final String BLOB_STORAGE = "blob-storage"; private static final String ANY_TABLE_METADATA_NAMESPACE = "any_namespace"; private static final String ANY_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES = "5242880"; // 5MB private static final String ANY_PARALLEL_UPLOAD_MAX_PARALLELISM = "4"; @@ -31,15 +31,17 @@ public void constructor_AllPropertiesGiven_ShouldLoadProperly() { props.setProperty(DatabaseConfig.STORAGE, BLOB_STORAGE); props.setProperty(DatabaseConfig.SYSTEM_NAMESPACE_NAME, ANY_TABLE_METADATA_NAMESPACE); props.setProperty( - BlobConfig.PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES, ANY_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES); + BlobStorageConfig.PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES, + ANY_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES); props.setProperty( - BlobConfig.PARALLEL_UPLOAD_MAX_PARALLELISM, ANY_PARALLEL_UPLOAD_MAX_PARALLELISM); + BlobStorageConfig.PARALLEL_UPLOAD_MAX_PARALLELISM, ANY_PARALLEL_UPLOAD_MAX_PARALLELISM); props.setProperty( - BlobConfig.PARALLEL_UPLOAD_THRESHOLD_IN_BYTES, ANY_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES); - props.setProperty(BlobConfig.REQUEST_TIMEOUT_IN_SECONDS, ANY_REQUEST_TIMEOUT_IN_SECONDS); + BlobStorageConfig.PARALLEL_UPLOAD_THRESHOLD_IN_BYTES, + ANY_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES); + 
props.setProperty(BlobStorageConfig.REQUEST_TIMEOUT_IN_SECONDS, ANY_REQUEST_TIMEOUT_IN_SECONDS); // Act - BlobConfig config = new BlobConfig(new DatabaseConfig(props)); + BlobStorageConfig config = new BlobStorageConfig(new DatabaseConfig(props)); // Assert assertThat(config.getEndpoint()).isEqualTo(ANY_ENDPOINT); @@ -68,7 +70,7 @@ public void constructor_PropertiesWithoutOptimizationOptionsGiven_ShouldLoadProp props.setProperty(DatabaseConfig.SYSTEM_NAMESPACE_NAME, ANY_TABLE_METADATA_NAMESPACE); // Act - BlobConfig config = new BlobConfig(new DatabaseConfig(props)); + BlobStorageConfig config = new BlobStorageConfig(new DatabaseConfig(props)); // Assert assertThat(config.getEndpoint()).isEqualTo(ANY_ENDPOINT); @@ -77,13 +79,13 @@ public void constructor_PropertiesWithoutOptimizationOptionsGiven_ShouldLoadProp assertThat(config.getBucket()).isEqualTo(ANY_BUCKET); assertThat(config.getMetadataNamespace()).isEqualTo(ANY_TABLE_METADATA_NAMESPACE); assertThat(config.getParallelUploadBlockSizeInBytes()) - .isEqualTo(BlobConfig.DEFAULT_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES); + .isEqualTo(BlobStorageConfig.DEFAULT_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES); assertThat(config.getParallelUploadMaxParallelism()) - .isEqualTo(BlobConfig.DEFAULT_PARALLEL_UPLOAD_MAX_PARALLELISM); + .isEqualTo(BlobStorageConfig.DEFAULT_PARALLEL_UPLOAD_MAX_PARALLELISM); assertThat(config.getParallelUploadThresholdInBytes()) - .isEqualTo(BlobConfig.DEFAULT_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES); + .isEqualTo(BlobStorageConfig.DEFAULT_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES); assertThat(config.getRequestTimeoutInSeconds()) - .isEqualTo(BlobConfig.DEFAULT_REQUEST_TIMEOUT_IN_SECONDS); + .isEqualTo(BlobStorageConfig.DEFAULT_REQUEST_TIMEOUT_IN_SECONDS); } @Test @@ -95,7 +97,7 @@ public void constructor_WithoutStorage_ShouldThrowIllegalArgumentException() { props.setProperty(DatabaseConfig.PASSWORD, ANY_PASSWORD); // Act Assert - assertThatThrownBy(() -> new BlobConfig(new DatabaseConfig(props))) + assertThatThrownBy(() -> new BlobStorageConfig(new DatabaseConfig(props))) .isInstanceOf(IllegalArgumentException.class); } @@ -109,7 +111,7 @@ public void constructor_WithoutSystemNamespaceName_ShouldLoadProperly() { props.setProperty(DatabaseConfig.STORAGE, BLOB_STORAGE); // Act - BlobConfig config = new BlobConfig(new DatabaseConfig(props)); + BlobStorageConfig config = new BlobStorageConfig(new DatabaseConfig(props)); // Assert assertThat(config.getEndpoint()).isEqualTo(ANY_ENDPOINT); @@ -119,13 +121,13 @@ public void constructor_WithoutSystemNamespaceName_ShouldLoadProperly() { assertThat(config.getMetadataNamespace()) .isEqualTo(DatabaseConfig.DEFAULT_SYSTEM_NAMESPACE_NAME); assertThat(config.getParallelUploadBlockSizeInBytes()) - .isEqualTo(BlobConfig.DEFAULT_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES); + .isEqualTo(BlobStorageConfig.DEFAULT_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES); assertThat(config.getParallelUploadMaxParallelism()) - .isEqualTo(BlobConfig.DEFAULT_PARALLEL_UPLOAD_MAX_PARALLELISM); + .isEqualTo(BlobStorageConfig.DEFAULT_PARALLEL_UPLOAD_MAX_PARALLELISM); assertThat(config.getParallelUploadThresholdInBytes()) - .isEqualTo(BlobConfig.DEFAULT_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES); + .isEqualTo(BlobStorageConfig.DEFAULT_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES); assertThat(config.getRequestTimeoutInSeconds()) - .isEqualTo(BlobConfig.DEFAULT_REQUEST_TIMEOUT_IN_SECONDS); + .isEqualTo(BlobStorageConfig.DEFAULT_REQUEST_TIMEOUT_IN_SECONDS); } @Test @@ -137,7 +139,7 @@ public void constructor_WithoutSystemNamespaceName_ShouldLoadProperly() { 
props.setProperty(DatabaseConfig.STORAGE, BLOB_STORAGE); // Act - assertThatThrownBy(() -> new BlobConfig(new DatabaseConfig(props))) + assertThatThrownBy(() -> new BlobStorageConfig(new DatabaseConfig(props))) .isInstanceOf(IllegalArgumentException.class); } } From 1c208228e9d47f910b4c35527f8c536b37cafa9a Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Tue, 4 Nov 2025 21:05:01 +0900 Subject: [PATCH 15/20] Refactor BlobStorageConfigTest --- .../objectstorage/BlobStorageConfigTest.java | 33 +------------------ 1 file changed, 1 insertion(+), 32 deletions(-) diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/BlobStorageConfigTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/BlobStorageConfigTest.java index 5a4a561ae8..498d2ddd17 100644 --- a/core/src/test/java/com/scalar/db/storage/objectstorage/BlobStorageConfigTest.java +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/BlobStorageConfigTest.java @@ -60,14 +60,13 @@ public void constructor_AllPropertiesGiven_ShouldLoadProperly() { } @Test - public void constructor_PropertiesWithoutOptimizationOptionsGiven_ShouldLoadProperly() { + public void constructor_PropertiesWithoutNonMandatoryOptionsGiven_ShouldLoadProperly() { // Arrange Properties props = new Properties(); props.setProperty(DatabaseConfig.CONTACT_POINTS, ANY_CONTACT_POINT); props.setProperty(DatabaseConfig.USERNAME, ANY_USERNAME); props.setProperty(DatabaseConfig.PASSWORD, ANY_PASSWORD); props.setProperty(DatabaseConfig.STORAGE, BLOB_STORAGE); - props.setProperty(DatabaseConfig.SYSTEM_NAMESPACE_NAME, ANY_TABLE_METADATA_NAMESPACE); // Act BlobStorageConfig config = new BlobStorageConfig(new DatabaseConfig(props)); @@ -77,7 +76,6 @@ public void constructor_PropertiesWithoutOptimizationOptionsGiven_ShouldLoadProp assertThat(config.getUsername()).isEqualTo(ANY_USERNAME); assertThat(config.getPassword()).isEqualTo(ANY_PASSWORD); assertThat(config.getBucket()).isEqualTo(ANY_BUCKET); - assertThat(config.getMetadataNamespace()).isEqualTo(ANY_TABLE_METADATA_NAMESPACE); assertThat(config.getParallelUploadBlockSizeInBytes()) .isEqualTo(BlobStorageConfig.DEFAULT_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES); assertThat(config.getParallelUploadMaxParallelism()) @@ -101,35 +99,6 @@ public void constructor_WithoutStorage_ShouldThrowIllegalArgumentException() { .isInstanceOf(IllegalArgumentException.class); } - @Test - public void constructor_WithoutSystemNamespaceName_ShouldLoadProperly() { - // Arrange - Properties props = new Properties(); - props.setProperty(DatabaseConfig.CONTACT_POINTS, ANY_CONTACT_POINT); - props.setProperty(DatabaseConfig.USERNAME, ANY_USERNAME); - props.setProperty(DatabaseConfig.PASSWORD, ANY_PASSWORD); - props.setProperty(DatabaseConfig.STORAGE, BLOB_STORAGE); - - // Act - BlobStorageConfig config = new BlobStorageConfig(new DatabaseConfig(props)); - - // Assert - assertThat(config.getEndpoint()).isEqualTo(ANY_ENDPOINT); - assertThat(config.getUsername()).isEqualTo(ANY_USERNAME); - assertThat(config.getPassword()).isEqualTo(ANY_PASSWORD); - assertThat(config.getBucket()).isEqualTo(ANY_BUCKET); - assertThat(config.getMetadataNamespace()) - .isEqualTo(DatabaseConfig.DEFAULT_SYSTEM_NAMESPACE_NAME); - assertThat(config.getParallelUploadBlockSizeInBytes()) - .isEqualTo(BlobStorageConfig.DEFAULT_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES); - assertThat(config.getParallelUploadMaxParallelism()) - .isEqualTo(BlobStorageConfig.DEFAULT_PARALLEL_UPLOAD_MAX_PARALLELISM); - assertThat(config.getParallelUploadThresholdInBytes()) - 
.isEqualTo(BlobStorageConfig.DEFAULT_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES); - assertThat(config.getRequestTimeoutInSeconds()) - .isEqualTo(BlobStorageConfig.DEFAULT_REQUEST_TIMEOUT_IN_SECONDS); - } - @Test public void constructor_PropertiesWithEmptyContactPointsGiven_ShouldThrowIllegalArgumentException() { From fb4ab41920dfbccf962ad2c2dd6ff2c26ed31431 Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Wed, 5 Nov 2025 11:27:54 +0900 Subject: [PATCH 16/20] Remove metadata versioning --- .../ObjectStorageNamespaceMetadata.java | 19 ++-------- .../ObjectStorageTableMetadata.java | 36 +++---------------- 2 files changed, 8 insertions(+), 47 deletions(-) diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java index b80b9c4ff7..3c4e0588db 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java @@ -8,30 +8,17 @@ @Immutable public class ObjectStorageNamespaceMetadata { - public static final Integer DEFAULT_VERSION = 1; private final String name; - private final Integer version; @JsonCreator - public ObjectStorageNamespaceMetadata( - @JsonProperty("name") @Nullable String name, - @JsonProperty("version") @Nullable Integer version) { + public ObjectStorageNamespaceMetadata(@JsonProperty("name") @Nullable String name) { this.name = name != null ? name : ""; - this.version = version != null ? version : DEFAULT_VERSION; - } - - public ObjectStorageNamespaceMetadata(@Nullable String name) { - this(name, DEFAULT_VERSION); } public String getName() { return name; } - public Integer getVersion() { - return version; - } - @Override public boolean equals(Object o) { if (this == o) { @@ -42,11 +29,11 @@ public boolean equals(Object o) { } ObjectStorageNamespaceMetadata that = (ObjectStorageNamespaceMetadata) o; - return name.equals(that.name) && version.equals(that.version); + return name.equals(that.name); } @Override public int hashCode() { - return Objects.hash(name, version); + return Objects.hash(name); } } diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java index 528279066f..38cbfcfad0 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java @@ -21,13 +21,11 @@ @SuppressFBWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"}) @Immutable public class ObjectStorageTableMetadata { - public static final Integer DEFAULT_VERSION = 1; private final LinkedHashSet partitionKeyNames; private final LinkedHashSet clusteringKeyNames; private final Map clusteringOrders; private final Set secondaryIndexNames; private final Map columns; - private final Integer version; @JsonCreator public ObjectStorageTableMetadata( @@ -35,8 +33,7 @@ public ObjectStorageTableMetadata( @JsonProperty("clusteringKeyNames") @Nullable LinkedHashSet clusteringKeyNames, @JsonProperty("clusteringOrders") @Nullable Map clusteringOrders, @JsonProperty("secondaryIndexNames") @Nullable Set secondaryIndexNames, - @JsonProperty("columns") @Nullable Map columns, - @JsonProperty("version") @Nullable Integer version) { + @JsonProperty("columns") @Nullable Map columns) { this.partitionKeyNames = partitionKeyNames != 
null ? new LinkedHashSet<>(partitionKeyNames) : new LinkedHashSet<>(); this.clusteringKeyNames = @@ -48,10 +45,9 @@ public ObjectStorageTableMetadata( this.secondaryIndexNames = secondaryIndexNames != null ? new HashSet<>(secondaryIndexNames) : Collections.emptySet(); this.columns = columns != null ? new HashMap<>(columns) : Collections.emptyMap(); - this.version = version != null ? version : DEFAULT_VERSION; } - public ObjectStorageTableMetadata(TableMetadata tableMetadata, Integer version) { + public ObjectStorageTableMetadata(TableMetadata tableMetadata) { Map clusteringOrders = tableMetadata.getClusteringKeyNames().stream() .collect( @@ -69,11 +65,6 @@ public ObjectStorageTableMetadata(TableMetadata tableMetadata, Integer version) this.clusteringOrders = clusteringOrders; this.secondaryIndexNames = tableMetadata.getSecondaryIndexNames(); this.columns = columnTypeByName; - this.version = version; - } - - public ObjectStorageTableMetadata(TableMetadata tableMetadata) { - this(tableMetadata, DEFAULT_VERSION); } private ObjectStorageTableMetadata(Builder builder) { @@ -82,8 +73,7 @@ private ObjectStorageTableMetadata(Builder builder) { builder.clusteringKeyNames, builder.clusteringOrders, builder.secondaryIndexNames, - builder.columns, - builder.version); + builder.columns); } public LinkedHashSet getPartitionKeyNames() { @@ -106,10 +96,6 @@ public Map getColumns() { return columns; } - public Integer getVersion() { - return version; - } - @Override public boolean equals(Object o) { if (this == o) { @@ -123,19 +109,13 @@ public boolean equals(Object o) { && Objects.equals(clusteringKeyNames, that.clusteringKeyNames) && Objects.equals(clusteringOrders, that.clusteringOrders) && Objects.equals(secondaryIndexNames, that.secondaryIndexNames) - && Objects.equals(columns, that.columns) - && Objects.equals(version, that.version); + && Objects.equals(columns, that.columns); } @Override public int hashCode() { return Objects.hash( - partitionKeyNames, - clusteringKeyNames, - clusteringOrders, - secondaryIndexNames, - columns, - version); + partitionKeyNames, clusteringKeyNames, clusteringOrders, secondaryIndexNames, columns); } public TableMetadata toTableMetadata() { @@ -187,7 +167,6 @@ public static final class Builder { private Map clusteringOrders; private Set secondaryIndexNames; private Map columns; - private Integer version; private Builder() {} @@ -216,11 +195,6 @@ public ObjectStorageTableMetadata.Builder columns(Map val) { return this; } - public ObjectStorageTableMetadata.Builder version(Integer val) { - version = val; - return this; - } - public ObjectStorageTableMetadata build() { return new ObjectStorageTableMetadata(this); } From df628e5d3845f00120a20aef98dc7a132e4c5d08 Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Wed, 5 Nov 2025 17:10:22 +0900 Subject: [PATCH 17/20] Apply suggestions --- ...AdminIntegrationTestWithObjectStorage.java | 5 +++ ...geAdminCaseSensitivityIntegrationTest.java | 5 +++ .../ObjectStorageAdminIntegrationTest.java | 5 +++ ... 
ObjectStorageWrapperIntegrationTest.java} | 4 +- ...AdminIntegrationTestWithObjectStorage.java | 5 +++ .../objectstorage/ObjectStorageAdmin.java | 28 +------------ .../blobstorage/BlobStorageConfig.java | 20 +--------- .../objectstorage/ObjectStorageAdminTest.java | 39 ++----------------- .../BlobStorageConfigTest.java | 3 +- 9 files changed, 28 insertions(+), 86 deletions(-) rename core/src/integration-test/java/com/scalar/db/storage/objectstorage/{ObjectStorageWrapperIntegrationTestBase.java => ObjectStorageWrapperIntegrationTest.java} (99%) rename core/src/test/java/com/scalar/db/storage/objectstorage/{ => blobstorage}/BlobStorageConfigTest.java (97%) diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java index bb16d913c2..bccbc3cde1 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java @@ -130,4 +130,9 @@ public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesC @Override @Disabled("Object Storage does not support renaming tables") public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {} + + @Override + @Disabled("There is nothing that needs to be upgraded with Object Storage") + public void + upgrade_WhenMetadataTableExistsButNotNamespacesTable_ShouldCreateNamespacesTableAndImportExistingNamespaces() {} } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java index 084136f904..b26b9cd5a9 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java @@ -145,4 +145,9 @@ public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesC @Override @Disabled("Object Storage does not support renaming tables") public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {} + + @Override + @Disabled("There is nothing that needs to be upgraded with Object Storage") + public void + upgrade_WhenMetadataTableExistsButNotNamespacesTable_ShouldCreateNamespacesTableAndImportExistingNamespaces() {} } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java index 6738085b55..27a9e51399 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java @@ -143,4 +143,9 @@ public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesC @Override @Disabled("Object Storage does not support renaming tables") public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {} + + @Override + @Disabled("There is nothing that needs to be upgraded with Object Storage") + public 
void + upgrade_WhenMetadataTableExistsButNotNamespacesTable_ShouldCreateNamespacesTableAndImportExistingNamespaces() {} } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTestBase.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTest.java similarity index 99% rename from core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTestBase.java rename to core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTest.java index 25c2b82cdf..e32ab2ebac 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTestBase.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTest.java @@ -15,9 +15,9 @@ import org.slf4j.LoggerFactory; @TestInstance(TestInstance.Lifecycle.PER_CLASS) -public class ObjectStorageWrapperIntegrationTestBase { +public class ObjectStorageWrapperIntegrationTest { private static final Logger logger = - LoggerFactory.getLogger(ObjectStorageWrapperIntegrationTestBase.class); + LoggerFactory.getLogger(ObjectStorageWrapperIntegrationTest.class); private static final String TEST_NAME = "object_storage_wrapper_integration_test"; private static final String TEST_KEY1 = "test-key1"; diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java index 9476edba7b..0714f0770e 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java @@ -124,4 +124,9 @@ public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesC @Override @Disabled("Object Storage does not support renaming tables") public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {} + + @Override + @Disabled("There is nothing that needs to be upgraded with Object Storage") + public void + upgrade_WhenMetadataTableExistsButNotNamespacesTable_ShouldCreateNamespacesTableAndImportExistingNamespaces() {} } diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java index 0804762e4d..b4d740a6b8 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java @@ -370,33 +370,7 @@ public Set getNamespaceNames() throws ExecutionException { @Override public void upgrade(Map options) throws ExecutionException { - try { - // Get all namespace names from the table metadata table - Map tableMetadataTable = getTableMetadataTable(); - List namespaceNames = - tableMetadataTable.keySet().stream() - .map(ObjectStorageAdmin::getNamespaceNameFromTableMetadataKey) - .distinct() - .collect(Collectors.toList()); - // Upsert the namespace metadata table - Map readVersionMap = new HashMap<>(); - Map namespaceMetadataTable = - getNamespaceMetadataTable(readVersionMap); - Map newNamespaceMetadataTable = - namespaceNames.stream() - 
.collect( - Collectors.toMap(namespace -> namespace, ObjectStorageNamespaceMetadata::new)); - if (namespaceMetadataTable.isEmpty()) { - insertMetadataTable(NAMESPACE_METADATA_TABLE, newNamespaceMetadataTable); - } else { - updateMetadataTable( - NAMESPACE_METADATA_TABLE, - newNamespaceMetadataTable, - readVersionMap.get(NAMESPACE_METADATA_TABLE)); - } - } catch (Exception e) { - throw new ExecutionException("Failed to upgrade", e); - } + // Currently, nothing needs to be upgraded. Do nothing. } private Map getNamespaceMetadataTable() diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageConfig.java b/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageConfig.java index 50311787e9..f77b51bcf8 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageConfig.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageConfig.java @@ -2,7 +2,6 @@ import static com.scalar.db.config.ConfigUtils.getInt; import static com.scalar.db.config.ConfigUtils.getLong; -import static com.scalar.db.config.ConfigUtils.getString; import com.scalar.db.common.CoreError; import com.scalar.db.config.DatabaseConfig; @@ -22,10 +21,6 @@ public class BlobStorageConfig implements ObjectStorageConfig { PREFIX + "parallel_upload_threshold_in_bytes"; public static final String REQUEST_TIMEOUT_IN_SECONDS = PREFIX + "request_timeout_in_seconds"; - /** @deprecated As of 5.0, will be removed. */ - @Deprecated - public static final String TABLE_METADATA_NAMESPACE = PREFIX + "table_metadata.namespace"; - public static final long DEFAULT_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES = 4 * 1024 * 1024; // 4MB public static final int DEFAULT_PARALLEL_UPLOAD_MAX_PARALLELISM = 4; public static final long DEFAULT_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES = 4 * 1024 * 1024; // 4MB @@ -63,20 +58,7 @@ public BlobStorageConfig(DatabaseConfig databaseConfig) { } username = databaseConfig.getUsername().orElse(null); password = databaseConfig.getPassword().orElse(null); - - if (databaseConfig.getProperties().containsKey(TABLE_METADATA_NAMESPACE)) { - logger.warn( - "The configuration property \"" - + TABLE_METADATA_NAMESPACE - + "\" is deprecated and will be removed in 5.0.0."); - metadataNamespace = - getString( - databaseConfig.getProperties(), - TABLE_METADATA_NAMESPACE, - DatabaseConfig.DEFAULT_SYSTEM_NAMESPACE_NAME); - } else { - metadataNamespace = databaseConfig.getSystemNamespaceName(); - } + metadataNamespace = databaseConfig.getSystemNamespaceName(); if (databaseConfig.getScanFetchSize() != DatabaseConfig.DEFAULT_SCAN_FETCH_SIZE) { logger.warn( diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java index b5672195b4..71e1ebbe1e 100644 --- a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java @@ -608,43 +608,10 @@ public void repairTable_ShouldUpsertTableMetadata() throws Exception { } @Test - public void upgrade_WithExistingTables_ShouldUpsertNamespaces() throws Exception { + public void upgrade_ShouldDoNothing() { // Arrange - String tableMetadataKey1 = "ns1" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + "tbl1"; - String tableMetadataKey2 = "ns1" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + "tbl2"; - String tableMetadataKey3 = "ns2" + 
ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + "tbl3"; - String tableMetadataObjectKey = - ObjectStorageUtils.getObjectKey( - METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE); - String namespaceMetadataObjectKey = - ObjectStorageUtils.getObjectKey( - METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE); - - // Mock table metadata to return existing tables - Map tableMetadataMap = new HashMap<>(); - tableMetadataMap.put(tableMetadataKey1, ObjectStorageTableMetadata.newBuilder().build()); - tableMetadataMap.put(tableMetadataKey2, ObjectStorageTableMetadata.newBuilder().build()); - tableMetadataMap.put(tableMetadataKey3, ObjectStorageTableMetadata.newBuilder().build()); - String serializedTableMetadata = Serializer.serialize(tableMetadataMap); - ObjectStorageWrapperResponse tableMetadataResponse = - new ObjectStorageWrapperResponse(serializedTableMetadata, "version1"); - when(wrapper.get(tableMetadataObjectKey)).thenReturn(Optional.of(tableMetadataResponse)); - - // Mock non-existing namespace metadata - when(wrapper.get(namespaceMetadataObjectKey)).thenReturn(Optional.empty()); - - // Act - admin.upgrade(Collections.emptyMap()); - - // Assert - verify(wrapper).get(tableMetadataObjectKey); - verify(wrapper).insert(objectKeyCaptor.capture(), payloadCaptor.capture()); - - Map insertedMetadata = - Serializer.deserialize( - payloadCaptor.getValue(), - new TypeReference>() {}); - assertThat(insertedMetadata).containsKeys("ns1", "ns2"); + // Act Assert + assertThatCode(() -> admin.upgrade(Collections.emptyMap())).doesNotThrowAnyException(); } } diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/BlobStorageConfigTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageConfigTest.java similarity index 97% rename from core/src/test/java/com/scalar/db/storage/objectstorage/BlobStorageConfigTest.java rename to core/src/test/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageConfigTest.java index 498d2ddd17..697b9fd230 100644 --- a/core/src/test/java/com/scalar/db/storage/objectstorage/BlobStorageConfigTest.java +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageConfigTest.java @@ -1,10 +1,9 @@ -package com.scalar.db.storage.objectstorage; +package com.scalar.db.storage.objectstorage.blobstorage; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; import com.scalar.db.config.DatabaseConfig; -import com.scalar.db.storage.objectstorage.blobstorage.BlobStorageConfig; import java.util.Properties; import org.junit.jupiter.api.Test; From 4e647c7e4ec568aa105d2a2d4d5ef2534c516361 Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Wed, 5 Nov 2025 17:43:26 +0900 Subject: [PATCH 18/20] Resolve conflicts --- .../objectstorage/ObjectStorageAdmin.java | 16 ---------------- .../objectstorage/ObjectStorageAdminTest.java | 19 +++++-------------- 2 files changed, 5 insertions(+), 30 deletions(-) diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java index b4d740a6b8..e918e3c52e 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java @@ -48,22 +48,6 @@ public ObjectStorageAdmin(DatabaseConfig databaseConfig) { metadataNamespace = objectStorageConfig.getMetadataNamespace(); } - @Override - public 
TableMetadata getImportTableMetadata( - String namespace, String table, Map overrideColumnsType) - throws ExecutionException { - throw new UnsupportedOperationException( - CoreError.OBJECT_STORAGE_IMPORT_NOT_SUPPORTED.buildMessage()); - } - - @Override - public void addRawColumnToTable( - String namespace, String table, String columnName, DataType columnType) - throws ExecutionException { - throw new UnsupportedOperationException( - CoreError.OBJECT_STORAGE_IMPORT_NOT_SUPPORTED.buildMessage()); - } - @Override public StorageInfo getStorageInfo(String namespace) throws ExecutionException { return STORAGE_INFO; diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java index 71e1ebbe1e..9eb33d1006 100644 --- a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java @@ -32,10 +32,8 @@ public class ObjectStorageAdminTest { @Mock private ObjectStorageWrapper wrapper; @Mock private ObjectStorageConfig config; - private ObjectStorageAdmin admin; - - @Captor private ArgumentCaptor objectKeyCaptor; @Captor private ArgumentCaptor payloadCaptor; + private ObjectStorageAdmin admin; @BeforeEach public void setUp() throws Exception { @@ -127,20 +125,15 @@ public void unsupportedOperations_ShouldThrowUnsupportedException() { catchThrowable(() -> admin.createIndex(namespace, table, column, Collections.emptyMap())); Throwable thrown2 = catchThrowable(() -> admin.dropIndex(namespace, table, column)); Throwable thrown3 = - catchThrowable( - () -> admin.getImportTableMetadata(namespace, table, Collections.emptyMap())); - Throwable thrown4 = - catchThrowable(() -> admin.addRawColumnToTable(namespace, table, column, DataType.INT)); - Throwable thrown5 = catchThrowable( () -> admin.importTable( namespace, table, Collections.emptyMap(), Collections.emptyMap())); - Throwable thrown6 = catchThrowable(() -> admin.dropColumnFromTable(namespace, table, column)); - Throwable thrown7 = + Throwable thrown4 = catchThrowable(() -> admin.dropColumnFromTable(namespace, table, column)); + Throwable thrown5 = catchThrowable(() -> admin.renameColumn(namespace, table, column, "newCol")); - Throwable thrown8 = catchThrowable(() -> admin.renameTable(namespace, table, "newTable")); - Throwable thrown9 = + Throwable thrown6 = catchThrowable(() -> admin.renameTable(namespace, table, "newTable")); + Throwable thrown7 = catchThrowable(() -> admin.alterColumnType(namespace, table, column, DataType.INT)); // Assert @@ -151,8 +144,6 @@ public void unsupportedOperations_ShouldThrowUnsupportedException() { assertThat(thrown5).isInstanceOf(UnsupportedOperationException.class); assertThat(thrown6).isInstanceOf(UnsupportedOperationException.class); assertThat(thrown7).isInstanceOf(UnsupportedOperationException.class); - assertThat(thrown8).isInstanceOf(UnsupportedOperationException.class); - assertThat(thrown9).isInstanceOf(UnsupportedOperationException.class); } @Test From 38e72c3fb3f4d0fdc20f268b7fdb9baeea6505fa Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Wed, 5 Nov 2025 22:03:32 +0900 Subject: [PATCH 19/20] Apply suggestions --- ...AdminIntegrationTestWithObjectStorage.java | 4 +-- ...geAdminCaseSensitivityIntegrationTest.java | 4 +-- .../ObjectStorageAdminIntegrationTest.java | 4 +-- ...AdminIntegrationTestWithObjectStorage.java | 4 +-- .../java/com/scalar/db/common/CoreError.java | 30 
++++++++++++-------
 5 files changed, 27 insertions(+), 19 deletions(-)

diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java
index bccbc3cde1..cf94d126b4 100644
--- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java
+++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java
@@ -19,7 +19,7 @@ protected AdminTestUtils getAdminTestUtils(String testName) {
   }
 
   @Override
-  @Disabled("Temporary disable because it includes DML operations")
+  @Disabled("Temporarily disabled because it includes DML operations")
   public void truncateTable_ShouldTruncateProperly() {}
 
   @Override
@@ -132,7 +132,7 @@ public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesC
   public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {}
 
   @Override
-  @Disabled("There is nothing that needs to be upgraded with Object Storage")
+  @Disabled("ScalarDB environment does not need to be upgraded with Object Storage")
   public void
       upgrade_WhenMetadataTableExistsButNotNamespacesTable_ShouldCreateNamespacesTableAndImportExistingNamespaces() {}
 }
diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java
index b26b9cd5a9..96dedd036c 100644
--- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java
+++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java
@@ -25,7 +25,7 @@ protected AdminTestUtils getAdminTestUtils(String testName) {
   }
 
   @Override
-  @Disabled("Temporary disable because it includes DML operations")
+  @Disabled("Temporarily disabled because it includes DML operations")
   public void truncateTable_ShouldTruncateProperly() {}
 
   @Override
@@ -147,7 +147,7 @@ public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesC
   public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {}
 
   @Override
-  @Disabled("There is nothing that needs to be upgraded with Object Storage")
+  @Disabled("ScalarDB environment does not need to be upgraded with Object Storage")
   public void
       upgrade_WhenMetadataTableExistsButNotNamespacesTable_ShouldCreateNamespacesTableAndImportExistingNamespaces() {}
 }
diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java
index 27a9e51399..c76aca707c 100644
--- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java
+++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java
@@ -23,7 +23,7 @@ protected AdminTestUtils getAdminTestUtils(String testName) {
   }
 
   @Override
-  @Disabled("Temporary disable because it includes DML operations")
+  @Disabled("Temporarily disabled because it includes DML operations")
   public void truncateTable_ShouldTruncateProperly() {}
 
   @Override
@@ -145,7 +145,7 @@ public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesC
   public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {}
 
   @Override
-  @Disabled("There is nothing that needs to be upgraded with Object Storage")
+  @Disabled("ScalarDB environment does not need to be upgraded with Object Storage")
   public void
       upgrade_WhenMetadataTableExistsButNotNamespacesTable_ShouldCreateNamespacesTableAndImportExistingNamespaces() {}
 }
diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java
index 0714f0770e..bf804b079d 100644
--- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java
+++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java
@@ -13,7 +13,7 @@ protected Properties getProps(String testName) {
   }
 
   @Override
-  @Disabled("Temporary disable because it includes DML operations")
+  @Disabled("Temporarily disabled because it includes DML operations")
   public void truncateTable_ShouldTruncateProperly() {}
 
   @Override
@@ -126,7 +126,7 @@ public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesC
   public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {}
 
   @Override
-  @Disabled("There is nothing that needs to be upgraded with Object Storage")
+  @Disabled("ScalarDB environment does not need to be upgraded with Object Storage")
   public void
       upgrade_WhenMetadataTableExistsButNotNamespacesTable_ShouldCreateNamespacesTableAndImportExistingNamespaces() {}
 }
diff --git a/core/src/main/java/com/scalar/db/common/CoreError.java b/core/src/main/java/com/scalar/db/common/CoreError.java
index 6a126e3b63..fa097bad68 100644
--- a/core/src/main/java/com/scalar/db/common/CoreError.java
+++ b/core/src/main/java/com/scalar/db/common/CoreError.java
@@ -687,19 +687,27 @@ public enum CoreError implements ScalarDbError {
   COSMOS_DROP_COLUMN_NOT_SUPPORTED(
       Category.USER_ERROR,
       "0217",
-      "Cosmos DB does not support the dropping column feature",
+      "Cosmos DB does not support the feature for dropping columns",
       "",
       ""),
   DYNAMO_DROP_COLUMN_NOT_SUPPORTED(
-      Category.USER_ERROR, "0218", "DynamoDB does not support the dropping column feature", "", ""),
+      Category.USER_ERROR,
+      "0218",
+      "DynamoDB does not support the feature for dropping columns",
+      "",
+      ""),
   COSMOS_RENAME_COLUMN_NOT_SUPPORTED(
       Category.USER_ERROR,
       "0219",
-      "Cosmos DB does not support the renaming column feature",
+      "Cosmos DB does not support the feature for renaming columns",
       "",
       ""),
   DYNAMO_RENAME_COLUMN_NOT_SUPPORTED(
-      Category.USER_ERROR, "0220", "DynamoDB does not support the renaming column feature", "", ""),
+      Category.USER_ERROR,
+      "0220",
+      "DynamoDB does not support the feature for renaming columns",
+      "",
+      ""),
   CASSANDRA_RENAME_NON_PRIMARY_KEY_COLUMN_NOT_SUPPORTED(
       Category.USER_ERROR,
       "0221",
@@ -775,25 +783,25 @@ public enum CoreError implements ScalarDbError {
   CASSANDRA_ALTER_COLUMN_TYPE_NOT_SUPPORTED(
       Category.USER_ERROR,
       "0235",
-      "Cassandra does not support the altering column type feature",
+      "Cassandra does not support the feature for altering column types",
       "",
       ""),
   COSMOS_ALTER_COLUMN_TYPE_NOT_SUPPORTED(
       Category.USER_ERROR,
       "0236",
-      "Cosmos DB does not support the altering column type feature",
+      "Cosmos DB does not support the feature for altering column types",
       "",
       ""),
   DYNAMO_ALTER_COLUMN_TYPE_NOT_SUPPORTED(
       Category.USER_ERROR,
       "0237",
-      "DynamoDB does not support the altering column type feature",
+      "DynamoDB does not support the feature for altering column types",
       "",
       ""),
   JDBC_SQLITE_ALTER_COLUMN_TYPE_NOT_SUPPORTED(
       Category.USER_ERROR,
       "0238",
-      "SQLite does not support the altering column type feature",
+      "SQLite does not support the feature for altering column types",
       "",
       ""),
   JDBC_ORACLE_UNSUPPORTED_COLUMN_TYPE_CONVERSION(
@@ -864,13 +872,13 @@ public enum CoreError implements ScalarDbError {
   OBJECT_STORAGE_DROP_COLUMN_NOT_SUPPORTED(
       Category.USER_ERROR,
       "0252",
-      "Object Storage does not support the dropping column feature",
+      "Object Storage does not support the feature for dropping columns",
       "",
       ""),
   OBJECT_STORAGE_RENAME_COLUMN_NOT_SUPPORTED(
       Category.USER_ERROR,
       "0253",
-      "Object Storage does not support the renaming column feature",
+      "Object Storage does not support the feature for renaming columns",
       "",
       ""),
   OBJECT_STORAGE_RENAME_TABLE_NOT_SUPPORTED(
@@ -878,7 +886,7 @@
   OBJECT_STORAGE_ALTER_COLUMN_TYPE_NOT_SUPPORTED(
       Category.USER_ERROR,
       "0255",
-      "Object Storage does not support the altering column type feature",
+      "Object Storage does not support the feature for altering column types",
       "",
       ""),

From 63847cc852c951c2dbc8ab0dfe36fc5e7fddc391 Mon Sep 17 00:00:00 2001
From: Kodai Doki
Date: Thu, 6 Nov 2025 12:10:21 +0900
Subject: [PATCH 20/20] Apply suggestions

---
 .../ConsensusCommitAdminIntegrationTestWithObjectStorage.java   | 2 +-
 .../ObjectStorageAdminCaseSensitivityIntegrationTest.java       | 2 +-
 .../objectstorage/ObjectStorageAdminIntegrationTest.java        | 2 +-
 ...erationTransactionAdminIntegrationTestWithObjectStorage.java | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java
index cf94d126b4..5c25c0c1fe 100644
--- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java
+++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java
@@ -132,7 +132,7 @@ public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesC
   public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {}
 
   @Override
-  @Disabled("ScalarDB environment does not need to be upgraded with Object Storage")
+  @Disabled("The ScalarDB environment does not need to be upgraded with Object Storage")
   public void
       upgrade_WhenMetadataTableExistsButNotNamespacesTable_ShouldCreateNamespacesTableAndImportExistingNamespaces() {}
 }
diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java
index 96dedd036c..b8710a054d 100644
--- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java
+++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java
@@ -147,7 +147,7 @@ public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesC
   public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {}
 
   @Override
-  @Disabled("ScalarDB environment does not need to be upgraded with Object Storage")
+  @Disabled("The ScalarDB environment does not need to be upgraded with Object Storage")
   public void
       upgrade_WhenMetadataTableExistsButNotNamespacesTable_ShouldCreateNamespacesTableAndImportExistingNamespaces() {}
 }
diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java
index c76aca707c..9c10eb9fbf 100644
--- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java
+++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java
@@ -145,7 +145,7 @@ public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesC
   public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {}
 
   @Override
-  @Disabled("ScalarDB environment does not need to be upgraded with Object Storage")
+  @Disabled("The ScalarDB environment does not need to be upgraded with Object Storage")
   public void
       upgrade_WhenMetadataTableExistsButNotNamespacesTable_ShouldCreateNamespacesTableAndImportExistingNamespaces() {}
 }
diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java
index bf804b079d..9d7c946b5e 100644
--- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java
+++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java
@@ -126,7 +126,7 @@ public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesC
   public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {}
 
   @Override
-  @Disabled("ScalarDB environment does not need to be upgraded with Object Storage")
+  @Disabled("The ScalarDB environment does not need to be upgraded with Object Storage")
   public void
       upgrade_WhenMetadataTableExistsButNotNamespacesTable_ShouldCreateNamespacesTableAndImportExistingNamespaces() {}
 }