From ed305aea6284e1f6cb269568b3d8ba06d227428f Mon Sep 17 00:00:00 2001 From: brfrn169 Date: Sun, 16 Nov 2025 17:43:36 +0900 Subject: [PATCH 1/2] Extend applicability of one-phase commit optimization --- .../consensuscommit/CommitHandler.java | 482 ++++++- .../CommitHandlerWithGroupCommit.java | 5 +- .../ConsensusCommitManager.java | 2 +- .../transaction/consensuscommit/Snapshot.java | 314 +--- .../TwoPhaseConsensusCommitManager.java | 2 +- .../consensuscommit/CommitHandlerTest.java | 1259 ++++++++++++++++- .../CommitHandlerWithGroupCommitTest.java | 240 ++++ .../consensuscommit/CrudHandlerTest.java | 11 +- .../consensuscommit/SnapshotTest.java | 967 +------------ ...nsusCommitSpecificIntegrationTestBase.java | 521 ++++++- 10 files changed, 2473 insertions(+), 1330 deletions(-) diff --git a/core/src/main/java/com/scalar/db/transaction/consensuscommit/CommitHandler.java b/core/src/main/java/com/scalar/db/transaction/consensuscommit/CommitHandler.java index acd26d7820..3885544220 100644 --- a/core/src/main/java/com/scalar/db/transaction/consensuscommit/CommitHandler.java +++ b/core/src/main/java/com/scalar/db/transaction/consensuscommit/CommitHandler.java @@ -1,12 +1,22 @@ package com.scalar.db.transaction.consensuscommit; import static com.google.common.base.Preconditions.checkNotNull; +import static com.scalar.db.transaction.consensuscommit.ConsensusCommitUtils.getTransactionTableMetadata; import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.Iterables; +import com.google.common.collect.Iterators; import com.google.errorprone.annotations.concurrent.LazyInit; +import com.scalar.db.api.ConditionSetBuilder; import com.scalar.db.api.Delete; import com.scalar.db.api.DistributedStorage; +import com.scalar.db.api.Get; import com.scalar.db.api.Mutation; +import com.scalar.db.api.Operation; +import com.scalar.db.api.Result; +import com.scalar.db.api.Scan; +import com.scalar.db.api.Scanner; +import com.scalar.db.api.TableMetadata; import com.scalar.db.api.TransactionState; import com.scalar.db.common.CoreError; import com.scalar.db.exception.storage.ExecutionException; @@ -21,12 +31,19 @@ import com.scalar.db.exception.transaction.ValidationException; import com.scalar.db.transaction.consensuscommit.Coordinator.State; import com.scalar.db.transaction.consensuscommit.ParallelExecutor.ParallelExecutorTask; +import com.scalar.db.util.ScalarDbUtils; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; +import java.util.Set; import java.util.concurrent.Future; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -129,7 +146,9 @@ public void commit(TransactionContext context) Optional> beforePreparationHookFuture = invokeBeforePreparationHook(context); - if (canOnePhaseCommit(context)) { + ValidationInfo validationInfo = buildValidationInfo(context); + + if (canOnePhaseCommit(validationInfo, context)) { try { onePhaseCommitRecords(context); return; @@ -156,9 +175,9 @@ public void commit(TransactionContext context) } } - if (context.snapshot.hasReads()) { + if (validationInfo.isActuallyValidationRequired()) { try { - validateRecords(context); + validateRecords(validationInfo, context.transactionId); } catch (ValidationException e) { safelyCallOnFailureBeforeCommit(context); @@ 
-192,14 +211,14 @@ public void commit(TransactionContext context) } @VisibleForTesting - boolean canOnePhaseCommit(TransactionContext context) throws CommitException { + boolean canOnePhaseCommit(ValidationInfo validationInfo, TransactionContext context) + throws CommitException { if (!onePhaseCommitEnabled) { return false; } - // If validation is required (in SERIALIZABLE isolation), we cannot one-phase commit the - // transaction - if (context.isValidationRequired()) { + // If validation is required, we cannot one-phase commit the transaction + if (validationInfo.isActuallyValidationRequired()) { return false; } @@ -243,34 +262,31 @@ boolean canOnePhaseCommit(TransactionContext context) throws CommitException { } } - protected void handleCommitConflict(TransactionContext context, Exception cause) - throws CommitConflictException, UnknownTransactionStatusException { - try { - Optional s = coordinator.getState(context.transactionId); - if (s.isPresent()) { - TransactionState state = s.get().getState(); - if (state.equals(TransactionState.ABORTED)) { - rollbackRecords(context); - throw new CommitConflictException( - CoreError.CONSENSUS_COMMIT_CONFLICT_OCCURRED_WHEN_COMMITTING_STATE.buildMessage( - cause.getMessage()), - cause, - context.transactionId); + private ValidationInfo buildValidationInfo(TransactionContext context) { + if (context.isValidationRequired()) { + Set updatedRecordKeys = + Stream.concat( + context.snapshot.getWriteSet().stream().map(Map.Entry::getKey), + context.snapshot.getDeleteSet().stream().map(Map.Entry::getKey)) + .collect(Collectors.toSet()); + + Collection>> getSet = new ArrayList<>(); + for (Map.Entry> geSetEntry : context.snapshot.getGetSet()) { + // We don't need to validate the record in the write set or delete set + Snapshot.Key key = new Snapshot.Key(geSetEntry.getKey()); + if (!updatedRecordKeys.contains(key)) { + getSet.add(geSetEntry); } - } else { - throw new UnknownTransactionStatusException( - CoreError - .CONSENSUS_COMMIT_COMMITTING_STATE_FAILED_WITH_NO_MUTATION_EXCEPTION_BUT_COORDINATOR_STATUS_DOES_NOT_EXIST - .buildMessage(cause.getMessage()), - cause, - context.transactionId); } - } catch (CoordinatorException ex) { - throw new UnknownTransactionStatusException( - CoreError.CONSENSUS_COMMIT_CANNOT_GET_COORDINATOR_STATUS.buildMessage(ex.getMessage()), - ex, - context.transactionId); + + Collection>> scanSet = + context.snapshot.getScanSet(); + Collection scannerSet = context.snapshot.getScannerSet(); + + return new ValidationInfo(getSet, scanSet, scannerSet, updatedRecordKeys); } + + return new ValidationInfo(); } @VisibleForTesting @@ -336,15 +352,331 @@ public void prepareRecords(TransactionContext context) throws PreparationExcepti } public void validateRecords(TransactionContext context) throws ValidationException { + ValidationInfo validationInfo = buildValidationInfo(context); + if (!validationInfo.isActuallyValidationRequired()) { + return; + } + + validateRecords(validationInfo, context.transactionId); + } + + private void validateRecords(ValidationInfo validationInfo, String transactionId) + throws ValidationException { try { - // validation is executed when SERIALIZABLE is chosen. 
- context.snapshot.toSerializable(storage); + toSerializable(validationInfo, transactionId); } catch (ExecutionException e) { throw new ValidationException( CoreError.CONSENSUS_COMMIT_VALIDATION_FAILED.buildMessage(e.getMessage()), e, - context.transactionId); + transactionId); + } + } + + @VisibleForTesting + void toSerializable(ValidationInfo validationInfo, String transactionId) + throws ExecutionException, ValidationConflictException { + List tasks = new ArrayList<>(); + + // Scan set is re-validated to check if there is no anti-dependency + for (Map.Entry> entry : + validationInfo.scanSet) { + tasks.add( + () -> + validateScanResults( + storage, + entry.getKey(), + entry.getValue(), + false, + validationInfo.updatedRecordKeys, + transactionId)); } + + // Scanner set is re-validated to check if there is no anti-dependency + for (Snapshot.ScannerInfo scannerInfo : validationInfo.scannerSet) { + tasks.add( + () -> + validateScanResults( + storage, + scannerInfo.scan, + scannerInfo.results, + true, + validationInfo.updatedRecordKeys, + transactionId)); + } + + // Get set is re-validated to check if there is no anti-dependency + for (Map.Entry> entry : validationInfo.getSet) { + Get get = entry.getKey(); + TableMetadata metadata = getTableMetadata(get); + + if (ScalarDbUtils.isSecondaryIndexSpecified(get, metadata)) { + // For Get with index + tasks.add( + () -> + validateGetWithIndexResult( + storage, + get, + entry.getValue(), + validationInfo.updatedRecordKeys, + transactionId, + metadata)); + } else { + // For other Get + tasks.add(() -> validateGetResult(storage, get, entry.getValue(), transactionId, metadata)); + } + } + + parallelExecutor.validateRecords(tasks, transactionId); + } + + /** + * Validates the scan results to check if there is no anti-dependency. + * + *

<p>This method scans the latest data and compares it with the scan results. If there is a + * discrepancy, it means that the scan results have been changed by another transaction. In this case, + * a {@link ValidationConflictException} is thrown. + * + *

<p>Since the validation is performed after the prepare-record phase, the scan might include + * prepared records if the transaction has performed operations that affect the scan result. In + * such cases, those prepared records can be safely ignored. + * + *

Note that this logic is based on the assumption that identical scans return results in the + * same order, provided that the underlying data remains unchanged. + * + * @param storage a distributed storage + * @param scan the scan to be validated + * @param results the results of the scan + * @param notFullyScannedScanner if this is a validation for a scanner that has not been fully + * scanned + * @param transactionId the transaction ID + * @throws ExecutionException if a storage operation fails + * @throws ValidationConflictException if the scan results are changed by another transaction + */ + private void validateScanResults( + DistributedStorage storage, + Scan scan, + LinkedHashMap results, + boolean notFullyScannedScanner, + Set updatedRecordKeys, + String transactionId) + throws ExecutionException, ValidationConflictException { + Scanner scanner = null; + try { + TableMetadata metadata = getTableMetadata(scan); + + scanner = storage.scan(ConsensusCommitUtils.prepareScanForStorage(scan, metadata)); + + // Initialize the iterator for the latest scan results + Optional latestResult = getNextResult(scanner, scan); + + // Initialize the iterator for the original scan results + Iterator> originalResultIterator = + results.entrySet().iterator(); + Map.Entry originalResultEntry = + Iterators.getNext(originalResultIterator, null); + + // Compare the records of the iterators + while (latestResult.isPresent() && originalResultEntry != null) { + TransactionResult latestTxResult = new TransactionResult(latestResult.get()); + Snapshot.Key key = new Snapshot.Key(scan, latestTxResult, metadata); + + if (latestTxResult.getId() != null && latestTxResult.getId().equals(transactionId)) { + // The record is inserted/deleted/updated by this transaction + + // Skip the record of the latest scan results + latestResult = getNextResult(scanner, scan); + + if (originalResultEntry.getKey().equals(key)) { + // The record is updated by this transaction + + // Skip the record of the original scan results + originalResultEntry = Iterators.getNext(originalResultIterator, null); + } else { + // The record is inserted/deleted by this transaction + } + + continue; + } + + // Compare the records of the original scan results and the latest scan results + if (!originalResultEntry.getKey().equals(key)) { + if (updatedRecordKeys.contains(originalResultEntry.getKey())) { + // The record is inserted/deleted/updated by this transaction + + // Skip the record of the original scan results + originalResultEntry = Iterators.getNext(originalResultIterator, null); + continue; + } + + // The record is inserted/deleted by another transaction + throwExceptionDueToAntiDependency(transactionId); + } + if (isChanged(latestTxResult, originalResultEntry.getValue())) { + // The record is updated by another transaction + throwExceptionDueToAntiDependency(transactionId); + } + + // Proceed to the next record + latestResult = getNextResult(scanner, scan); + originalResultEntry = Iterators.getNext(originalResultIterator, null); + } + + while (originalResultEntry != null) { + if (updatedRecordKeys.contains(originalResultEntry.getKey())) { + // The record is inserted/deleted/updated by this transaction + + // Skip the record of the original scan results + originalResultEntry = Iterators.getNext(originalResultIterator, null); + } else { + // The record is inserted/deleted by another transaction + throwExceptionDueToAntiDependency(transactionId); + } + } + + if (!latestResult.isPresent()) { + return; + } + + if (scan.getLimit() != 0 && 
results.size() == scan.getLimit()) { + // We’ve already checked up to the limit, so no further checks are needed + return; + } + + if (notFullyScannedScanner) { + // If the scanner is not fully scanned, no further checks are needed + return; + } + + // Check if there are any remaining records in the latest scan results + while (latestResult.isPresent()) { + TransactionResult latestTxResult = new TransactionResult(latestResult.get()); + + if (latestTxResult.getId() != null && latestTxResult.getId().equals(transactionId)) { + // The record is inserted/deleted by this transaction + + // Skip the record + latestResult = getNextResult(scanner, scan); + } else { + // The record is inserted by another transaction + throwExceptionDueToAntiDependency(transactionId); + } + } + } finally { + if (scanner != null) { + try { + scanner.close(); + } catch (IOException e) { + logger.warn("Failed to close the scanner. Transaction ID: {}", transactionId, e); + } + } + } + } + + private Optional getNextResult(Scanner scanner, Scan scan) throws ExecutionException { + Optional next = scanner.one(); + if (!next.isPresent()) { + return next; + } + + if (!scan.getConjunctions().isEmpty()) { + // Because we also get records whose before images match the conjunctions, we need to check if + // the current status of the records actually match the conjunctions. + next = + next.filter( + r -> + ScalarDbUtils.columnsMatchAnyOfConjunctions( + r.getColumns(), scan.getConjunctions())); + } + + return next.isPresent() ? next : getNextResult(scanner, scan); + } + + private void validateGetWithIndexResult( + DistributedStorage storage, + Get get, + Optional originalResult, + Set updatedRecordKeys, + String transactionId, + TableMetadata metadata) + throws ExecutionException, ValidationConflictException { + assert get.forNamespace().isPresent() && get.forTable().isPresent(); + + // If this transaction or another transaction inserts records into the index range, + // the Get with index operation may retrieve multiple records, which would result in + // an IllegalArgumentException. Therefore, we use Scan with index instead. + Scan scanWithIndex = + Scan.newBuilder() + .namespace(get.forNamespace().get()) + .table(get.forTable().get()) + .indexKey(get.getPartitionKey()) + .whereOr( + get.getConjunctions().stream() + .map(c -> ConditionSetBuilder.andConditionSet(c.getConditions()).build()) + .collect(Collectors.toSet())) + .consistency(get.getConsistency()) + .attributes(get.getAttributes()) + .build(); + + LinkedHashMap results = new LinkedHashMap<>(1); + originalResult.ifPresent(r -> results.put(new Snapshot.Key(scanWithIndex, r, metadata), r)); + + // Validate the result to check if there is no anti-dependency + validateScanResults(storage, scanWithIndex, results, false, updatedRecordKeys, transactionId); + } + + private void validateGetResult( + DistributedStorage storage, + Get get, + Optional originalResult, + String transactionId, + TableMetadata metadata) + throws ExecutionException, ValidationConflictException { + // Check if a read record is not changed + Optional latestResult = + storage + .get(ConsensusCommitUtils.prepareGetForStorage(get, metadata)) + .map(TransactionResult::new); + + if (!get.getConjunctions().isEmpty()) { + // Because we also get records whose before images match the conjunctions, we need to check if + // the current status of the records actually match the conjunctions. 
+ latestResult = + latestResult.filter( + r -> + ScalarDbUtils.columnsMatchAnyOfConjunctions( + r.getColumns(), get.getConjunctions())); + } + + if (isChanged(latestResult, originalResult)) { + throwExceptionDueToAntiDependency(transactionId); + } + } + + private TableMetadata getTableMetadata(Operation operation) throws ExecutionException { + TransactionTableMetadata transactionTableMetadata = + getTransactionTableMetadata(tableMetadataManager, operation); + return transactionTableMetadata.getTableMetadata(); + } + + private boolean isChanged( + Optional latestResult, Optional result) { + if (latestResult.isPresent() != result.isPresent()) { + return true; + } + if (!latestResult.isPresent()) { + return false; + } + return isChanged(latestResult.get(), result.get()); + } + + private boolean isChanged(TransactionResult latestResult, TransactionResult result) { + return !Objects.equals(latestResult.getId(), result.getId()); + } + + private void throwExceptionDueToAntiDependency(String transactionId) + throws ValidationConflictException { + throw new ValidationConflictException( + CoreError.CONSENSUS_COMMIT_ANTI_DEPENDENCY_FOUND.buildMessage(), transactionId); } public void commitState(TransactionContext context) @@ -365,6 +697,36 @@ public void commitState(TransactionContext context) } } + protected void handleCommitConflict(TransactionContext context, Exception cause) + throws CommitConflictException, UnknownTransactionStatusException { + try { + Optional s = coordinator.getState(context.transactionId); + if (s.isPresent()) { + TransactionState state = s.get().getState(); + if (state.equals(TransactionState.ABORTED)) { + rollbackRecords(context); + throw new CommitConflictException( + CoreError.CONSENSUS_COMMIT_CONFLICT_OCCURRED_WHEN_COMMITTING_STATE.buildMessage( + cause.getMessage()), + cause, + context.transactionId); + } + } else { + throw new UnknownTransactionStatusException( + CoreError + .CONSENSUS_COMMIT_COMMITTING_STATE_FAILED_WITH_NO_MUTATION_EXCEPTION_BUT_COORDINATOR_STATUS_DOES_NOT_EXIST + .buildMessage(cause.getMessage()), + cause, + context.transactionId); + } + } catch (CoordinatorException ex) { + throw new UnknownTransactionStatusException( + CoreError.CONSENSUS_COMMIT_CANNOT_GET_COORDINATOR_STATUS.buildMessage(ex.getMessage()), + ex, + context.transactionId); + } + } + public void commitRecords(TransactionContext context) { try { CommitMutationComposer composer = @@ -444,4 +806,54 @@ public void rollbackRecords(TransactionContext context) { public void setBeforePreparationHook(BeforePreparationHook beforePreparationHook) { this.beforePreparationHook = checkNotNull(beforePreparationHook); } + + static class ValidationInfo { + public final Collection>> getSet; + public final Collection>> + scanSet; + public final Collection scannerSet; + public final Set updatedRecordKeys; + + ValidationInfo() { + this.getSet = new ArrayList<>(); + this.scanSet = new ArrayList<>(); + this.scannerSet = new ArrayList<>(); + this.updatedRecordKeys = new HashSet<>(); + } + + ValidationInfo( + Collection>> getSet, + Collection>> scanSet, + Collection scannerSet, + Set updatedRecordKeys) { + this.getSet = getSet; + this.scanSet = scanSet; + this.scannerSet = scannerSet; + this.updatedRecordKeys = updatedRecordKeys; + } + + boolean isActuallyValidationRequired() { + return !getSet.isEmpty() || !scanSet.isEmpty() || !scannerSet.isEmpty(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ValidationInfo)) { + return false; + } + 
ValidationInfo that = (ValidationInfo) o; + return Iterables.elementsEqual(getSet, that.getSet) + && Iterables.elementsEqual(scanSet, that.scanSet) + && Iterables.elementsEqual(scannerSet, that.scannerSet) + && Iterables.elementsEqual(updatedRecordKeys, that.updatedRecordKeys); + } + + @Override + public int hashCode() { + return Objects.hash(getSet, scanSet, scannerSet, updatedRecordKeys); + } + } } diff --git a/core/src/main/java/com/scalar/db/transaction/consensuscommit/CommitHandlerWithGroupCommit.java b/core/src/main/java/com/scalar/db/transaction/consensuscommit/CommitHandlerWithGroupCommit.java index de70c3209b..46be469dc7 100644 --- a/core/src/main/java/com/scalar/db/transaction/consensuscommit/CommitHandlerWithGroupCommit.java +++ b/core/src/main/java/com/scalar/db/transaction/consensuscommit/CommitHandlerWithGroupCommit.java @@ -60,9 +60,10 @@ public void commit(TransactionContext context) } @Override - boolean canOnePhaseCommit(TransactionContext context) throws CommitException { + boolean canOnePhaseCommit(ValidationInfo validationInfo, TransactionContext context) + throws CommitException { try { - return super.canOnePhaseCommit(context); + return super.canOnePhaseCommit(validationInfo, context); } catch (CommitException e) { cancelGroupCommitIfNeeded(context.transactionId); throw e; diff --git a/core/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitManager.java b/core/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitManager.java index c04facb622..4a0c4f1d30 100644 --- a/core/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitManager.java +++ b/core/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitManager.java @@ -263,7 +263,7 @@ DistributedTransaction begin( "Setting different isolation level from the one in DatabaseConfig might cause unexpected " + "anomalies"); } - Snapshot snapshot = new Snapshot(txId, isolation, tableMetadataManager, parallelExecutor); + Snapshot snapshot = new Snapshot(txId, tableMetadataManager); TransactionContext context = new TransactionContext(txId, snapshot, isolation, readOnly, oneOperation); DistributedTransaction transaction = diff --git a/core/src/main/java/com/scalar/db/transaction/consensuscommit/Snapshot.java b/core/src/main/java/com/scalar/db/transaction/consensuscommit/Snapshot.java index 775c4e80d8..01ad23ad8a 100644 --- a/core/src/main/java/com/scalar/db/transaction/consensuscommit/Snapshot.java +++ b/core/src/main/java/com/scalar/db/transaction/consensuscommit/Snapshot.java @@ -2,15 +2,11 @@ import static com.scalar.db.transaction.consensuscommit.ConsensusCommitOperationAttributes.isImplicitPreReadEnabled; import static com.scalar.db.transaction.consensuscommit.ConsensusCommitOperationAttributes.isInsertModeEnabled; -import static com.scalar.db.transaction.consensuscommit.ConsensusCommitUtils.getTransactionTableMetadata; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.MoreObjects; import com.google.common.collect.ComparisonChain; -import com.google.common.collect.Iterators; -import com.scalar.db.api.ConditionSetBuilder; import com.scalar.db.api.Delete; -import com.scalar.db.api.DistributedStorage; import com.scalar.db.api.Get; import com.scalar.db.api.Operation; import com.scalar.db.api.Put; @@ -19,22 +15,17 @@ import com.scalar.db.api.Scan; import com.scalar.db.api.ScanAll; import com.scalar.db.api.ScanWithIndex; -import com.scalar.db.api.Scanner; import com.scalar.db.api.Selection.Conjunction; import 
com.scalar.db.api.TableMetadata; import com.scalar.db.common.CoreError; import com.scalar.db.exception.storage.ExecutionException; import com.scalar.db.exception.transaction.CrudException; -import com.scalar.db.exception.transaction.ValidationConflictException; import com.scalar.db.io.Column; -import com.scalar.db.transaction.consensuscommit.ParallelExecutor.ParallelExecutorTask; import com.scalar.db.util.ScalarDbUtils; -import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Comparator; import java.util.HashMap; -import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -44,20 +35,14 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import java.util.stream.Collectors; import javax.annotation.Nullable; import javax.annotation.concurrent.Immutable; import javax.annotation.concurrent.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; @NotThreadSafe public class Snapshot { - private static final Logger logger = LoggerFactory.getLogger(Snapshot.class); private final String id; - private final Isolation isolation; private final TransactionTableMetadataManager tableMetadataManager; - private final ParallelExecutor parallelExecutor; // The read set stores information about the records that are read in this transaction. This is // used as a previous version for write operations. @@ -81,15 +66,9 @@ public class Snapshot { // The delete set stores information about deletes in this transaction. private final Map deleteSet; - public Snapshot( - String id, - Isolation isolation, - TransactionTableMetadataManager tableMetadataManager, - ParallelExecutor parallelExecutor) { + public Snapshot(String id, TransactionTableMetadataManager tableMetadataManager) { this.id = id; - this.isolation = isolation; this.tableMetadataManager = tableMetadataManager; - this.parallelExecutor = parallelExecutor; readSet = new ConcurrentHashMap<>(); getSet = new ConcurrentHashMap<>(); scanSet = new HashMap<>(); @@ -101,9 +80,7 @@ public Snapshot( @VisibleForTesting Snapshot( String id, - Isolation isolation, TransactionTableMetadataManager tableMetadataManager, - ParallelExecutor parallelExecutor, ConcurrentMap> readSet, ConcurrentMap> getSet, Map> scanSet, @@ -111,9 +88,7 @@ public Snapshot( Map deleteSet, List scannerSet) { this.id = id; - this.isolation = isolation; this.tableMetadataManager = tableMetadataManager; - this.parallelExecutor = parallelExecutor; this.readSet = readSet; this.getSet = getSet; this.scanSet = scanSet; @@ -520,293 +495,6 @@ private Map> getAllColumns(Put put) { return columns; } - @VisibleForTesting - void toSerializable(DistributedStorage storage) - throws ExecutionException, ValidationConflictException { - if (isolation != Isolation.SERIALIZABLE) { - return; - } - - List tasks = new ArrayList<>(); - - // Scan set is re-validated to check if there is no anti-dependency - for (Map.Entry> entry : scanSet.entrySet()) { - tasks.add(() -> validateScanResults(storage, entry.getKey(), entry.getValue(), false)); - } - - // Scanner set is re-validated to check if there is no anti-dependency - for (ScannerInfo scannerInfo : scannerSet) { - tasks.add(() -> validateScanResults(storage, scannerInfo.scan, scannerInfo.results, true)); - } - - // Get set is re-validated to check if there is no anti-dependency - for (Map.Entry> entry : getSet.entrySet()) { - Get get = entry.getKey(); - TableMetadata metadata = getTableMetadata(get); 
- - if (ScalarDbUtils.isSecondaryIndexSpecified(get, metadata)) { - // For Get with index - tasks.add(() -> validateGetWithIndexResult(storage, get, entry.getValue(), metadata)); - } else { - // For other Get - - Key key = new Key(get); - if (writeSet.containsKey(key) || deleteSet.containsKey(key)) { - continue; - } - - tasks.add(() -> validateGetResult(storage, get, entry.getValue(), metadata)); - } - } - - parallelExecutor.validateRecords(tasks, id); - } - - /** - * Validates the scan results to check if there is no anti-dependency. - * - *

<p>This method scans the latest data and compares it with the scan results. If there is a - * discrepancy, it means that the scan results are changed by another transaction. In this case, - * an {@link ValidationConflictException} is thrown. - * - *

<p>Since the validation is performed after the prepare-record phase, the scan might include - * prepared records if the transaction has performed operations that affect the scan result. In - * such cases, those prepared records can be safely ignored. - * - *

Note that this logic is based on the assumption that identical scans return results in the - * same order, provided that the underlying data remains unchanged. - * - * @param storage a distributed storage - * @param scan the scan to be validated - * @param results the results of the scan - * @param notFullyScannedScanner if this is a validation for a scanner that has not been fully - * scanned - * @throws ExecutionException if a storage operation fails - * @throws ValidationConflictException if the scan results are changed by another transaction - */ - private void validateScanResults( - DistributedStorage storage, - Scan scan, - LinkedHashMap results, - boolean notFullyScannedScanner) - throws ExecutionException, ValidationConflictException { - Scanner scanner = null; - try { - TableMetadata metadata = getTableMetadata(scan); - - scanner = storage.scan(ConsensusCommitUtils.prepareScanForStorage(scan, metadata)); - - // Initialize the iterator for the latest scan results - Optional latestResult = getNextResult(scanner, scan); - - // Initialize the iterator for the original scan results - Iterator> originalResultIterator = - results.entrySet().iterator(); - Entry originalResultEntry = - Iterators.getNext(originalResultIterator, null); - - // Compare the records of the iterators - while (latestResult.isPresent() && originalResultEntry != null) { - TransactionResult latestTxResult = new TransactionResult(latestResult.get()); - Key key = new Key(scan, latestTxResult, metadata); - - if (latestTxResult.getId() != null && latestTxResult.getId().equals(id)) { - // The record is inserted/deleted/updated by this transaction - - // Skip the record of the latest scan results - latestResult = getNextResult(scanner, scan); - - if (originalResultEntry.getKey().equals(key)) { - // The record is updated by this transaction - - // Skip the record of the original scan results - originalResultEntry = Iterators.getNext(originalResultIterator, null); - } else { - // The record is inserted/deleted by this transaction - } - - continue; - } - - // Compare the records of the original scan results and the latest scan results - if (!originalResultEntry.getKey().equals(key)) { - if (writeSet.containsKey(originalResultEntry.getKey()) - || deleteSet.containsKey(originalResultEntry.getKey())) { - // The record is inserted/deleted/updated by this transaction - - // Skip the record of the original scan results - originalResultEntry = Iterators.getNext(originalResultIterator, null); - continue; - } - - // The record is inserted/deleted by another transaction - throwExceptionDueToAntiDependency(); - } - if (isChanged(latestTxResult, originalResultEntry.getValue())) { - // The record is updated by another transaction - throwExceptionDueToAntiDependency(); - } - - // Proceed to the next record - latestResult = getNextResult(scanner, scan); - originalResultEntry = Iterators.getNext(originalResultIterator, null); - } - - while (originalResultEntry != null) { - if (writeSet.containsKey(originalResultEntry.getKey()) - || deleteSet.containsKey(originalResultEntry.getKey())) { - // The record is inserted/deleted/updated by this transaction - - // Skip the record of the original scan results - originalResultEntry = Iterators.getNext(originalResultIterator, null); - } else { - // The record is inserted/deleted by another transaction - throwExceptionDueToAntiDependency(); - } - } - - if (!latestResult.isPresent()) { - return; - } - - if (scan.getLimit() != 0 && results.size() == scan.getLimit()) { - // We’ve already checked up to the 
limit, so no further checks are needed - return; - } - - if (notFullyScannedScanner) { - // If the scanner is not fully scanned, no further checks are needed - return; - } - - // Check if there are any remaining records in the latest scan results - while (latestResult.isPresent()) { - TransactionResult latestTxResult = new TransactionResult(latestResult.get()); - - if (latestTxResult.getId() != null && latestTxResult.getId().equals(id)) { - // The record is inserted/deleted by this transaction - - // Skip the record - latestResult = getNextResult(scanner, scan); - } else { - // The record is inserted by another transaction - throwExceptionDueToAntiDependency(); - } - } - } finally { - if (scanner != null) { - try { - scanner.close(); - } catch (IOException e) { - logger.warn("Failed to close the scanner. Transaction ID: {}", id, e); - } - } - } - } - - private Optional getNextResult(Scanner scanner, Scan scan) throws ExecutionException { - Optional next = scanner.one(); - if (!next.isPresent()) { - return next; - } - - if (!scan.getConjunctions().isEmpty()) { - // Because we also get records whose before images match the conjunctions, we need to check if - // the current status of the records actually match the conjunctions. - next = - next.filter( - r -> - ScalarDbUtils.columnsMatchAnyOfConjunctions( - r.getColumns(), scan.getConjunctions())); - } - - return next.isPresent() ? next : getNextResult(scanner, scan); - } - - private void validateGetWithIndexResult( - DistributedStorage storage, - Get get, - Optional originalResult, - TableMetadata metadata) - throws ExecutionException, ValidationConflictException { - assert get.forNamespace().isPresent() && get.forTable().isPresent(); - - // If this transaction or another transaction inserts records into the index range, - // the Get with index operation may retrieve multiple records, which would result in - // an IllegalArgumentException. Therefore, we use Scan with index instead. - Scan scanWithIndex = - Scan.newBuilder() - .namespace(get.forNamespace().get()) - .table(get.forTable().get()) - .indexKey(get.getPartitionKey()) - .whereOr( - get.getConjunctions().stream() - .map(c -> ConditionSetBuilder.andConditionSet(c.getConditions()).build()) - .collect(Collectors.toSet())) - .consistency(get.getConsistency()) - .attributes(get.getAttributes()) - .build(); - - LinkedHashMap results = new LinkedHashMap<>(1); - originalResult.ifPresent(r -> results.put(new Snapshot.Key(scanWithIndex, r, metadata), r)); - - // Validate the result to check if there is no anti-dependency - validateScanResults(storage, scanWithIndex, results, false); - } - - private void validateGetResult( - DistributedStorage storage, - Get get, - Optional originalResult, - TableMetadata metadata) - throws ExecutionException, ValidationConflictException { - // Check if a read record is not changed - Optional latestResult = - storage - .get(ConsensusCommitUtils.prepareGetForStorage(get, metadata)) - .map(TransactionResult::new); - - if (!get.getConjunctions().isEmpty()) { - // Because we also get records whose before images match the conjunctions, we need to check if - // the current status of the records actually match the conjunctions. 
- latestResult = - latestResult.filter( - r -> - ScalarDbUtils.columnsMatchAnyOfConjunctions( - r.getColumns(), get.getConjunctions())); - } - - if (isChanged(latestResult, originalResult)) { - throwExceptionDueToAntiDependency(); - } - } - - private TableMetadata getTableMetadata(Operation operation) throws ExecutionException { - TransactionTableMetadata transactionTableMetadata = - getTransactionTableMetadata(tableMetadataManager, operation); - return transactionTableMetadata.getTableMetadata(); - } - - private boolean isChanged( - Optional latestResult, Optional result) { - if (latestResult.isPresent() != result.isPresent()) { - return true; - } - if (!latestResult.isPresent()) { - return false; - } - return isChanged(latestResult.get(), result.get()); - } - - private boolean isChanged(TransactionResult latestResult, TransactionResult result) { - return !Objects.equals(latestResult.getId(), result.getId()); - } - - private void throwExceptionDueToAntiDependency() throws ValidationConflictException { - throw new ValidationConflictException( - CoreError.CONSENSUS_COMMIT_ANTI_DEPENDENCY_FOUND.buildMessage(), id); - } - @Immutable public static final class Key implements Comparable { private final String namespace; diff --git a/core/src/main/java/com/scalar/db/transaction/consensuscommit/TwoPhaseConsensusCommitManager.java b/core/src/main/java/com/scalar/db/transaction/consensuscommit/TwoPhaseConsensusCommitManager.java index 9f6058258b..2b75576cb6 100644 --- a/core/src/main/java/com/scalar/db/transaction/consensuscommit/TwoPhaseConsensusCommitManager.java +++ b/core/src/main/java/com/scalar/db/transaction/consensuscommit/TwoPhaseConsensusCommitManager.java @@ -208,7 +208,7 @@ TwoPhaseCommitTransaction join(String txId, Isolation isolation) { @VisibleForTesting TwoPhaseCommitTransaction begin( String txId, Isolation isolation, boolean readOnly, boolean oneOperation) { - Snapshot snapshot = new Snapshot(txId, isolation, tableMetadataManager, parallelExecutor); + Snapshot snapshot = new Snapshot(txId, tableMetadataManager); TransactionContext context = new TransactionContext(txId, snapshot, isolation, readOnly, oneOperation); TwoPhaseConsensusCommit transaction = diff --git a/core/src/test/java/com/scalar/db/transaction/consensuscommit/CommitHandlerTest.java b/core/src/test/java/com/scalar/db/transaction/consensuscommit/CommitHandlerTest.java index be5b7c5bd0..64e816f644 100644 --- a/core/src/test/java/com/scalar/db/transaction/consensuscommit/CommitHandlerTest.java +++ b/core/src/test/java/com/scalar/db/transaction/consensuscommit/CommitHandlerTest.java @@ -1,9 +1,11 @@ package com.scalar.db.transaction.consensuscommit; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatCode; import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; @@ -15,13 +17,21 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Maps; import com.google.common.util.concurrent.Uninterruptibles; +import com.scalar.db.api.Consistency; import com.scalar.db.api.Delete; import com.scalar.db.api.DistributedStorage; import com.scalar.db.api.Get; import 
com.scalar.db.api.Put; +import com.scalar.db.api.Result; +import com.scalar.db.api.Scan; +import com.scalar.db.api.Scanner; import com.scalar.db.api.StorageInfo; +import com.scalar.db.api.TableMetadata; import com.scalar.db.api.TransactionState; +import com.scalar.db.common.ResultImpl; import com.scalar.db.common.StorageInfoImpl; import com.scalar.db.common.StorageInfoProvider; import com.scalar.db.exception.storage.ExecutionException; @@ -31,10 +41,21 @@ import com.scalar.db.exception.transaction.CommitException; import com.scalar.db.exception.transaction.UnknownTransactionStatusException; import com.scalar.db.exception.transaction.ValidationConflictException; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; import com.scalar.db.io.Key; +import com.scalar.db.io.TextColumn; import java.time.Duration; import java.time.Instant; +import java.util.AbstractMap; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.concurrent.Future; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -50,14 +71,31 @@ public class CommitHandlerTest { private static final String ANY_NAME_1 = "name1"; private static final String ANY_NAME_2 = "name2"; private static final String ANY_NAME_3 = "name3"; + private static final String ANY_NAME_4 = "name4"; + private static final String ANY_NAME_5 = "name5"; + private static final String ANY_NAME_6 = "name6"; private static final String ANY_TEXT_1 = "text1"; private static final String ANY_TEXT_2 = "text2"; private static final String ANY_TEXT_3 = "text3"; private static final String ANY_TEXT_4 = "text4"; + private static final String ANY_TEXT_5 = "text5"; + private static final String ANY_TEXT_6 = "text6"; private static final String ANY_ID = "id"; private static final int ANY_INT_1 = 100; private static final int ANY_INT_2 = 200; + private static final TableMetadata TABLE_METADATA = + ConsensusCommitUtils.buildTransactionTableMetadata( + TableMetadata.newBuilder() + .addColumn(ANY_NAME_1, DataType.TEXT) + .addColumn(ANY_NAME_2, DataType.TEXT) + .addColumn(ANY_NAME_3, DataType.TEXT) + .addColumn(ANY_NAME_4, DataType.TEXT) + .addPartitionKey(ANY_NAME_1) + .addClusteringKey(ANY_NAME_2) + .addSecondaryIndex(ANY_NAME_4) + .build()); + @Mock protected DistributedStorage storage; @Mock protected Coordinator coordinator; @Mock protected TransactionTableMetadataManager tableMetadataManager; @@ -109,6 +147,10 @@ void setUp() throws Exception { .thenReturn( new StorageInfoImpl( "storage1", StorageInfo.MutationAtomicityUnit.PARTITION, Integer.MAX_VALUE, false)); + when(tableMetadataManager.getTransactionTableMetadata(any())) + .thenReturn(new TransactionTableMetadata(TABLE_METADATA)); + when(tableMetadataManager.getTransactionTableMetadata(any(), any())) + .thenReturn(new TransactionTableMetadata(TABLE_METADATA)); } @AfterEach @@ -118,6 +160,81 @@ void tearDown() { parallelExecutor.close(); } + private Get prepareGet() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_4); + return Get.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .build(); + } + + private Get prepareAnotherGet() { + Key partitionKey = Key.ofText(ANY_NAME_5, ANY_TEXT_5); + Key clusteringKey = Key.ofText(ANY_NAME_6, ANY_TEXT_6); + return 
Get.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .build(); + } + + private Scan prepareScan() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); + return Scan.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .start(clusteringKey) + .build(); + } + + private Get prepareGetWithIndex() { + Key indexKey = Key.ofText(ANY_NAME_4, ANY_TEXT_1); + return Get.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .indexKey(indexKey) + .build(); + } + + private Scan prepareScanWithLimit(int limit) { + return Scan.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(Key.ofText(ANY_NAME_1, ANY_TEXT_1)) + .limit(limit) + .build(); + } + + private Scan prepareScanWithIndex() { + Key indexKey = Key.ofText(ANY_NAME_4, ANY_TEXT_1); + return Scan.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .indexKey(indexKey) + .build(); + } + + private Put preparePut() { + return preparePut(ANY_TEXT_1, ANY_TEXT_2); + } + + private Put preparePut(String partitionKeyColumnValue, String clusteringKeyColumnValue) { + return Put.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(Key.ofText(ANY_NAME_1, partitionKeyColumnValue)) + .clusteringKey(Key.ofText(ANY_NAME_2, clusteringKeyColumnValue)) + .textValue(ANY_NAME_3, ANY_TEXT_3) + .textValue(ANY_NAME_4, ANY_TEXT_4) + .build(); + } + private Put preparePut1() { Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); @@ -154,17 +271,6 @@ private Put preparePut3() { .build(); } - private Get prepareGet() { - Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); - Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_3); - return Get.newBuilder() - .namespace(ANY_NAMESPACE_NAME) - .table(ANY_TABLE_NAME) - .partitionKey(partitionKey) - .clusteringKey(clusteringKey) - .build(); - } - private Delete prepareDelete() { return Delete.newBuilder() .namespace(ANY_NAMESPACE_NAME) @@ -174,6 +280,35 @@ private Delete prepareDelete() { .build(); } + private TransactionResult prepareResult(String txId) { + return prepareResult(txId, ANY_TEXT_1, ANY_TEXT_2); + } + + private TransactionResult prepareResult( + String txId, String partitionKeyColumnValue, String clusteringKeyColumnValue) { + ImmutableMap> columns = + ImmutableMap.>builder() + .put(ANY_NAME_1, TextColumn.of(ANY_NAME_1, partitionKeyColumnValue)) + .put(ANY_NAME_2, TextColumn.of(ANY_NAME_2, clusteringKeyColumnValue)) + .put(ANY_NAME_3, TextColumn.of(ANY_NAME_3, ANY_TEXT_3)) + .put(ANY_NAME_4, TextColumn.of(ANY_NAME_4, ANY_TEXT_4)) + .put(Attribute.ID, TextColumn.of(Attribute.ID, txId)) + .build(); + return new TransactionResult(new ResultImpl(columns, TABLE_METADATA)); + } + + private TransactionResult prepareResultWithNullMetadata() { + ImmutableMap> columns = + ImmutableMap.>builder() + .put(ANY_NAME_1, TextColumn.of(ANY_NAME_1, ANY_TEXT_1)) + .put(ANY_NAME_2, TextColumn.of(ANY_NAME_2, ANY_TEXT_2)) + .put(ANY_NAME_3, TextColumn.of(ANY_NAME_3, ANY_TEXT_3)) + .put(ANY_NAME_4, TextColumn.of(ANY_NAME_4, ANY_TEXT_4)) + .put(Attribute.ID, TextColumn.ofNull(Attribute.ID)) + .build(); + return new TransactionResult(new ResultImpl(columns, TABLE_METADATA)); + } + private Snapshot prepareSnapshotWithDifferentPartitionPut() { Snapshot snapshot = prepareSnapshot(); @@ 
-225,13 +360,8 @@ private Snapshot prepareSnapshotWithoutReads() { return snapshot; } - private Snapshot prepareSnapshotWithIsolation(Isolation isolation) { - return new Snapshot(anyId(), isolation, tableMetadataManager, new ParallelExecutor(config)); - } - private Snapshot prepareSnapshot() { - return new Snapshot( - anyId(), Isolation.SNAPSHOT, tableMetadataManager, new ParallelExecutor(config)); + return new Snapshot(anyId(), tableMetadataManager); } private void setBeforePreparationHookIfNeeded(boolean withBeforePreparationHook) { @@ -252,15 +382,16 @@ public void commit_SnapshotWithDifferentPartitionPutsGiven_ShouldCommitRespectiv doNothing().when(storage).mutate(anyList()); doNothingWhenCoordinatorPutState(); setBeforePreparationHookIfNeeded(withBeforePreparationHook); + doNothing().when(handler).toSerializable(any(), anyString()); TransactionContext context = - new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false); + new TransactionContext(anyId(), snapshot, Isolation.SERIALIZABLE, false, false); // Act handler.commit(context); // Assert verify(storage, times(4)).mutate(anyList()); - verify(snapshot).toSerializable(storage); + verify(handler).toSerializable(any(), eq(anyId())); verifyCoordinatorPutState(TransactionState.COMMITTED); verifyBeforePreparationHook(withBeforePreparationHook, context); verify(handler, never()).onFailureBeforeCommit(any()); @@ -277,15 +408,16 @@ public void commit_SnapshotWithSamePartitionPutsGiven_ShouldCommitAtOnce( doNothing().when(storage).mutate(anyList()); doNothingWhenCoordinatorPutState(); setBeforePreparationHookIfNeeded(withBeforePreparationHook); + doNothing().when(handler).toSerializable(any(), anyString()); TransactionContext context = - new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false); + new TransactionContext(anyId(), snapshot, Isolation.SERIALIZABLE, false, false); // Act handler.commit(context); // Assert verify(storage, times(2)).mutate(anyList()); - verify(snapshot).toSerializable(storage); + verify(handler).toSerializable(any(), eq(anyId())); verifyCoordinatorPutState(TransactionState.COMMITTED); verifyBeforePreparationHook(withBeforePreparationHook, context); verify(handler, never()).onFailureBeforeCommit(any()); @@ -300,15 +432,16 @@ public void commit_InReadOnlyMode_ShouldNotPrepareRecordsAndCommitStateAndCommit // Arrange Snapshot snapshot = spy(prepareSnapshotWithoutWrites()); setBeforePreparationHookIfNeeded(withBeforePreparationHook); + doNothing().when(handler).toSerializable(any(), anyString()); TransactionContext context = - new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, true, false); + new TransactionContext(anyId(), snapshot, Isolation.SERIALIZABLE, true, false); // Act handler.commit(context); // Assert verify(storage, never()).mutate(anyList()); - verify(snapshot).toSerializable(storage); + verify(handler).toSerializable(any(), eq(anyId())); verify(coordinator, never()).putState(any()); verifyBeforePreparationHook(withBeforePreparationHook, context); verify(handler, never()).onFailureBeforeCommit(any()); @@ -324,15 +457,16 @@ public void commit_InReadOnlyMode_ShouldNotPrepareRecordsAndCommitStateAndCommit // Arrange Snapshot snapshot = spy(prepareSnapshotWithoutWrites()); setBeforePreparationHookIfNeeded(withBeforePreparationHook); + doNothing().when(handler).toSerializable(any(), anyString()); TransactionContext context = - new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false); + new TransactionContext(anyId(), snapshot, 
Isolation.SERIALIZABLE, false, false); // Act handler.commit(context); // Assert verify(storage, never()).mutate(anyList()); - verify(snapshot).toSerializable(storage); + verify(handler).toSerializable(any(), eq(anyId())); verify(coordinator, never()).putState(any()); verifyBeforePreparationHook(withBeforePreparationHook, context); verify(handler, never()).onFailureBeforeCommit(any()); @@ -349,15 +483,16 @@ public void commit_InReadOnlyMode_ShouldNotPrepareRecordsAndCommitStateAndCommit handler = spy(createCommitHandler(false)); Snapshot snapshot = spy(prepareSnapshotWithoutWrites()); setBeforePreparationHookIfNeeded(withBeforePreparationHook); + doNothing().when(handler).toSerializable(any(), anyString()); TransactionContext context = - new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false); + new TransactionContext(anyId(), snapshot, Isolation.SERIALIZABLE, false, false); // Act handler.commit(context); // Assert verify(storage, never()).mutate(anyList()); - verify(snapshot).toSerializable(storage); + verify(handler).toSerializable(any(), eq(anyId())); verifyCoordinatorPutState(TransactionState.COMMITTED); verifyBeforePreparationHook(withBeforePreparationHook, context); verify(handler, never()).onFailureBeforeCommit(any()); @@ -372,16 +507,16 @@ public void commit_InReadOnlyMode_ShouldNotPrepareRecordsAndCommitStateAndCommit // Arrange Snapshot snapshot = spy(prepareSnapshotWithoutWrites()); setBeforePreparationHookIfNeeded(withBeforePreparationHook); - doThrow(ValidationConflictException.class).when(snapshot).toSerializable(storage); + doThrow(ValidationConflictException.class).when(handler).toSerializable(any(), anyString()); TransactionContext context = - new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false); + new TransactionContext(anyId(), snapshot, Isolation.SERIALIZABLE, false, false); // Act Assert assertThatThrownBy(() -> handler.commit(context)).isInstanceOf(CommitConflictException.class); // Assert verify(storage, never()).mutate(anyList()); - verify(snapshot).toSerializable(storage); + verify(handler).toSerializable(any(), eq(anyId())); verify(coordinator, never()).putState(any()); verifyBeforePreparationHook(withBeforePreparationHook, context); verify(handler).onFailureBeforeCommit(any()); @@ -397,16 +532,16 @@ public void commit_InReadOnlyMode_ShouldNotPrepareRecordsAndCommitStateAndCommit handler = spy(createCommitHandler(false)); Snapshot snapshot = spy(prepareSnapshotWithoutWrites()); setBeforePreparationHookIfNeeded(withBeforePreparationHook); - doThrow(ValidationConflictException.class).when(snapshot).toSerializable(storage); + doThrow(ValidationConflictException.class).when(handler).toSerializable(any(), anyString()); TransactionContext context = - new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false); + new TransactionContext(anyId(), snapshot, Isolation.SERIALIZABLE, false, false); // Act Assert assertThatThrownBy(() -> handler.commit(context)).isInstanceOf(CommitConflictException.class); // Assert verify(storage, never()).mutate(anyList()); - verify(snapshot).toSerializable(storage); + verify(handler).toSerializable(any(), eq(anyId())); verify(coordinator).putState(any()); verifyBeforePreparationHook(withBeforePreparationHook, context); verify(handler).onFailureBeforeCommit(any()); @@ -422,15 +557,16 @@ public void commit_NoReadsInSnapshot_ShouldNotValidateRecords(boolean withBefore doNothing().when(storage).mutate(anyList()); doNothingWhenCoordinatorPutState(); 
setBeforePreparationHookIfNeeded(withBeforePreparationHook); + doNothing().when(handler).toSerializable(any(), anyString()); TransactionContext context = - new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false); + new TransactionContext(anyId(), snapshot, Isolation.SERIALIZABLE, false, false); // Act handler.commit(context); // Assert verify(storage, times(2)).mutate(anyList()); - verify(snapshot, never()).toSerializable(storage); + verify(handler, never()).toSerializable(any(), eq(anyId())); verifyCoordinatorPutState(TransactionState.COMMITTED); verifyBeforePreparationHook(withBeforePreparationHook, context); verify(handler, never()).onFailureBeforeCommit(any()); @@ -637,11 +773,11 @@ public void commit_ValidationConflictExceptionThrownInValidation_ShouldAbortAndR // Arrange Snapshot snapshot = spy(prepareSnapshotWithDifferentPartitionPut()); doNothing().when(storage).mutate(anyList()); - doThrow(ValidationConflictException.class).when(snapshot).toSerializable(storage); + doThrow(ValidationConflictException.class).when(handler).toSerializable(any(), anyString()); doNothing().when(coordinator).putState(any(Coordinator.State.class)); doNothing().when(handler).rollbackRecords(any(TransactionContext.class)); TransactionContext context = - new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false); + new TransactionContext(anyId(), snapshot, Isolation.SERIALIZABLE, false, false); // Act assertThatThrownBy(() -> handler.commit(context)).isInstanceOf(CommitException.class); @@ -663,11 +799,11 @@ public void commit_ExceptionThrownInValidation_ShouldAbortAndRollbackRecords() // Arrange Snapshot snapshot = spy(prepareSnapshotWithDifferentPartitionPut()); doNothing().when(storage).mutate(anyList()); - doThrow(ExecutionException.class).when(snapshot).toSerializable(storage); + doThrow(ExecutionException.class).when(handler).toSerializable(any(), anyString()); doNothing().when(coordinator).putState(any(Coordinator.State.class)); doNothing().when(handler).rollbackRecords(any(TransactionContext.class)); TransactionContext context = - new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false); + new TransactionContext(anyId(), snapshot, Isolation.SERIALIZABLE, false, false); // Act assertThatThrownBy(() -> handler.commit(context)).isInstanceOf(CommitException.class); @@ -690,7 +826,7 @@ public void commit_ExceptionThrownInValidation_ShouldAbortAndRollbackRecords() // Arrange Snapshot snapshot = spy(prepareSnapshotWithDifferentPartitionPut()); doNothing().when(storage).mutate(anyList()); - doThrow(ExecutionException.class).when(snapshot).toSerializable(storage); + doThrow(ExecutionException.class).when(handler).toSerializable(any(), anyString()); doThrow(CoordinatorConflictException.class) .when(coordinator) .putState(new Coordinator.State(anyId(), TransactionState.ABORTED)); @@ -699,7 +835,7 @@ public void commit_ExceptionThrownInValidation_ShouldAbortAndRollbackRecords() .getState(anyId()); doNothing().when(handler).rollbackRecords(any(TransactionContext.class)); TransactionContext context = - new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false); + new TransactionContext(anyId(), snapshot, Isolation.SERIALIZABLE, false, false); // Act assertThatThrownBy(() -> handler.commit(context)).isInstanceOf(CommitException.class); @@ -723,13 +859,13 @@ public void commit_ExceptionThrownInValidation_ShouldAbortAndRollbackRecords() // Arrange Snapshot snapshot = spy(prepareSnapshotWithDifferentPartitionPut()); 
doNothing().when(storage).mutate(anyList()); - doThrow(ExecutionException.class).when(snapshot).toSerializable(storage); + doThrow(ExecutionException.class).when(handler).toSerializable(any(), anyString()); doThrow(CoordinatorConflictException.class) .when(coordinator) .putState(new Coordinator.State(anyId(), TransactionState.ABORTED)); doReturn(Optional.empty()).when(coordinator).getState(anyId()); TransactionContext context = - new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false); + new TransactionContext(anyId(), snapshot, Isolation.SERIALIZABLE, false, false); // Act assertThatThrownBy(() -> handler.commit(context)) @@ -754,13 +890,13 @@ public void commit_ExceptionThrownInValidation_ShouldAbortAndRollbackRecords() // Arrange Snapshot snapshot = spy(prepareSnapshotWithDifferentPartitionPut()); doNothing().when(storage).mutate(anyList()); - doThrow(ExecutionException.class).when(snapshot).toSerializable(storage); + doThrow(ExecutionException.class).when(handler).toSerializable(any(), anyString()); doThrow(CoordinatorConflictException.class) .when(coordinator) .putState(new Coordinator.State(anyId(), TransactionState.ABORTED)); doThrow(CoordinatorException.class).when(coordinator).getState(anyId()); TransactionContext context = - new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false); + new TransactionContext(anyId(), snapshot, Isolation.SERIALIZABLE, false, false); // Act assertThatThrownBy(() -> handler.commit(context)) @@ -785,12 +921,12 @@ public void commit_ExceptionThrownInValidation_ShouldAbortAndRollbackRecords() // Arrange Snapshot snapshot = spy(prepareSnapshotWithDifferentPartitionPut()); doNothing().when(storage).mutate(anyList()); - doThrow(ExecutionException.class).when(snapshot).toSerializable(storage); + doThrow(ExecutionException.class).when(handler).toSerializable(any(), anyString()); doThrow(CoordinatorException.class) .when(coordinator) .putState(new Coordinator.State(anyId(), TransactionState.ABORTED)); TransactionContext context = - new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false); + new TransactionContext(anyId(), snapshot, Isolation.SERIALIZABLE, false, false); // Act assertThatThrownBy(() -> handler.commit(context)) @@ -1024,11 +1160,12 @@ public void commit_FailingBeforePreparationHookFutureGiven_ShouldThrowCommitExce public void canOnePhaseCommit_WhenOnePhaseCommitDisabled_ShouldReturnFalse() throws Exception { // Arrange Snapshot snapshot = prepareSnapshot(); + CommitHandler.ValidationInfo validationInfo = mock(CommitHandler.ValidationInfo.class); TransactionContext context = new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false); // Act - boolean actual = handler.canOnePhaseCommit(context); + boolean actual = handler.canOnePhaseCommit(validationInfo, context); // Assert assertThat(actual).isFalse(); @@ -1039,12 +1176,14 @@ public void canOnePhaseCommit_WhenOnePhaseCommitDisabled_ShouldReturnFalse() thr public void canOnePhaseCommit_WhenValidationRequired_ShouldReturnFalse() throws Exception { // Arrange CommitHandler handler = createCommitHandlerWithOnePhaseCommit(); - Snapshot snapshot = prepareSnapshotWithIsolation(Isolation.SERIALIZABLE); + CommitHandler.ValidationInfo validationInfo = mock(CommitHandler.ValidationInfo.class); + when(validationInfo.isActuallyValidationRequired()).thenReturn(true); + Snapshot snapshot = prepareSnapshot(); TransactionContext context = new TransactionContext(anyId(), snapshot, Isolation.SERIALIZABLE, false, false); // Act - boolean 
actual = handler.canOnePhaseCommit(context); + boolean actual = handler.canOnePhaseCommit(validationInfo, context); // Assert assertThat(actual).isFalse(); @@ -1055,12 +1194,13 @@ public void canOnePhaseCommit_WhenValidationRequired_ShouldReturnFalse() throws public void canOnePhaseCommit_WhenNoWritesAndDeletes_ShouldReturnFalse() throws Exception { // Arrange CommitHandler handler = createCommitHandlerWithOnePhaseCommit(); + CommitHandler.ValidationInfo validationInfo = mock(CommitHandler.ValidationInfo.class); Snapshot snapshot = prepareSnapshotWithoutWrites(); TransactionContext context = new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false); // Act - boolean actual = handler.canOnePhaseCommit(context); + boolean actual = handler.canOnePhaseCommit(validationInfo, context); // Assert assertThat(actual).isFalse(); @@ -1072,6 +1212,7 @@ public void canOnePhaseCommit_WhenDeleteWithoutExistingRecord_ShouldReturnFalse( throws Exception { // Arrange CommitHandler handler = createCommitHandlerWithOnePhaseCommit(); + CommitHandler.ValidationInfo validationInfo = mock(CommitHandler.ValidationInfo.class); Snapshot snapshot = prepareSnapshot(); // Setup a delete with no corresponding record in read set @@ -1083,7 +1224,7 @@ public void canOnePhaseCommit_WhenDeleteWithoutExistingRecord_ShouldReturnFalse( new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false); // Act - boolean actual = handler.canOnePhaseCommit(context); + boolean actual = handler.canOnePhaseCommit(validationInfo, context); // Assert assertThat(actual).isFalse(); @@ -1094,6 +1235,7 @@ public void canOnePhaseCommit_WhenDeleteWithoutExistingRecord_ShouldReturnFalse( public void canOnePhaseCommit_WhenMutationsCanBeGrouped_ShouldReturnTrue() throws Exception { // Arrange CommitHandler handler = createCommitHandlerWithOnePhaseCommit(); + CommitHandler.ValidationInfo validationInfo = mock(CommitHandler.ValidationInfo.class); Snapshot snapshot = prepareSnapshot(); Delete delete = prepareDelete(); @@ -1107,7 +1249,7 @@ public void canOnePhaseCommit_WhenMutationsCanBeGrouped_ShouldReturnTrue() throw new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false); // Act - boolean actual = handler.canOnePhaseCommit(context); + boolean actual = handler.canOnePhaseCommit(validationInfo, context); // Assert assertThat(actual).isTrue(); @@ -1118,6 +1260,7 @@ public void canOnePhaseCommit_WhenMutationsCanBeGrouped_ShouldReturnTrue() throw public void canOnePhaseCommit_WhenMutationsCannotBeGrouped_ShouldReturnFalse() throws Exception { // Arrange CommitHandler handler = createCommitHandlerWithOnePhaseCommit(); + CommitHandler.ValidationInfo validationInfo = mock(CommitHandler.ValidationInfo.class); Snapshot snapshot = prepareSnapshot(); Delete delete = prepareDelete(); @@ -1131,7 +1274,7 @@ public void canOnePhaseCommit_WhenMutationsCannotBeGrouped_ShouldReturnFalse() t new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false); // Act - boolean actual = handler.canOnePhaseCommit(context); + boolean actual = handler.canOnePhaseCommit(validationInfo, context); // Assert assertThat(actual).isFalse(); @@ -1144,6 +1287,7 @@ public void canOnePhaseCommit_WhenMutationsCannotBeGrouped_ShouldReturnFalse() t throws ExecutionException { // Arrange CommitHandler handler = createCommitHandlerWithOnePhaseCommit(); + CommitHandler.ValidationInfo validationInfo = mock(CommitHandler.ValidationInfo.class); Snapshot snapshot = prepareSnapshot(); Delete delete = prepareDelete(); @@ -1157,7 +1301,7 
@@ public void canOnePhaseCommit_WhenMutationsCannotBeGrouped_ShouldReturnFalse() t
         new TransactionContext(anyId(), snapshot, Isolation.SNAPSHOT, false, false);

     // Act Assert
-    assertThatThrownBy(() -> handler.canOnePhaseCommit(context))
+    assertThatThrownBy(() -> handler.canOnePhaseCommit(validationInfo, context))
         .isInstanceOf(CommitException.class)
         .hasCauseInstanceOf(ExecutionException.class);
   }
@@ -1234,7 +1378,9 @@ public void commit_OnePhaseCommitted_ShouldNotThrowAnyException()
     CommitHandler handler = spy(createCommitHandlerWithOnePhaseCommit());
     Snapshot snapshot = prepareSnapshotWithSamePartitionPut();

-    doReturn(true).when(handler).canOnePhaseCommit(any(TransactionContext.class));
+    doReturn(true)
+        .when(handler)
+        .canOnePhaseCommit(any(CommitHandler.ValidationInfo.class), any(TransactionContext.class));
     doNothing().when(handler).onePhaseCommitRecords(any(TransactionContext.class));

     TransactionContext context =
@@ -1244,7 +1390,7 @@ public void commit_OnePhaseCommitted_ShouldNotThrowAnyException()
     handler.commit(context);

     // Assert
-    verify(handler).canOnePhaseCommit(context);
+    verify(handler).canOnePhaseCommit(new CommitHandler.ValidationInfo(), context);
     verify(handler).onePhaseCommitRecords(context);
   }

@@ -1256,7 +1402,9 @@ public void commit_OnePhaseCommitted_ShouldNotThrowAnyException()
     CommitHandler handler = spy(createCommitHandlerWithOnePhaseCommit());
     Snapshot snapshot = prepareSnapshotWithSamePartitionPut();

-    doReturn(true).when(handler).canOnePhaseCommit(any(TransactionContext.class));
+    doReturn(true)
+        .when(handler)
+        .canOnePhaseCommit(any(CommitHandler.ValidationInfo.class), any(TransactionContext.class));
     doThrow(UnknownTransactionStatusException.class)
         .when(handler)
         .onePhaseCommitRecords(any(TransactionContext.class));
@@ -1271,6 +1419,1001 @@ public void commit_OnePhaseCommitted_ShouldNotThrowAnyException()
     verify(handler).onFailureBeforeCommit(context);
   }

+  @Test
+  public void toSerializable_ReadSetNotChanged_ShouldProcessWithoutExceptions()
+      throws ExecutionException {
+    // Arrange
+    Get get = prepareAnotherGet();
+    Put put = preparePut();
+    TransactionResult result = prepareResult(anyId());
+    TransactionResult txResult = new TransactionResult(result);
+    Map.Entry<Get, Optional<TransactionResult>> getSetEntry =
+        new AbstractMap.SimpleEntry<>(get, Optional.of(txResult));
+    CommitHandler.ValidationInfo validationInfo =
+        new CommitHandler.ValidationInfo(
+            Collections.singletonList(getSetEntry),
+            Collections.emptyList(),
+            Collections.emptyList(),
+            Collections.singleton(new Snapshot.Key(put)));
+
+    Get getForStorage =
+        Get.newBuilder(prepareAnotherGet()).consistency(Consistency.LINEARIZABLE).build();
+    when(storage.get(getForStorage)).thenReturn(Optional.of(txResult));
+
+    // Act Assert
+    assertThatCode(() -> handler.toSerializable(validationInfo, anyId()))
+        .doesNotThrowAnyException();
+
+    // Assert
+    verify(storage).get(getForStorage);
+  }
+
+  @Test
+  public void toSerializable_ReadSetUpdated_ShouldThrowValidationConflictException()
+      throws ExecutionException {
+    // Arrange
+    Get get = prepareAnotherGet();
+    Put put = preparePut();
+    TransactionResult txResult = prepareResult(anyId());
+    TransactionResult changedTxResult = prepareResult(anyId() + "x");
+    Map.Entry<Get, Optional<TransactionResult>> getSetEntry =
+        new AbstractMap.SimpleEntry<>(get, Optional.of(txResult));
+    CommitHandler.ValidationInfo validationInfo =
+        new CommitHandler.ValidationInfo(
+            Collections.singletonList(getSetEntry),
+            Collections.emptyList(),
+            Collections.emptyList(),
+            Collections.singleton(new Snapshot.Key(put)));
+
+    Get
getForStorage = + Get.newBuilder(prepareAnotherGet()).consistency(Consistency.LINEARIZABLE).build(); + when(storage.get(getForStorage)).thenReturn(Optional.of(changedTxResult)); + + // Act Assert + assertThatThrownBy(() -> handler.toSerializable(validationInfo, anyId())) + .isInstanceOf(ValidationConflictException.class); + + // Assert + verify(storage).get(getForStorage); + } + + @Test + public void toSerializable_ReadSetExtended_ShouldThrowValidationConflictException() + throws ExecutionException { + // Arrange + Get get = prepareAnotherGet(); + Put put = preparePut(); + Map.Entry> getSetEntry = + new AbstractMap.SimpleEntry<>(get, Optional.empty()); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.singletonList(getSetEntry), + Collections.emptyList(), + Collections.emptyList(), + Collections.singleton(new Snapshot.Key(put))); + + TransactionResult txResult = prepareResult(anyId()); + Get getForStorage = + Get.newBuilder(prepareAnotherGet()).consistency(Consistency.LINEARIZABLE).build(); + when(storage.get(getForStorage)).thenReturn(Optional.of(txResult)); + + // Act Assert + assertThatThrownBy(() -> handler.toSerializable(validationInfo, anyId())) + .isInstanceOf(ValidationConflictException.class); + + // Assert + verify(storage).get(getForStorage); + } + + @Test + public void toSerializable_GetSetWithGetWithIndex_ShouldProcessWithoutExceptions() + throws ExecutionException { + // Arrange + Get getWithIndex = prepareGetWithIndex(); + TransactionResult txResult = prepareResult(anyId() + "x"); + Map.Entry> getSetEntry = + new AbstractMap.SimpleEntry<>(getWithIndex, Optional.of(txResult)); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.singletonList(getSetEntry), + Collections.emptyList(), + Collections.emptyList(), + Collections.emptySet()); + + Scan scanForStorage = + Scan.newBuilder(prepareScanWithIndex()).consistency(Consistency.LINEARIZABLE).build(); + + Scanner scanner = mock(Scanner.class); + when(scanner.one()).thenReturn(Optional.of(txResult)).thenReturn(Optional.empty()); + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatCode(() -> handler.toSerializable(validationInfo, anyId())) + .doesNotThrowAnyException(); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void + toSerializable_GetSetWithGetWithIndex_RecordInserted_ShouldThrowValidationConflictException() + throws ExecutionException { + // Arrange + Get getWithIndex = prepareGetWithIndex(); + TransactionResult result1 = prepareResult(anyId() + "x", ANY_TEXT_1, ANY_TEXT_2); + TransactionResult result2 = prepareResult(anyId() + "xx", ANY_TEXT_1, ANY_TEXT_3); + Map.Entry> getSetEntry = + new AbstractMap.SimpleEntry<>(getWithIndex, Optional.of(result1)); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.singletonList(getSetEntry), + Collections.emptyList(), + Collections.emptyList(), + Collections.emptySet()); + + Scan scanForStorage = + Scan.newBuilder(prepareScanWithIndex()).consistency(Consistency.LINEARIZABLE).build(); + + Scanner scanner = mock(Scanner.class); + when(scanner.one()) + .thenReturn(Optional.of(result1)) + .thenReturn(Optional.of(result2)) + .thenReturn(Optional.empty()); + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatThrownBy(() -> handler.toSerializable(validationInfo, anyId())) + .isInstanceOf(ValidationConflictException.class); + + // Assert + 
verify(storage).scan(scanForStorage); + } + + @Test + public void + toSerializable_GetSetWithGetWithIndex_RecordInsertedByMyself_ShouldProcessWithoutExceptions() + throws ExecutionException { + // Arrange + Get getWithIndex = prepareGetWithIndex(); + TransactionResult result1 = prepareResult(anyId() + "x", ANY_TEXT_1, ANY_TEXT_2); + TransactionResult result2 = prepareResult(anyId(), ANY_TEXT_1, ANY_TEXT_3); + Map.Entry> getSetEntry = + new AbstractMap.SimpleEntry<>(getWithIndex, Optional.of(result1)); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.singletonList(getSetEntry), + Collections.emptyList(), + Collections.emptyList(), + Collections.singleton(new Snapshot.Key(preparePut(ANY_TEXT_1, ANY_TEXT_3)))); + + Scan scanForStorage = + Scan.newBuilder(prepareScanWithIndex()).consistency(Consistency.LINEARIZABLE).build(); + + Scanner scanner = mock(Scanner.class); + when(scanner.one()) + .thenReturn(Optional.of(result1)) + .thenReturn(Optional.of(result2)) + .thenReturn(Optional.empty()); + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatCode(() -> handler.toSerializable(validationInfo, anyId())) + .doesNotThrowAnyException(); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void toSerializable_ScanSetNotChanged_ShouldProcessWithoutExceptions() + throws ExecutionException { + // Arrange + Scan scan = prepareScan(); + TransactionResult txResult = prepareResult(anyId() + "x"); + Snapshot.Key key = new Snapshot.Key(scan, txResult, TABLE_METADATA); + LinkedHashMap scanResults = + Maps.newLinkedHashMap(Collections.singletonMap(key, txResult)); + Map.Entry> scanSetEntry = + new AbstractMap.SimpleEntry<>(scan, scanResults); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + Collections.singletonList(scanSetEntry), + Collections.emptyList(), + Collections.emptySet()); + + Scanner scanner = mock(Scanner.class); + when(scanner.one()).thenReturn(Optional.of(txResult)).thenReturn(Optional.empty()); + Scan scanForStorage = + Scan.newBuilder(prepareScan()).consistency(Consistency.LINEARIZABLE).build(); + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatCode(() -> handler.toSerializable(validationInfo, anyId())) + .doesNotThrowAnyException(); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void toSerializable_ScanSetUpdated_ShouldThrowValidationConflictException() + throws ExecutionException { + // Arrange + Scan scan = prepareScan(); + TransactionResult txResult = prepareResult(anyId()); + Snapshot.Key key = new Snapshot.Key(scan, txResult, TABLE_METADATA); + LinkedHashMap scanResults = + Maps.newLinkedHashMap(Collections.singletonMap(key, txResult)); + Map.Entry> scanSetEntry = + new AbstractMap.SimpleEntry<>(scan, scanResults); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + Collections.singletonList(scanSetEntry), + Collections.emptyList(), + Collections.emptySet()); + + TransactionResult changedTxResult = prepareResult(anyId() + "x"); + Scanner scanner = mock(Scanner.class); + when(scanner.one()).thenReturn(Optional.of(changedTxResult)).thenReturn(Optional.empty()); + Scan scanForStorage = + Scan.newBuilder(prepareScan()).consistency(Consistency.LINEARIZABLE).build(); + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatThrownBy(() -> 
handler.toSerializable(validationInfo, anyId())) + .isInstanceOf(ValidationConflictException.class); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void toSerializable_ScanSetUpdatedByMyself_ShouldProcessWithoutExceptions() + throws ExecutionException { + // Arrange + Scan scan = prepareScan(); + TransactionResult txResult = prepareResult(anyId()); + Snapshot.Key key = new Snapshot.Key(scan, txResult, TABLE_METADATA); + LinkedHashMap scanResults = + Maps.newLinkedHashMap(Collections.singletonMap(key, txResult)); + Map.Entry> scanSetEntry = + new AbstractMap.SimpleEntry<>(scan, scanResults); + TransactionResult changedTxResult = prepareResult(anyId()); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + Collections.singletonList(scanSetEntry), + Collections.emptyList(), + Collections.singleton(new Snapshot.Key(preparePut()))); + + Scanner scanner = mock(Scanner.class); + when(scanner.one()).thenReturn(Optional.of(changedTxResult)).thenReturn(Optional.empty()); + Scan scanForStorage = + Scan.newBuilder(prepareScan()).consistency(Consistency.LINEARIZABLE).build(); + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatCode(() -> handler.toSerializable(validationInfo, anyId())) + .doesNotThrowAnyException(); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void toSerializable_ScanSetExtended_ShouldThrowValidationConflictException() + throws ExecutionException { + // Arrange + Scan scan = prepareScan(); + TransactionResult result = prepareResult(anyId() + "x"); + LinkedHashMap scanResults = + Maps.newLinkedHashMap(Collections.emptyMap()); + Map.Entry> scanSetEntry = + new AbstractMap.SimpleEntry<>(scan, scanResults); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + Collections.singletonList(scanSetEntry), + Collections.emptyList(), + Collections.emptySet()); + + TransactionResult txResult = new TransactionResult(result); + Scanner scanner = mock(Scanner.class); + when(scanner.one()).thenReturn(Optional.of(txResult)).thenReturn(Optional.empty()); + Scan scanForStorage = + Scan.newBuilder(prepareScan()).consistency(Consistency.LINEARIZABLE).build(); + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatThrownBy(() -> handler.toSerializable(validationInfo, anyId())) + .isInstanceOf(ValidationConflictException.class); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void + toSerializable_ScanSetWithMultipleRecordsExtended_ShouldThrowValidationConflictException() + throws ExecutionException { + // Arrange + Scan scan = prepareScan(); + TransactionResult result1 = prepareResult(anyId() + "xx", ANY_TEXT_1, ANY_TEXT_2); + TransactionResult result2 = prepareResult(anyId() + "x", ANY_TEXT_1, ANY_TEXT_3); + Snapshot.Key key2 = new Snapshot.Key(scan, result2, TABLE_METADATA); + LinkedHashMap scanResults = + Maps.newLinkedHashMap(ImmutableMap.of(key2, result2)); + Map.Entry> scanSetEntry = + new AbstractMap.SimpleEntry<>(scan, scanResults); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + Collections.singletonList(scanSetEntry), + Collections.emptyList(), + Collections.emptySet()); + + Scanner scanner = mock(Scanner.class); + when(scanner.one()) + .thenReturn(Optional.of(result1)) + .thenReturn(Optional.of(result2)) + .thenReturn(Optional.empty()); + Scan scanForStorage = 
+ Scan.newBuilder(prepareScan()).consistency(Consistency.LINEARIZABLE).build(); + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatThrownBy(() -> handler.toSerializable(validationInfo, anyId())) + .isInstanceOf(ValidationConflictException.class); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void toSerializable_ScanSetExtendedByMyself_ShouldProcessWithoutExceptions() + throws ExecutionException { + // Arrange + Scan scan = prepareScan(); + TransactionResult result = prepareResult(anyId()); + LinkedHashMap scanResults = + Maps.newLinkedHashMap(Collections.emptyMap()); + Map.Entry> scanSetEntry = + new AbstractMap.SimpleEntry<>(scan, scanResults); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + Collections.singletonList(scanSetEntry), + Collections.emptyList(), + Collections.singleton(new Snapshot.Key(preparePut()))); + + TransactionResult txResult = new TransactionResult(result); + Scanner scanner = mock(Scanner.class); + when(scanner.one()).thenReturn(Optional.of(txResult)).thenReturn(Optional.empty()); + Scan scanForStorage = + Scan.newBuilder(prepareScan()).consistency(Consistency.LINEARIZABLE).build(); + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatCode(() -> handler.toSerializable(validationInfo, anyId())) + .doesNotThrowAnyException(); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void + toSerializable_ScanSetWithMultipleRecordsExtendedByMyself_ShouldProcessWithoutExceptions() + throws ExecutionException { + // Arrange + Scan scan = prepareScan(); + TransactionResult result1 = prepareResult(anyId(), ANY_TEXT_1, ANY_TEXT_2); + TransactionResult result2 = prepareResult(anyId() + "x", ANY_TEXT_1, ANY_TEXT_3); + Snapshot.Key key2 = new Snapshot.Key(scan, result2, TABLE_METADATA); + LinkedHashMap scanResults = + Maps.newLinkedHashMap(ImmutableMap.of(key2, result2)); + Map.Entry> scanSetEntry = + new AbstractMap.SimpleEntry<>(scan, scanResults); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + Collections.singletonList(scanSetEntry), + Collections.emptyList(), + Collections.singleton(new Snapshot.Key(preparePut(ANY_TEXT_1, ANY_TEXT_2)))); + + Scanner scanner = mock(Scanner.class); + when(scanner.one()) + .thenReturn(Optional.of(result1)) + .thenReturn(Optional.of(result2)) + .thenReturn(Optional.empty()); + Scan scanForStorage = + Scan.newBuilder(prepareScan()).consistency(Consistency.LINEARIZABLE).build(); + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatCode(() -> handler.toSerializable(validationInfo, anyId())) + .doesNotThrowAnyException(); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void toSerializable_ScanSetDeleted_ShouldThrowValidationConflictException() + throws ExecutionException { + // Arrange + Scan scan = prepareScan(); + TransactionResult txResult = prepareResult(anyId()); + Snapshot.Key key = new Snapshot.Key(scan, txResult, TABLE_METADATA); + LinkedHashMap scanResults = + Maps.newLinkedHashMap(Collections.singletonMap(key, txResult)); + Map.Entry> scanSetEntry = + new AbstractMap.SimpleEntry<>(scan, scanResults); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + Collections.singletonList(scanSetEntry), + Collections.emptyList(), + Collections.emptySet()); + + Scanner scanner = 
mock(Scanner.class); + when(scanner.one()).thenReturn(Optional.empty()); + Scan scanForStorage = + Scan.newBuilder(prepareScan()).consistency(Consistency.LINEARIZABLE).build(); + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatThrownBy(() -> handler.toSerializable(validationInfo, anyId())) + .isInstanceOf(ValidationConflictException.class); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void + toSerializable_ScanSetWithMultipleRecordsDeleted_ShouldThrowValidationConflictException() + throws ExecutionException { + // Arrange + Scan scan = prepareScan(); + TransactionResult result1 = prepareResult(anyId() + "xx", ANY_TEXT_1, ANY_TEXT_2); + TransactionResult result2 = prepareResult(anyId() + "x", ANY_TEXT_1, ANY_TEXT_3); + Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); + Snapshot.Key key2 = new Snapshot.Key(scan, result2, TABLE_METADATA); + LinkedHashMap scanResults = + Maps.newLinkedHashMap(ImmutableMap.of(key1, result1, key2, result2)); + Map.Entry> scanSetEntry = + new AbstractMap.SimpleEntry<>(scan, scanResults); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + Collections.singletonList(scanSetEntry), + Collections.emptyList(), + Collections.emptySet()); + + Scanner scanner = mock(Scanner.class); + when(scanner.one()).thenReturn(Optional.of(result2)).thenReturn(Optional.empty()); + Scan scanForStorage = + Scan.newBuilder(prepareScan()).consistency(Consistency.LINEARIZABLE).build(); + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatThrownBy(() -> handler.toSerializable(validationInfo, anyId())) + .isInstanceOf(ValidationConflictException.class); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void toSerializable_MultipleScansInScanSetExist_ShouldProcessWithoutExceptions() + throws ExecutionException { + // Arrange + Scan scan1 = + Scan.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(Key.ofText(ANY_NAME_1, ANY_TEXT_1)) + .start(Key.ofText(ANY_NAME_2, ANY_TEXT_2)) + .build(); + Scan scan2 = + Scan.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(Key.ofText(ANY_NAME_1, ANY_TEXT_2)) + .start(Key.ofText(ANY_NAME_2, ANY_TEXT_1)) + .build(); + + Result result1 = + new TransactionResult( + new ResultImpl( + ImmutableMap.of( + ANY_NAME_1, + TextColumn.of(ANY_NAME_1, ANY_TEXT_1), + ANY_NAME_2, + TextColumn.of(ANY_NAME_2, ANY_TEXT_2), + Attribute.ID, + TextColumn.of(Attribute.ID, "id1")), + TABLE_METADATA)); + + Result result2 = + new TransactionResult( + new ResultImpl( + ImmutableMap.of( + ANY_NAME_1, + TextColumn.of(ANY_NAME_1, ANY_TEXT_2), + ANY_NAME_2, + TextColumn.of(ANY_NAME_2, ANY_TEXT_1), + Attribute.ID, + TextColumn.of(Attribute.ID, "id2")), + TABLE_METADATA)); + + Snapshot.Key key1 = new Snapshot.Key(scan1, result1, TABLE_METADATA); + Snapshot.Key key2 = new Snapshot.Key(scan2, result2, TABLE_METADATA); + + List>> scanSetEntries = + Arrays.asList( + new AbstractMap.SimpleEntry<>( + scan1, + Maps.newLinkedHashMap( + Collections.singletonMap(key1, new TransactionResult(result1)))), + new AbstractMap.SimpleEntry<>( + scan2, + Maps.newLinkedHashMap( + Collections.singletonMap(key2, new TransactionResult(result2))))); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + scanSetEntries, + Collections.emptyList(), + Collections.emptySet()); + + 
Scanner scanner1 = mock(Scanner.class); + when(scanner1.one()).thenReturn(Optional.of(result1)).thenReturn(Optional.empty()); + Scan scan1ForStorage = + Scan.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(Key.ofText(ANY_NAME_1, ANY_TEXT_1)) + .start(Key.ofText(ANY_NAME_2, ANY_TEXT_2)) + .consistency(Consistency.LINEARIZABLE) + .build(); + when(storage.scan(scan1ForStorage)).thenReturn(scanner1); + + Scanner scanner2 = mock(Scanner.class); + when(scanner2.one()).thenReturn(Optional.of(result2)).thenReturn(Optional.empty()); + Scan scan2ForStorage = + Scan.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(Key.ofText(ANY_NAME_1, ANY_TEXT_2)) + .start(Key.ofText(ANY_NAME_2, ANY_TEXT_1)) + .consistency(Consistency.LINEARIZABLE) + .build(); + when(storage.scan(scan2ForStorage)).thenReturn(scanner2); + + // Act Assert + assertThatCode(() -> handler.toSerializable(validationInfo, anyId())) + .doesNotThrowAnyException(); + } + + @Test + public void toSerializable_NullMetadataInReadSetNotChanged_ShouldProcessWithoutExceptions() + throws ExecutionException { + // Arrange + Get get = prepareAnotherGet(); + Put put = preparePut(); + TransactionResult result = prepareResultWithNullMetadata(); + TransactionResult txResult = new TransactionResult(result); + Map.Entry> getSetEntry = + new AbstractMap.SimpleEntry<>(get, Optional.of(result)); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.singletonList(getSetEntry), + Collections.emptyList(), + Collections.emptyList(), + Collections.singleton(new Snapshot.Key(put))); + + Get getForStorage = Get.newBuilder(get).consistency(Consistency.LINEARIZABLE).build(); + when(storage.get(getForStorage)).thenReturn(Optional.of(txResult)); + + // Act Assert + assertThatCode(() -> handler.toSerializable(validationInfo, anyId())) + .doesNotThrowAnyException(); + + // Assert + verify(storage).get(getForStorage); + } + + @Test + public void toSerializable_NullMetadataInReadSetChanged_ShouldThrowValidationConflictException() + throws ExecutionException { + // Arrange + Get get = prepareAnotherGet(); + Put put = preparePut(); + TransactionResult result = prepareResultWithNullMetadata(); + TransactionResult changedResult = prepareResult(anyId()); + Map.Entry> getSetEntry = + new AbstractMap.SimpleEntry<>(get, Optional.of(result)); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.singletonList(getSetEntry), + Collections.emptyList(), + Collections.emptyList(), + Collections.singleton(new Snapshot.Key(put))); + + Get getForStorage = Get.newBuilder(get).consistency(Consistency.LINEARIZABLE).build(); + when(storage.get(getForStorage)).thenReturn(Optional.of(changedResult)); + + // Act Assert + assertThatThrownBy(() -> handler.toSerializable(validationInfo, anyId())) + .isInstanceOf(ValidationConflictException.class); + + // Assert + verify(storage).get(getForStorage); + } + + @Test + public void toSerializable_ScanWithLimitInScanSet_ShouldProcessWithoutExceptions() + throws ExecutionException { + // Arrange + Scan scan = prepareScanWithLimit(1); + TransactionResult result1 = prepareResult(anyId() + "x"); + TransactionResult result2 = prepareResult(anyId() + "x"); + Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); + LinkedHashMap scanResults = + Maps.newLinkedHashMap(Collections.singletonMap(key1, result1)); + Map.Entry> scanSetEntry = + new AbstractMap.SimpleEntry<>(scan, scanResults); + 
CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + Collections.singletonList(scanSetEntry), + Collections.emptyList(), + Collections.emptySet()); + + Scan scanForStorage = + Scan.newBuilder(scan).limit(0).consistency(Consistency.LINEARIZABLE).build(); + Scanner scanner = mock(Scanner.class); + when(scanner.one()) + .thenReturn(Optional.of(result1)) + .thenReturn(Optional.of(result2)) + .thenReturn(Optional.empty()); + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatCode(() -> handler.toSerializable(validationInfo, anyId())) + .doesNotThrowAnyException(); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void + toSerializable_ScanWithLimitInScanSet_WhenInsertingFirstRecordIntoScanRange_ShouldThrowValidationConflictException() + throws ExecutionException { + // Arrange + Scan scan = prepareScanWithLimit(1); + TransactionResult result1 = prepareResult(anyId() + "x", ANY_TEXT_1, ANY_TEXT_3); + TransactionResult result2 = prepareResult(anyId() + "x", ANY_TEXT_1, ANY_TEXT_4); + TransactionResult insertedResult = prepareResult(anyId() + "xx", ANY_TEXT_1, ANY_TEXT_2); + Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); + LinkedHashMap scanResults = + Maps.newLinkedHashMap(ImmutableMap.of(key1, result1)); + Map.Entry> scanSetEntry = + new AbstractMap.SimpleEntry<>(scan, scanResults); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + Collections.singletonList(scanSetEntry), + Collections.emptyList(), + Collections.emptySet()); + + Scan scanForStorage = + Scan.newBuilder(scan).limit(0).consistency(Consistency.LINEARIZABLE).build(); + Scanner scanner = mock(Scanner.class); + when(scanner.one()) + .thenReturn(Optional.of(insertedResult)) + .thenReturn(Optional.of(result1)) + .thenReturn(Optional.of(result2)) + .thenReturn(Optional.empty()); + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatThrownBy(() -> handler.toSerializable(validationInfo, anyId())) + .isInstanceOf(ValidationConflictException.class); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void + toSerializable_ScanWithLimitInScanSet_WhenInsertingFirstRecordIntoScanRangeByMyself_ShouldProcessWithoutExceptions() + throws ExecutionException { + // Arrange + Scan scan = prepareScanWithLimit(1); + TransactionResult result1 = prepareResult(anyId() + "x", ANY_TEXT_1, ANY_TEXT_3); + TransactionResult result2 = prepareResult(anyId() + "x", ANY_TEXT_1, ANY_TEXT_4); + TransactionResult insertedResult = prepareResult(anyId(), ANY_TEXT_1, ANY_TEXT_2); + Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); + LinkedHashMap scanResults = + Maps.newLinkedHashMap(ImmutableMap.of(key1, result1)); + Map.Entry> scanSetEntry = + new AbstractMap.SimpleEntry<>(scan, scanResults); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + Collections.singletonList(scanSetEntry), + Collections.emptyList(), + Collections.singleton(new Snapshot.Key(preparePut(ANY_TEXT_1, ANY_TEXT_2)))); + + Scan scanForStorage = + Scan.newBuilder(scan).limit(0).consistency(Consistency.LINEARIZABLE).build(); + Scanner scanner = mock(Scanner.class); + when(scanner.one()) + .thenReturn(Optional.of(insertedResult)) + .thenReturn(Optional.of(result1)) + .thenReturn(Optional.of(result2)) + .thenReturn(Optional.empty()); + 
when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatCode(() -> handler.toSerializable(validationInfo, anyId())) + .doesNotThrowAnyException(); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void + toSerializable_ScanWithLimitInScanSet_WhenInsertingLastRecordIntoScanRange_ShouldThrowValidationConflictException() + throws ExecutionException { + // Arrange + Scan scan = prepareScanWithLimit(3); + TransactionResult result1 = prepareResult(anyId() + "x", ANY_TEXT_1, ANY_TEXT_2); + TransactionResult result2 = prepareResult(anyId() + "x", ANY_TEXT_1, ANY_TEXT_3); + TransactionResult insertedResult = prepareResult(anyId() + "xx", ANY_TEXT_1, ANY_TEXT_4); + Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); + Snapshot.Key key2 = new Snapshot.Key(scan, result2, TABLE_METADATA); + LinkedHashMap scanResults = + Maps.newLinkedHashMap(ImmutableMap.of(key1, result1, key2, result2)); + Map.Entry> scanSetEntry = + new AbstractMap.SimpleEntry<>(scan, scanResults); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + Collections.singletonList(scanSetEntry), + Collections.emptyList(), + Collections.emptySet()); + + Scan scanForStorage = + Scan.newBuilder(scan).limit(0).consistency(Consistency.LINEARIZABLE).build(); + Scanner scanner = mock(Scanner.class); + when(scanner.one()) + .thenReturn(Optional.of(result1)) + .thenReturn(Optional.of(result2)) + .thenReturn(Optional.of(insertedResult)) + .thenReturn(Optional.empty()); + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatThrownBy(() -> handler.toSerializable(validationInfo, anyId())) + .isInstanceOf(ValidationConflictException.class); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void + toSerializable_ScanWithLimitInScanSet_WhenInsertingLastRecordIntoScanRangeByMyself_ShouldProcessWithoutExceptions() + throws ExecutionException { + // Arrange + Scan scan = prepareScanWithLimit(3); + TransactionResult result1 = prepareResult(anyId() + "x", ANY_TEXT_1, ANY_TEXT_2); + TransactionResult result2 = prepareResult(anyId() + "x", ANY_TEXT_1, ANY_TEXT_3); + TransactionResult insertedResult = prepareResult(anyId(), ANY_TEXT_1, ANY_TEXT_4); + Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); + Snapshot.Key key2 = new Snapshot.Key(scan, result2, TABLE_METADATA); + LinkedHashMap scanResults = + Maps.newLinkedHashMap(ImmutableMap.of(key1, result1, key2, result2)); + Map.Entry> scanSetEntry = + new AbstractMap.SimpleEntry<>(scan, scanResults); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + Collections.singletonList(scanSetEntry), + Collections.emptyList(), + Collections.singleton(new Snapshot.Key(preparePut(ANY_TEXT_1, ANY_TEXT_4)))); + + Scan scanForStorage = + Scan.newBuilder(scan).limit(0).consistency(Consistency.LINEARIZABLE).build(); + + Scanner scanner = mock(Scanner.class); + when(scanner.one()) + .thenReturn(Optional.of(result1)) + .thenReturn(Optional.of(result2)) + .thenReturn(Optional.of(insertedResult)) + .thenReturn(Optional.empty()); + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatCode(() -> handler.toSerializable(validationInfo, anyId())) + .doesNotThrowAnyException(); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void + 
toSerializable_ScanWithIndexInScanSet_WhenUpdatingRecords_ShouldThrowValidationConflictException() + throws ExecutionException { + // Arrange + Scan scan = prepareScanWithIndex(); + TransactionResult result1 = prepareResult(anyId() + "x", ANY_TEXT_1, ANY_TEXT_1); + TransactionResult result2 = prepareResult(anyId() + "x", ANY_TEXT_2, ANY_TEXT_1); + TransactionResult result3 = prepareResult(anyId() + "x", ANY_TEXT_3, ANY_TEXT_1); + Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); + Snapshot.Key key2 = new Snapshot.Key(scan, result2, TABLE_METADATA); + Snapshot.Key key3 = new Snapshot.Key(scan, result3, TABLE_METADATA); + LinkedHashMap scanResults = + Maps.newLinkedHashMap(ImmutableMap.of(key1, result1, key2, result2, key3, result3)); + Map.Entry> scanSetEntry = + new AbstractMap.SimpleEntry<>(scan, scanResults); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + Collections.singletonList(scanSetEntry), + Collections.emptyList(), + Collections.emptySet()); + + // Simulate that the first and third records were updated by another transaction + Scanner scanner = mock(Scanner.class); + when(scanner.one()).thenReturn(Optional.of(result2)).thenReturn(Optional.empty()); + + Scan scanForStorage = + Scan.newBuilder(scan).limit(0).consistency(Consistency.LINEARIZABLE).build(); + + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatThrownBy(() -> handler.toSerializable(validationInfo, anyId())) + .isInstanceOf(ValidationConflictException.class); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void + toSerializable_ScanWithIndexInScanSet_WhenUpdatingRecordsByMyself_ShouldProcessWithoutExceptions() + throws ExecutionException { + // Arrange + Scan scan = prepareScanWithIndex(); + TransactionResult result1 = prepareResult(anyId() + "x", ANY_TEXT_1, ANY_TEXT_1); + TransactionResult result2 = prepareResult(anyId() + "x", ANY_TEXT_2, ANY_TEXT_1); + TransactionResult result3 = prepareResult(anyId() + "x", ANY_TEXT_3, ANY_TEXT_1); + Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); + Snapshot.Key key2 = new Snapshot.Key(scan, result2, TABLE_METADATA); + Snapshot.Key key3 = new Snapshot.Key(scan, result3, TABLE_METADATA); + LinkedHashMap scanResults = + Maps.newLinkedHashMap(ImmutableMap.of(key1, result1, key2, result2, key3, result3)); + Map.Entry> scanSetEntry = + new AbstractMap.SimpleEntry<>(scan, scanResults); + + // Simulate that the first and third records were updated by myself + Set updatedRecordKeys = new HashSet<>(); + updatedRecordKeys.add(key1); + updatedRecordKeys.add(key3); + + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + Collections.singletonList(scanSetEntry), + Collections.emptyList(), + updatedRecordKeys); + + Scanner scanner = mock(Scanner.class); + when(scanner.one()).thenReturn(Optional.of(result2)).thenReturn(Optional.empty()); + + Scan scanForStorage = + Scan.newBuilder(scan).limit(0).consistency(Consistency.LINEARIZABLE).build(); + + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatCode(() -> handler.toSerializable(validationInfo, anyId())) + .doesNotThrowAnyException(); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void + toSerializable_ScanWithIndexInScanSet_WhenDeletingRecordsByMyself_ShouldProcessWithoutExceptions() + throws ExecutionException { + // Arrange + Scan scan = 
prepareScanWithIndex(); + TransactionResult result1 = prepareResult(anyId() + "x", ANY_TEXT_1, ANY_TEXT_1); + TransactionResult result2 = prepareResult(anyId() + "x", ANY_TEXT_2, ANY_TEXT_1); + TransactionResult result3 = prepareResult(anyId() + "x", ANY_TEXT_3, ANY_TEXT_1); + Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); + Snapshot.Key key2 = new Snapshot.Key(scan, result2, TABLE_METADATA); + Snapshot.Key key3 = new Snapshot.Key(scan, result3, TABLE_METADATA); + LinkedHashMap scanResults = + Maps.newLinkedHashMap(ImmutableMap.of(key1, result1, key2, result2, key3, result3)); + Map.Entry> scanSetEntry = + new AbstractMap.SimpleEntry<>(scan, scanResults); + + // Simulate that the first and third records were deleted by myself + Set updatedRecordKeys = new HashSet<>(); + updatedRecordKeys.add(key1); + updatedRecordKeys.add(key3); + + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + Collections.singletonList(scanSetEntry), + Collections.emptyList(), + updatedRecordKeys); + + Scanner scanner = mock(Scanner.class); + when(scanner.one()).thenReturn(Optional.of(result2)).thenReturn(Optional.empty()); + + Scan scanForStorage = + Scan.newBuilder(scan).limit(0).consistency(Consistency.LINEARIZABLE).build(); + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatCode(() -> handler.toSerializable(validationInfo, anyId())) + .doesNotThrowAnyException(); + + // Assert + verify(storage).scan(scanForStorage); + } + + @Test + public void toSerializable_ScannerSetNotChanged_ShouldProcessWithoutExceptions() + throws ExecutionException { + // Arrange + Scan scan = prepareScan(); + TransactionResult result1 = prepareResult(anyId() + "x", ANY_TEXT_1, ANY_TEXT_2); + TransactionResult result2 = prepareResult(anyId() + "x", ANY_TEXT_1, ANY_TEXT_3); + Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); + LinkedHashMap scanResults = + Maps.newLinkedHashMap(ImmutableMap.of(key1, result1)); + Snapshot.ScannerInfo scannerInfo = new Snapshot.ScannerInfo(scan, scanResults); + CommitHandler.ValidationInfo validationInfo = + new CommitHandler.ValidationInfo( + Collections.emptyList(), + Collections.emptyList(), + Collections.singletonList(scannerInfo), + Collections.emptySet()); + + Scan scanForStorage = Scan.newBuilder(scan).consistency(Consistency.LINEARIZABLE).build(); + Scanner scanner = mock(Scanner.class); + when(scanner.one()) + .thenReturn(Optional.of(result1)) + .thenReturn(Optional.of(result2)) + .thenReturn(Optional.empty()); + when(storage.scan(scanForStorage)).thenReturn(scanner); + + // Act Assert + assertThatCode(() -> handler.toSerializable(validationInfo, anyId())) + .doesNotThrowAnyException(); + + // Assert + verify(storage).scan(scanForStorage); + } + protected void doThrowExceptionWhenCoordinatorPutState( TransactionState targetState, Class exceptionClass) throws CoordinatorException { diff --git a/core/src/test/java/com/scalar/db/transaction/consensuscommit/CommitHandlerWithGroupCommitTest.java b/core/src/test/java/com/scalar/db/transaction/consensuscommit/CommitHandlerWithGroupCommitTest.java index 3dca1951d3..925cb97ec7 100644 --- a/core/src/test/java/com/scalar/db/transaction/consensuscommit/CommitHandlerWithGroupCommitTest.java +++ b/core/src/test/java/com/scalar/db/transaction/consensuscommit/CommitHandlerWithGroupCommitTest.java @@ -230,6 +230,7 @@ public void onePhaseCommitRecords_WhenSuccessful_ShouldMutateUsingComposerMutati public void 
onePhaseCommitRecords_WhenNoMutationExceptionThrown_ShouldThrowCommitConflictException() throws ExecutionException { + super.onePhaseCommitRecords_WhenNoMutationExceptionThrown_ShouldThrowCommitConflictException(); // Assert @@ -332,4 +333,243 @@ public void commit_OnePhaseCommitted_ShouldNotThrowAnyException() .commit_OnePhaseCommitted_UnknownTransactionStatusExceptionThrown_ShouldThrowUnknownTransactionStatusException(); groupCommitter.remove(anyId()); } + + @Test + @Override + public void toSerializable_ReadSetNotChanged_ShouldProcessWithoutExceptions() + throws ExecutionException { + super.toSerializable_ReadSetNotChanged_ShouldProcessWithoutExceptions(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void toSerializable_ReadSetUpdated_ShouldThrowValidationConflictException() + throws ExecutionException { + super.toSerializable_ReadSetUpdated_ShouldThrowValidationConflictException(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void toSerializable_ReadSetExtended_ShouldThrowValidationConflictException() + throws ExecutionException { + super.toSerializable_ReadSetExtended_ShouldThrowValidationConflictException(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void toSerializable_GetSetWithGetWithIndex_ShouldProcessWithoutExceptions() + throws ExecutionException { + super.toSerializable_GetSetWithGetWithIndex_ShouldProcessWithoutExceptions(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void + toSerializable_GetSetWithGetWithIndex_RecordInserted_ShouldThrowValidationConflictException() + throws ExecutionException { + super + .toSerializable_GetSetWithGetWithIndex_RecordInserted_ShouldThrowValidationConflictException(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void + toSerializable_GetSetWithGetWithIndex_RecordInsertedByMyself_ShouldProcessWithoutExceptions() + throws ExecutionException { + super + .toSerializable_GetSetWithGetWithIndex_RecordInsertedByMyself_ShouldProcessWithoutExceptions(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void toSerializable_ScanSetNotChanged_ShouldProcessWithoutExceptions() + throws ExecutionException { + super.toSerializable_ScanSetNotChanged_ShouldProcessWithoutExceptions(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void toSerializable_ScanSetUpdated_ShouldThrowValidationConflictException() + throws ExecutionException { + super.toSerializable_ScanSetUpdated_ShouldThrowValidationConflictException(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void toSerializable_ScanSetUpdatedByMyself_ShouldProcessWithoutExceptions() + throws ExecutionException { + super.toSerializable_ScanSetUpdatedByMyself_ShouldProcessWithoutExceptions(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void toSerializable_ScanSetExtended_ShouldThrowValidationConflictException() + throws ExecutionException { + super.toSerializable_ScanSetExtended_ShouldThrowValidationConflictException(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void + toSerializable_ScanSetWithMultipleRecordsExtended_ShouldThrowValidationConflictException() + throws ExecutionException { + super + .toSerializable_ScanSetWithMultipleRecordsExtended_ShouldThrowValidationConflictException(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void toSerializable_ScanSetExtendedByMyself_ShouldProcessWithoutExceptions() + throws ExecutionException { + 
super.toSerializable_ScanSetExtendedByMyself_ShouldProcessWithoutExceptions(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void + toSerializable_ScanSetWithMultipleRecordsExtendedByMyself_ShouldProcessWithoutExceptions() + throws ExecutionException { + super + .toSerializable_ScanSetWithMultipleRecordsExtendedByMyself_ShouldProcessWithoutExceptions(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void toSerializable_ScanSetDeleted_ShouldThrowValidationConflictException() + throws ExecutionException { + super.toSerializable_ScanSetDeleted_ShouldThrowValidationConflictException(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void + toSerializable_ScanSetWithMultipleRecordsDeleted_ShouldThrowValidationConflictException() + throws ExecutionException { + super.toSerializable_ScanSetWithMultipleRecordsDeleted_ShouldThrowValidationConflictException(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void toSerializable_MultipleScansInScanSetExist_ShouldProcessWithoutExceptions() + throws ExecutionException { + super.toSerializable_MultipleScansInScanSetExist_ShouldProcessWithoutExceptions(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void toSerializable_NullMetadataInReadSetNotChanged_ShouldProcessWithoutExceptions() + throws ExecutionException { + super.toSerializable_NullMetadataInReadSetNotChanged_ShouldProcessWithoutExceptions(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void toSerializable_NullMetadataInReadSetChanged_ShouldThrowValidationConflictException() + throws ExecutionException { + super.toSerializable_NullMetadataInReadSetChanged_ShouldThrowValidationConflictException(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void toSerializable_ScanWithLimitInScanSet_ShouldProcessWithoutExceptions() + throws ExecutionException { + super.toSerializable_ScanWithLimitInScanSet_ShouldProcessWithoutExceptions(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void + toSerializable_ScanWithLimitInScanSet_WhenInsertingFirstRecordIntoScanRange_ShouldThrowValidationConflictException() + throws ExecutionException { + super + .toSerializable_ScanWithLimitInScanSet_WhenInsertingFirstRecordIntoScanRange_ShouldThrowValidationConflictException(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void + toSerializable_ScanWithLimitInScanSet_WhenInsertingFirstRecordIntoScanRangeByMyself_ShouldProcessWithoutExceptions() + throws ExecutionException { + super + .toSerializable_ScanWithLimitInScanSet_WhenInsertingFirstRecordIntoScanRangeByMyself_ShouldProcessWithoutExceptions(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void + toSerializable_ScanWithLimitInScanSet_WhenInsertingLastRecordIntoScanRange_ShouldThrowValidationConflictException() + throws ExecutionException { + super + .toSerializable_ScanWithLimitInScanSet_WhenInsertingLastRecordIntoScanRange_ShouldThrowValidationConflictException(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void + toSerializable_ScanWithLimitInScanSet_WhenInsertingLastRecordIntoScanRangeByMyself_ShouldProcessWithoutExceptions() + throws ExecutionException { + super + .toSerializable_ScanWithLimitInScanSet_WhenInsertingLastRecordIntoScanRangeByMyself_ShouldProcessWithoutExceptions(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void + 
toSerializable_ScanWithIndexInScanSet_WhenUpdatingRecords_ShouldThrowValidationConflictException() + throws ExecutionException { + super + .toSerializable_ScanWithIndexInScanSet_WhenUpdatingRecords_ShouldThrowValidationConflictException(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void + toSerializable_ScanWithIndexInScanSet_WhenUpdatingRecordsByMyself_ShouldProcessWithoutExceptions() + throws ExecutionException { + super + .toSerializable_ScanWithIndexInScanSet_WhenUpdatingRecordsByMyself_ShouldProcessWithoutExceptions(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void + toSerializable_ScanWithIndexInScanSet_WhenDeletingRecordsByMyself_ShouldProcessWithoutExceptions() + throws ExecutionException { + super + .toSerializable_ScanWithIndexInScanSet_WhenDeletingRecordsByMyself_ShouldProcessWithoutExceptions(); + groupCommitter.remove(anyId()); + } + + @Test + @Override + public void toSerializable_ScannerSetNotChanged_ShouldProcessWithoutExceptions() + throws ExecutionException { + super.toSerializable_ScannerSetNotChanged_ShouldProcessWithoutExceptions(); + groupCommitter.remove(anyId()); + } } diff --git a/core/src/test/java/com/scalar/db/transaction/consensuscommit/CrudHandlerTest.java b/core/src/test/java/com/scalar/db/transaction/consensuscommit/CrudHandlerTest.java index 6209315145..792342a6dc 100644 --- a/core/src/test/java/com/scalar/db/transaction/consensuscommit/CrudHandlerTest.java +++ b/core/src/test/java/com/scalar/db/transaction/consensuscommit/CrudHandlerTest.java @@ -706,7 +706,7 @@ public void get_CalledTwiceUnderRealSnapshot_SecondTimeShouldReturnTheSameFromSn Get anotherGet = prepareGet(); Result result = prepareResult(TransactionState.COMMITTED); Optional expected = Optional.of(new TransactionResult(result)); - snapshot = new Snapshot(ANY_ID_1, Isolation.SNAPSHOT, tableMetadataManager, parallelExecutor); + snapshot = new Snapshot(ANY_ID_1, tableMetadataManager); when(storage.get(getForStorage)).thenReturn(Optional.of(result)); TransactionContext context = new TransactionContext(ANY_ID_1, snapshot, Isolation.SNAPSHOT, false, false); @@ -735,8 +735,7 @@ public void get_CalledTwiceUnderRealSnapshot_ReadCommittedIsolation_BothShouldRe Get anotherGet = prepareGet(); Result result = prepareResult(TransactionState.COMMITTED); Optional expected = Optional.of(new TransactionResult(result)); - snapshot = - new Snapshot(ANY_ID_1, Isolation.READ_COMMITTED, tableMetadataManager, parallelExecutor); + snapshot = new Snapshot(ANY_ID_1, tableMetadataManager); when(storage.get(getForStorage)).thenReturn(Optional.of(result)); TransactionContext context = new TransactionContext(ANY_ID_1, snapshot, Isolation.READ_COMMITTED, false, false); @@ -1158,7 +1157,7 @@ void scan_CalledTwiceUnderRealSnapshot_SecondTimeShouldReturnTheSameFromSnapshot Scan anotherScan = prepareScan(); result = prepareResult(TransactionState.COMMITTED); TransactionResult expected = new TransactionResult(result); - snapshot = new Snapshot(ANY_ID_1, Isolation.SNAPSHOT, tableMetadataManager, parallelExecutor); + snapshot = new Snapshot(ANY_ID_1, tableMetadataManager); if (scanType == ScanType.SCAN) { when(scanner.iterator()).thenReturn(Collections.singletonList(result).iterator()); } else { @@ -1229,7 +1228,7 @@ void scanOrGetScanner_GetCalledAfterScanUnderRealSnapshot_ShouldReturnFromStorag Scan scan = prepareScan(); Scan scanForStorage = toScanForStorageFrom(scan); result = prepareResult(TransactionState.COMMITTED); - snapshot = new Snapshot(ANY_ID_1, Isolation.SNAPSHOT, 
tableMetadataManager, parallelExecutor);
+    snapshot = new Snapshot(ANY_ID_1, tableMetadataManager);
     if (scanType == ScanType.SCAN) {
       when(scanner.iterator()).thenReturn(Collections.singletonList(result).iterator());
     } else {
@@ -1286,9 +1285,7 @@ void scanOrGetScanner_CalledAfterDeleteUnderRealSnapshot_ShouldThrowIllegalArgum
     snapshot =
         new Snapshot(
             ANY_ID_1,
-            Isolation.SNAPSHOT,
             tableMetadataManager,
-            parallelExecutor,
             readSet,
             new ConcurrentHashMap<>(),
             new HashMap<>(),
diff --git a/core/src/test/java/com/scalar/db/transaction/consensuscommit/SnapshotTest.java b/core/src/test/java/com/scalar/db/transaction/consensuscommit/SnapshotTest.java
index fbe900f70c..2ee3c2aa75 100644
--- a/core/src/test/java/com/scalar/db/transaction/consensuscommit/SnapshotTest.java
+++ b/core/src/test/java/com/scalar/db/transaction/consensuscommit/SnapshotTest.java
@@ -1,7 +1,6 @@
 package com.scalar.db.transaction.consensuscommit;

 import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatCode;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
 import static org.assertj.core.api.Assertions.catchThrowable;
 import static org.mockito.ArgumentMatchers.any;
@@ -17,20 +16,15 @@
 import com.scalar.db.api.ConditionBuilder;
 import com.scalar.db.api.ConditionSetBuilder;
 import com.scalar.db.api.ConditionalExpression;
-import com.scalar.db.api.Consistency;
 import com.scalar.db.api.Delete;
-import com.scalar.db.api.DistributedStorage;
 import com.scalar.db.api.Get;
 import com.scalar.db.api.Put;
-import com.scalar.db.api.Result;
 import com.scalar.db.api.Scan;
 import com.scalar.db.api.ScanAll;
-import com.scalar.db.api.Scanner;
 import com.scalar.db.api.TableMetadata;
 import com.scalar.db.common.ResultImpl;
 import com.scalar.db.exception.storage.ExecutionException;
 import com.scalar.db.exception.transaction.CrudException;
-import com.scalar.db.exception.transaction.ValidationConflictException;
 import com.scalar.db.io.Column;
 import com.scalar.db.io.DataType;
 import com.scalar.db.io.IntColumn;
@@ -97,7 +91,6 @@ public class SnapshotTest {
   private Map<Snapshot.Key, Delete> deleteSet;
   private List<Snapshot.ScannerInfo> scannerSet;

-  @Mock private ConsensusCommitConfig config;
   @Mock private PrepareMutationComposer prepareComposer;
   @Mock private CommitMutationComposer commitComposer;
   @Mock private RollbackMutationComposer rollbackComposer;
@@ -114,7 +107,7 @@ public void setUp() throws Exception {
         .thenReturn(new TransactionTableMetadata(TABLE_METADATA));
   }

-  private Snapshot prepareSnapshot(Isolation isolation) {
+  private Snapshot prepareSnapshot() {
     readSet = new ConcurrentHashMap<>();
     getSet = new ConcurrentHashMap<>();
     scanSet = new HashMap<>();
@@ -125,9 +118,7 @@ private Snapshot prepareSnapshot(Isolation isolation) {
     return spy(
         new Snapshot(
             ANY_ID,
-            isolation,
             tableMetadataManager,
-            new ParallelExecutor(config),
             readSet,
             getSet,
             scanSet,
@@ -137,30 +128,13 @@ private Snapshot prepareSnapshot(Isolation isolation) {
   }

   private TransactionResult prepareResult(String txId) {
-    return prepareResult(txId, ANY_TEXT_1, ANY_TEXT_2);
-  }
-
-  private TransactionResult prepareResult(
-      String txId, String partitionKeyColumnValue, String clusteringKeyColumnValue) {
-    ImmutableMap<String, Column<?>> columns =
-        ImmutableMap.<String, Column<?>>builder()
-            .put(ANY_NAME_1, TextColumn.of(ANY_NAME_1, partitionKeyColumnValue))
-            .put(ANY_NAME_2, TextColumn.of(ANY_NAME_2, clusteringKeyColumnValue))
-            .put(ANY_NAME_3, TextColumn.of(ANY_NAME_3, ANY_TEXT_3))
-            .put(ANY_NAME_4, TextColumn.of(ANY_NAME_4, ANY_TEXT_4))
-            .put(Attribute.ID,
TextColumn.of(Attribute.ID, txId)) - .build(); - return new TransactionResult(new ResultImpl(columns, TABLE_METADATA)); - } - - private TransactionResult prepareResultWithNullMetadata() { ImmutableMap> columns = ImmutableMap.>builder() .put(ANY_NAME_1, TextColumn.of(ANY_NAME_1, ANY_TEXT_1)) .put(ANY_NAME_2, TextColumn.of(ANY_NAME_2, ANY_TEXT_2)) .put(ANY_NAME_3, TextColumn.of(ANY_NAME_3, ANY_TEXT_3)) .put(ANY_NAME_4, TextColumn.of(ANY_NAME_4, ANY_TEXT_4)) - .put(Attribute.ID, TextColumn.ofNull(Attribute.ID)) + .put(Attribute.ID, TextColumn.of(Attribute.ID, txId)) .build(); return new TransactionResult(new ResultImpl(columns, TABLE_METADATA)); } @@ -187,15 +161,6 @@ private Get prepareAnotherGet() { .build(); } - private Get prepareGetWithIndex() { - Key indexKey = Key.ofText(ANY_NAME_4, ANY_TEXT_1); - return Get.newBuilder() - .namespace(ANY_NAMESPACE_NAME) - .table(ANY_TABLE_NAME) - .indexKey(indexKey) - .build(); - } - private Scan prepareScan() { Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); @@ -207,24 +172,6 @@ private Scan prepareScan() { .build(); } - private Scan prepareScanWithLimit(int limit) { - return Scan.newBuilder() - .namespace(ANY_NAMESPACE_NAME) - .table(ANY_TABLE_NAME) - .partitionKey(Key.ofText(ANY_NAME_1, ANY_TEXT_1)) - .limit(limit) - .build(); - } - - private Scan prepareScanWithIndex() { - Key indexKey = Key.ofText(ANY_NAME_4, ANY_TEXT_1); - return Scan.newBuilder() - .namespace(ANY_NAMESPACE_NAME) - .table(ANY_TABLE_NAME) - .indexKey(indexKey) - .build(); - } - private Scan prepareCrossPartitionScan() { return prepareCrossPartitionScan(ANY_NAMESPACE_NAME, ANY_TABLE_NAME); } @@ -239,15 +186,11 @@ private Scan prepareCrossPartitionScan(String namespace, String table) { } private Put preparePut() { - return preparePut(ANY_TEXT_1, ANY_TEXT_2); - } - - private Put preparePut(String partitionKeyColumnValue, String clusteringKeyColumnValue) { return Put.newBuilder() .namespace(ANY_NAMESPACE_NAME) .table(ANY_TABLE_NAME) - .partitionKey(Key.ofText(ANY_NAME_1, partitionKeyColumnValue)) - .clusteringKey(Key.ofText(ANY_NAME_2, clusteringKeyColumnValue)) + .partitionKey(Key.ofText(ANY_NAME_1, ANY_TEXT_1)) + .clusteringKey(Key.ofText(ANY_NAME_2, ANY_TEXT_2)) .textValue(ANY_NAME_3, ANY_TEXT_3) .textValue(ANY_NAME_4, ANY_TEXT_4) .build(); @@ -291,15 +234,11 @@ private Put preparePutForMergeTest() { } private Delete prepareDelete() { - return prepareDelete(ANY_TEXT_1, ANY_TEXT_2); - } - - private Delete prepareDelete(String partitionKeyColumnValue, String clusteringKeyColumnValue) { return Delete.newBuilder() .namespace(ANY_NAMESPACE_NAME) .table(ANY_TABLE_NAME) - .partitionKey(Key.ofText(ANY_NAME_1, partitionKeyColumnValue)) - .clusteringKey(Key.ofText(ANY_NAME_2, clusteringKeyColumnValue)) + .partitionKey(Key.ofText(ANY_NAME_1, ANY_TEXT_1)) + .clusteringKey(Key.ofText(ANY_NAME_2, ANY_TEXT_2)) .build(); } @@ -326,7 +265,7 @@ private void configureBehavior() throws ExecutionException { @Test public void putIntoReadSet_ResultGiven_ShouldHoldWhatsGivenInReadSet() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Snapshot.Key key = new Snapshot.Key(prepareGet()); TransactionResult result = prepareResult(ANY_ID); @@ -340,7 +279,7 @@ public void putIntoReadSet_ResultGiven_ShouldHoldWhatsGivenInReadSet() { @Test public void putIntoGetSet_ResultGiven_ShouldHoldWhatsGivenInReadSet() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Get 
get = prepareGet(); TransactionResult result = prepareResult(ANY_ID); @@ -354,7 +293,7 @@ public void putIntoGetSet_ResultGiven_ShouldHoldWhatsGivenInReadSet() { @Test public void putIntoWriteSet_PutGiven_ShouldHoldWhatsGivenInWriteSet() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Put put = preparePut(); Snapshot.Key key = new Snapshot.Key(put); @@ -368,7 +307,7 @@ public void putIntoWriteSet_PutGiven_ShouldHoldWhatsGivenInWriteSet() { @Test public void putIntoWriteSet_PutGivenTwice_ShouldHoldMergedPut() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Put put1 = preparePut(); Snapshot.Key key = new Snapshot.Key(put1); @@ -404,7 +343,7 @@ public void putIntoWriteSet_PutGivenTwice_ShouldHoldMergedPut() { @Test public void putIntoWriteSet_PutGivenAfterDelete_ShouldThrowIllegalArgumentException() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Delete delete = prepareDelete(); Snapshot.Key deleteKey = new Snapshot.Key(prepareDelete()); snapshot.putIntoDeleteSet(deleteKey, delete); @@ -421,7 +360,7 @@ public void putIntoWriteSet_PutGivenAfterDelete_ShouldThrowIllegalArgumentExcept public void putIntoWriteSet_PutWithInsertModeEnabledGivenAfterPut_ShouldThrowIllegalArgumentException() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Put put = preparePut(); Put putWithInsertModeEnabled = Put.newBuilder(put).enableInsertMode().build(); Snapshot.Key key = new Snapshot.Key(put); @@ -436,7 +375,7 @@ public void putIntoWriteSet_PutGivenAfterDelete_ShouldThrowIllegalArgumentExcept public void putIntoWriteSet_PutWithImplicitPreReadEnabledGivenAfterWithInsertModeEnabled_ShouldHoldMergedPutWithoutImplicitPreRead() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Put putWithInsertModeEnabled = Put.newBuilder(preparePut()).enableInsertMode().build(); Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); @@ -474,7 +413,7 @@ public void putIntoWriteSet_PutGivenAfterDelete_ShouldThrowIllegalArgumentExcept @Test public void putIntoDeleteSet_DeleteGiven_ShouldHoldWhatsGivenInDeleteSet() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Delete delete = prepareDelete(); Snapshot.Key key = new Snapshot.Key(delete); @@ -488,7 +427,7 @@ public void putIntoDeleteSet_DeleteGiven_ShouldHoldWhatsGivenInDeleteSet() { @Test public void putIntoDeleteSet_DeleteGivenAfterPut_PutSupercedesDelete() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Put put = preparePut(); Snapshot.Key putKey = new Snapshot.Key(preparePut()); snapshot.putIntoWriteSet(putKey, put); @@ -509,7 +448,7 @@ public void putIntoDeleteSet_DeleteGivenAfterPut_PutSupercedesDelete() { public void putIntoDeleteSet_DeleteGivenAfterPutWithInsertModeEnabled_ShouldThrowIllegalArgumentException() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Delete delete = prepareDelete(); Snapshot.Key key = new Snapshot.Key(delete); @@ -524,7 +463,7 @@ public void putIntoDeleteSet_DeleteGivenAfterPut_PutSupercedesDelete() { @Test public void putIntoScanSet_ScanGiven_ShouldHoldWhatsGivenInScanSet() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Scan scan = prepareScan(); TransactionResult result = prepareResult(ANY_ID); Snapshot.Key key = new Snapshot.Key(scan, 
result, TABLE_METADATA); @@ -542,7 +481,7 @@ public void putIntoScanSet_ScanGiven_ShouldHoldWhatsGivenInScanSet() { public void getResult_KeyNeitherContainedInWriteSetNorReadSet_ShouldReturnEmpty() throws CrudException { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Snapshot.Key key = new Snapshot.Key(prepareGet()); // Act @@ -556,7 +495,7 @@ public void getResult_KeyNeitherContainedInWriteSetNorReadSet_ShouldReturnEmpty( public void getResult_KeyContainedInWriteSetButNotContainedInReadSet_ShouldReturnProperResult() throws CrudException { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Put put = preparePut(); Snapshot.Key key = new Snapshot.Key(prepareGet()); snapshot.putIntoWriteSet(key, put); @@ -581,7 +520,7 @@ public void getResult_KeyContainedInWriteSetButNotContainedInReadSet_ShouldRetur public void getResult_KeyContainedInWriteSetAndReadSetGiven_ShouldReturnMergedResult() throws CrudException { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Put put = preparePutForMergeTest(); Snapshot.Key key = new Snapshot.Key(prepareGet()); TransactionResult result = prepareResult(ANY_ID); @@ -600,7 +539,7 @@ public void getResult_KeyContainedInWriteSetAndReadSetGiven_ShouldReturnMergedRe public void getResult_KeyContainedInDeleteSetAndReadSetGiven_ShouldReturnEmpty() throws CrudException { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Delete delete = prepareDelete(); Snapshot.Key key = new Snapshot.Key(delete); TransactionResult result = prepareResult(ANY_ID); @@ -619,7 +558,7 @@ public void getResult_KeyContainedInDeleteSetAndReadSetGiven_ShouldReturnEmpty() getResult_KeyNeitherContainedInDeleteSetNorWriteSetButContainedInAndReadSetGiven_ShouldReturnOriginalResult() throws CrudException { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Snapshot.Key key = new Snapshot.Key(prepareGet()); TransactionResult result = prepareResult(ANY_ID); snapshot.putIntoReadSet(key, Optional.of(result)); @@ -635,7 +574,7 @@ public void getResult_KeyContainedInDeleteSetAndReadSetGiven_ShouldReturnEmpty() public void getResult_KeyContainedInWriteSetAndGetNotContainedInGetSet_ShouldReturnEmpty() throws CrudException { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Get get = prepareGet(); Snapshot.Key key = new Snapshot.Key(get); @@ -650,7 +589,7 @@ public void getResult_KeyContainedInWriteSetAndGetNotContainedInGetSet_ShouldRet public void getResult_KeyContainedInWriteSetAndGetNotContainedInGetSet_ShouldReturnProperResult() throws CrudException { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Put put = preparePut(); Get get = prepareGet(); Snapshot.Key key = new Snapshot.Key(get); @@ -677,7 +616,7 @@ public void getResult_KeyContainedInWriteSetAndGetNotContainedInGetSet_ShouldRet getResult_KeyContainedInWriteSetAndGetContainedInGetSetGiven_ShouldReturnMergedResult() throws CrudException { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Put put = preparePutForMergeTest(); Get get = prepareGet(); Snapshot.Key key = new Snapshot.Key(get); @@ -697,7 +636,7 @@ public void getResult_KeyContainedInWriteSetAndGetNotContainedInGetSet_ShouldRet public void getResult_KeyContainedInDeleteSetAndGetContainedInGetSetGiven_ShouldReturnEmpty() throws CrudException { // 
Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Delete delete = prepareDelete(); Get get = prepareGet(); Snapshot.Key key = new Snapshot.Key(get); @@ -717,7 +656,7 @@ public void getResult_KeyContainedInDeleteSetAndGetContainedInGetSetGiven_Should getResult_KeyNeitherContainedInDeleteSetNorWriteSetAndGetContainedInGetSetGiven_ShouldReturnOriginalResult() throws CrudException { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Get get = prepareGet(); Snapshot.Key key = new Snapshot.Key(get); TransactionResult result = prepareResult(ANY_ID); @@ -735,7 +674,7 @@ public void getResult_KeyContainedInDeleteSetAndGetContainedInGetSetGiven_Should getResult_KeyContainedInWriteSetAndGetContainedInGetSetWithMatchedConjunctionGiven_ShouldReturnMergedResult() throws CrudException { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Put put = preparePutForMergeTest(); ConditionalExpression condition = ConditionBuilder.column(ANY_NAME_3).isEqualToText(ANY_TEXT_5); Get get = Get.newBuilder(prepareGet()).where(condition).build(); @@ -757,7 +696,7 @@ public void getResult_KeyContainedInDeleteSetAndGetContainedInGetSetGiven_Should getResult_KeyNeitherContainedInDeleteSetNorWriteSetAndGetContainedInGetSetWithUnmatchedConjunction_ShouldReturnOriginalResult() throws CrudException { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Snapshot.Key key = new Snapshot.Key(prepareGet()); TransactionResult result = prepareResult(ANY_ID); ConditionalExpression condition = ConditionBuilder.column(ANY_NAME_1).isEqualToText(ANY_TEXT_2); @@ -776,7 +715,7 @@ public void getResult_KeyContainedInDeleteSetAndGetContainedInGetSetGiven_Should getResult_KeyContainedInWriteSetAndGetContainedInGetSetWithUnmatchedConjunctionGiven_ShouldReturnEmpty() throws CrudException { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Put put = preparePutForMergeTest(); ConditionalExpression condition = ConditionBuilder.column(ANY_NAME_3).isEqualToText(ANY_TEXT_3); Get get = Get.newBuilder(prepareGet()).where(condition).build(); @@ -795,7 +734,7 @@ public void getResult_KeyContainedInDeleteSetAndGetContainedInGetSetGiven_Should @Test public void getResults_ScanNotContainedInScanSetGiven_ShouldReturnEmpty() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Scan scan = prepareScan(); // Act @@ -808,7 +747,7 @@ public void getResults_ScanNotContainedInScanSetGiven_ShouldReturnEmpty() { @Test public void getResults_ScanContainedInScanSetGiven_ShouldReturnProperResults() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Scan scan = prepareScan(); TransactionResult result1 = mock(TransactionResult.class); @@ -900,7 +839,7 @@ private void assertMergedResultIsEqualTo(TransactionResult result) { public void to_PrepareMutationComposerGivenAndSnapshotIsolationSet_ShouldCallComposerProperly() throws ExecutionException { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Put put = preparePut(); Delete delete = prepareAnotherDelete(); TransactionResult result = prepareResult(ANY_ID); @@ -922,7 +861,7 @@ public void to_PrepareMutationComposerGivenAndSnapshotIsolationSet_ShouldCallCom public void to_CommitMutationComposerGiven_ShouldCallComposerProperly() throws ExecutionException { // Arrange - snapshot = 
prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Put put = preparePut(); Delete delete = prepareAnotherDelete(); TransactionResult result = prepareResult(ANY_ID); @@ -943,7 +882,7 @@ public void to_CommitMutationComposerGiven_ShouldCallComposerProperly() public void to_RollbackMutationComposerGiven_ShouldCallComposerProperly() throws ExecutionException { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Put put = preparePut(); Delete delete = prepareAnotherDelete(); TransactionResult result = prepareResult(ANY_ID); @@ -961,803 +900,11 @@ public void to_RollbackMutationComposerGiven_ShouldCallComposerProperly() verify(rollbackComposer).add(delete, result); } - @Test - public void toSerializable_ReadSetNotChanged_ShouldProcessWithoutExceptions() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Get get = prepareAnotherGet(); - Put put = preparePut(); - TransactionResult result = prepareResult(ANY_ID); - TransactionResult txResult = new TransactionResult(result); - snapshot.putIntoGetSet(get, Optional.of(txResult)); - snapshot.putIntoWriteSet(new Snapshot.Key(put), put); - DistributedStorage storage = mock(DistributedStorage.class); - Get getForStorage = - Get.newBuilder(prepareAnotherGet()).consistency(Consistency.LINEARIZABLE).build(); - when(storage.get(getForStorage)).thenReturn(Optional.of(txResult)); - - // Act Assert - assertThatCode(() -> snapshot.toSerializable(storage)).doesNotThrowAnyException(); - - // Assert - verify(storage).get(getForStorage); - } - - @Test - public void toSerializable_ReadSetUpdated_ShouldThrowValidationConflictException() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Get get = prepareAnotherGet(); - Put put = preparePut(); - TransactionResult txResult = prepareResult(ANY_ID); - snapshot.putIntoGetSet(get, Optional.of(txResult)); - snapshot.putIntoWriteSet(new Snapshot.Key(put), put); - DistributedStorage storage = mock(DistributedStorage.class); - TransactionResult changedTxResult = prepareResult(ANY_ID + "x"); - Get getForStorage = - Get.newBuilder(prepareAnotherGet()).consistency(Consistency.LINEARIZABLE).build(); - when(storage.get(getForStorage)).thenReturn(Optional.of(changedTxResult)); - - // Act Assert - assertThatThrownBy(() -> snapshot.toSerializable(storage)) - .isInstanceOf(ValidationConflictException.class); - - // Assert - verify(storage).get(getForStorage); - } - - @Test - public void toSerializable_ReadSetExtended_ShouldThrowValidationConflictException() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Get get = prepareAnotherGet(); - Put put = preparePut(); - snapshot.putIntoGetSet(get, Optional.empty()); - snapshot.putIntoWriteSet(new Snapshot.Key(put), put); - DistributedStorage storage = mock(DistributedStorage.class); - TransactionResult txResult = prepareResult(ANY_ID); - Get getForStorage = - Get.newBuilder(prepareAnotherGet()).consistency(Consistency.LINEARIZABLE).build(); - when(storage.get(getForStorage)).thenReturn(Optional.of(txResult)); - - // Act Assert - assertThatThrownBy(() -> snapshot.toSerializable(storage)) - .isInstanceOf(ValidationConflictException.class); - - // Assert - verify(storage).get(getForStorage); - } - - @Test - public void toSerializable_GetSetWithGetWithIndex_ShouldProcessWithoutExceptions() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Get 
getWithIndex = prepareGetWithIndex(); - TransactionResult txResult = prepareResult(ANY_ID + "x"); - snapshot.putIntoGetSet(getWithIndex, Optional.of(txResult)); - DistributedStorage storage = mock(DistributedStorage.class); - Scan scanForStorage = - Scan.newBuilder(prepareScanWithIndex()).consistency(Consistency.LINEARIZABLE).build(); - - Scanner scanner = mock(Scanner.class); - when(scanner.one()).thenReturn(Optional.of(txResult)).thenReturn(Optional.empty()); - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatCode(() -> snapshot.toSerializable(storage)).doesNotThrowAnyException(); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void - toSerializable_GetSetWithGetWithIndex_RecordInserted_ShouldThrowValidationConflictException() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Get getWithIndex = prepareGetWithIndex(); - TransactionResult result1 = prepareResult(ANY_ID + "x", ANY_TEXT_1, ANY_TEXT_2); - TransactionResult result2 = prepareResult(ANY_ID + "xx", ANY_TEXT_1, ANY_TEXT_3); - snapshot.putIntoGetSet(getWithIndex, Optional.of(result1)); - DistributedStorage storage = mock(DistributedStorage.class); - Scan scanForStorage = - Scan.newBuilder(prepareScanWithIndex()).consistency(Consistency.LINEARIZABLE).build(); - - Scanner scanner = mock(Scanner.class); - when(scanner.one()) - .thenReturn(Optional.of(result1)) - .thenReturn(Optional.of(result2)) - .thenReturn(Optional.empty()); - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatThrownBy(() -> snapshot.toSerializable(storage)) - .isInstanceOf(ValidationConflictException.class); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void - toSerializable_GetSetWithGetWithIndex_RecordInsertedByMyself_ShouldProcessWithoutExceptions() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Get getWithIndex = prepareGetWithIndex(); - TransactionResult result1 = prepareResult(ANY_ID + "x", ANY_TEXT_1, ANY_TEXT_2); - TransactionResult result2 = prepareResult(ANY_ID, ANY_TEXT_1, ANY_TEXT_3); - snapshot.putIntoGetSet(getWithIndex, Optional.of(result1)); - DistributedStorage storage = mock(DistributedStorage.class); - Scan scanForStorage = - Scan.newBuilder(prepareScanWithIndex()).consistency(Consistency.LINEARIZABLE).build(); - - Scanner scanner = mock(Scanner.class); - when(scanner.one()) - .thenReturn(Optional.of(result1)) - .thenReturn(Optional.of(result2)) - .thenReturn(Optional.empty()); - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatCode(() -> snapshot.toSerializable(storage)).doesNotThrowAnyException(); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void toSerializable_ScanSetNotChanged_ShouldProcessWithoutExceptions() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Scan scan = prepareScan(); - TransactionResult txResult = prepareResult(ANY_ID + "x"); - Snapshot.Key key = new Snapshot.Key(scan, txResult, TABLE_METADATA); - snapshot.putIntoScanSet(scan, Maps.newLinkedHashMap(Collections.singletonMap(key, txResult))); - DistributedStorage storage = mock(DistributedStorage.class); - Scanner scanner = mock(Scanner.class); - when(scanner.one()).thenReturn(Optional.of(txResult)).thenReturn(Optional.empty()); - Scan scanForStorage = - Scan.newBuilder(prepareScan()).consistency(Consistency.LINEARIZABLE).build(); 
- when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatCode(() -> snapshot.toSerializable(storage)).doesNotThrowAnyException(); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void toSerializable_ScanSetUpdated_ShouldThrowValidationConflictException() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Scan scan = prepareScan(); - TransactionResult txResult = prepareResult(ANY_ID); - Snapshot.Key key = new Snapshot.Key(scan, txResult, TABLE_METADATA); - snapshot.putIntoScanSet(scan, Maps.newLinkedHashMap(Collections.singletonMap(key, txResult))); - DistributedStorage storage = mock(DistributedStorage.class); - TransactionResult changedTxResult = prepareResult(ANY_ID + "x"); - Scanner scanner = mock(Scanner.class); - when(scanner.one()).thenReturn(Optional.of(changedTxResult)).thenReturn(Optional.empty()); - Scan scanForStorage = - Scan.newBuilder(prepareScan()).consistency(Consistency.LINEARIZABLE).build(); - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatThrownBy(() -> snapshot.toSerializable(storage)) - .isInstanceOf(ValidationConflictException.class); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void toSerializable_ScanSetUpdatedByMyself_ShouldProcessWithoutExceptions() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Scan scan = prepareScan(); - TransactionResult txResult = prepareResult(ANY_ID); - Snapshot.Key key = new Snapshot.Key(scan, txResult, TABLE_METADATA); - snapshot.putIntoScanSet(scan, Maps.newLinkedHashMap(Collections.singletonMap(key, txResult))); - DistributedStorage storage = mock(DistributedStorage.class); - TransactionResult changedTxResult = prepareResult(ANY_ID); - Scanner scanner = mock(Scanner.class); - when(scanner.one()).thenReturn(Optional.of(changedTxResult)).thenReturn(Optional.empty()); - Scan scanForStorage = - Scan.newBuilder(prepareScan()).consistency(Consistency.LINEARIZABLE).build(); - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatCode(() -> snapshot.toSerializable(storage)).doesNotThrowAnyException(); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void toSerializable_ScanSetExtended_ShouldThrowValidationConflictException() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Scan scan = prepareScan(); - TransactionResult result = prepareResult(ANY_ID + "x"); - snapshot.putIntoScanSet(scan, Maps.newLinkedHashMap(Collections.emptyMap())); - DistributedStorage storage = mock(DistributedStorage.class); - TransactionResult txResult = new TransactionResult(result); - Scanner scanner = mock(Scanner.class); - when(scanner.one()).thenReturn(Optional.of(txResult)).thenReturn(Optional.empty()); - Scan scanForStorage = - Scan.newBuilder(prepareScan()).consistency(Consistency.LINEARIZABLE).build(); - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatThrownBy(() -> snapshot.toSerializable(storage)) - .isInstanceOf(ValidationConflictException.class); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void - toSerializable_ScanSetWithMultipleRecordsExtended_ShouldThrowValidationConflictException() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Scan scan = prepareScan(); - TransactionResult result1 = 
prepareResult(ANY_ID + "xx", ANY_TEXT_1, ANY_TEXT_2); - TransactionResult result2 = prepareResult(ANY_ID + "x", ANY_TEXT_1, ANY_TEXT_3); - Snapshot.Key key2 = new Snapshot.Key(scan, result2, TABLE_METADATA); - snapshot.putIntoScanSet(scan, Maps.newLinkedHashMap(ImmutableMap.of(key2, result2))); - DistributedStorage storage = mock(DistributedStorage.class); - Scanner scanner = mock(Scanner.class); - when(scanner.one()) - .thenReturn(Optional.of(result1)) - .thenReturn(Optional.of(result2)) - .thenReturn(Optional.empty()); - Scan scanForStorage = - Scan.newBuilder(prepareScan()).consistency(Consistency.LINEARIZABLE).build(); - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatThrownBy(() -> snapshot.toSerializable(storage)) - .isInstanceOf(ValidationConflictException.class); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void toSerializable_ScanSetExtendedByMyself_ShouldProcessWithoutExceptions() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Scan scan = prepareScan(); - TransactionResult result = prepareResult(ANY_ID); - snapshot.putIntoScanSet(scan, Maps.newLinkedHashMap(Collections.emptyMap())); - DistributedStorage storage = mock(DistributedStorage.class); - TransactionResult txResult = new TransactionResult(result); - Scanner scanner = mock(Scanner.class); - when(scanner.one()).thenReturn(Optional.of(txResult)).thenReturn(Optional.empty()); - Scan scanForStorage = - Scan.newBuilder(prepareScan()).consistency(Consistency.LINEARIZABLE).build(); - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatCode(() -> snapshot.toSerializable(storage)).doesNotThrowAnyException(); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void - toSerializable_ScanSetWithMultipleRecordsExtendedByMyself_ShouldProcessWithoutExceptions() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Scan scan = prepareScan(); - TransactionResult result1 = prepareResult(ANY_ID, ANY_TEXT_1, ANY_TEXT_2); - TransactionResult result2 = prepareResult(ANY_ID + "x", ANY_TEXT_1, ANY_TEXT_3); - Snapshot.Key key2 = new Snapshot.Key(scan, result2, TABLE_METADATA); - snapshot.putIntoScanSet(scan, Maps.newLinkedHashMap(ImmutableMap.of(key2, result2))); - DistributedStorage storage = mock(DistributedStorage.class); - Scanner scanner = mock(Scanner.class); - when(scanner.one()) - .thenReturn(Optional.of(result1)) - .thenReturn(Optional.of(result2)) - .thenReturn(Optional.empty()); - Scan scanForStorage = - Scan.newBuilder(prepareScan()).consistency(Consistency.LINEARIZABLE).build(); - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatCode(() -> snapshot.toSerializable(storage)).doesNotThrowAnyException(); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void toSerializable_ScanSetDeleted_ShouldThrowValidationConflictException() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Scan scan = prepareScan(); - TransactionResult txResult = prepareResult(ANY_ID); - Snapshot.Key key = new Snapshot.Key(scan, txResult, TABLE_METADATA); - snapshot.putIntoScanSet(scan, Maps.newLinkedHashMap(Collections.singletonMap(key, txResult))); - DistributedStorage storage = mock(DistributedStorage.class); - Scanner scanner = mock(Scanner.class); - when(scanner.one()).thenReturn(Optional.empty()); - Scan scanForStorage = - 
Scan.newBuilder(prepareScan()).consistency(Consistency.LINEARIZABLE).build(); - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatThrownBy(() -> snapshot.toSerializable(storage)) - .isInstanceOf(ValidationConflictException.class); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void - toSerializable_ScanSetWithMultipleRecordsDeleted_ShouldThrowValidationConflictException() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Scan scan = prepareScan(); - TransactionResult result1 = prepareResult(ANY_ID + "xx", ANY_TEXT_1, ANY_TEXT_2); - TransactionResult result2 = prepareResult(ANY_ID + "x", ANY_TEXT_1, ANY_TEXT_3); - Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); - Snapshot.Key key2 = new Snapshot.Key(scan, result2, TABLE_METADATA); - snapshot.putIntoScanSet( - scan, Maps.newLinkedHashMap(ImmutableMap.of(key1, result1, key2, result2))); - - DistributedStorage storage = mock(DistributedStorage.class); - Scanner scanner = mock(Scanner.class); - when(scanner.one()).thenReturn(Optional.of(result2)).thenReturn(Optional.empty()); - Scan scanForStorage = - Scan.newBuilder(prepareScan()).consistency(Consistency.LINEARIZABLE).build(); - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatThrownBy(() -> snapshot.toSerializable(storage)) - .isInstanceOf(ValidationConflictException.class); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void toSerializable_MultipleScansInScanSetExist_ShouldProcessWithoutExceptions() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - - Scan scan1 = - Scan.newBuilder() - .namespace(ANY_NAMESPACE_NAME) - .table(ANY_TABLE_NAME) - .partitionKey(Key.ofText(ANY_NAME_1, ANY_TEXT_1)) - .start(Key.ofText(ANY_NAME_2, ANY_TEXT_2)) - .build(); - Scan scan2 = - Scan.newBuilder() - .namespace(ANY_NAMESPACE_NAME) - .table(ANY_TABLE_NAME) - .partitionKey(Key.ofText(ANY_NAME_1, ANY_TEXT_2)) - .start(Key.ofText(ANY_NAME_2, ANY_TEXT_1)) - .build(); - - Result result1 = - new TransactionResult( - new ResultImpl( - ImmutableMap.of( - ANY_NAME_1, - TextColumn.of(ANY_NAME_1, ANY_TEXT_1), - ANY_NAME_2, - TextColumn.of(ANY_NAME_2, ANY_TEXT_2), - Attribute.ID, - TextColumn.of(Attribute.ID, "id1")), - TABLE_METADATA)); - - Result result2 = - new TransactionResult( - new ResultImpl( - ImmutableMap.of( - ANY_NAME_1, - TextColumn.of(ANY_NAME_1, ANY_TEXT_2), - ANY_NAME_2, - TextColumn.of(ANY_NAME_2, ANY_TEXT_1), - Attribute.ID, - TextColumn.of(Attribute.ID, "id2")), - TABLE_METADATA)); - - Snapshot.Key key1 = new Snapshot.Key(scan1, result1, TABLE_METADATA); - Snapshot.Key key2 = new Snapshot.Key(scan2, result2, TABLE_METADATA); - - snapshot.putIntoScanSet( - scan1, - Maps.newLinkedHashMap(Collections.singletonMap(key1, new TransactionResult(result1)))); - snapshot.putIntoScanSet( - scan2, - Maps.newLinkedHashMap(Collections.singletonMap(key2, new TransactionResult(result2)))); - - DistributedStorage storage = mock(DistributedStorage.class); - - Scanner scanner1 = mock(Scanner.class); - when(scanner1.one()).thenReturn(Optional.of(result1)).thenReturn(Optional.empty()); - Scan scan1ForStorage = - Scan.newBuilder() - .namespace(ANY_NAMESPACE_NAME) - .table(ANY_TABLE_NAME) - .partitionKey(Key.ofText(ANY_NAME_1, ANY_TEXT_1)) - .start(Key.ofText(ANY_NAME_2, ANY_TEXT_2)) - .consistency(Consistency.LINEARIZABLE) - .build(); - 
when(storage.scan(scan1ForStorage)).thenReturn(scanner1); - - Scanner scanner2 = mock(Scanner.class); - when(scanner2.one()).thenReturn(Optional.of(result2)).thenReturn(Optional.empty()); - Scan scan2ForStorage = - Scan.newBuilder() - .namespace(ANY_NAMESPACE_NAME) - .table(ANY_TABLE_NAME) - .partitionKey(Key.ofText(ANY_NAME_1, ANY_TEXT_2)) - .start(Key.ofText(ANY_NAME_2, ANY_TEXT_1)) - .consistency(Consistency.LINEARIZABLE) - .build(); - when(storage.scan(scan2ForStorage)).thenReturn(scanner2); - - // Act Assert - assertThatCode(() -> snapshot.toSerializable(storage)).doesNotThrowAnyException(); - } - - @Test - public void toSerializable_NullMetadataInReadSetNotChanged_ShouldProcessWithoutExceptions() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Get get = prepareAnotherGet(); - Put put = preparePut(); - TransactionResult result = prepareResultWithNullMetadata(); - TransactionResult txResult = new TransactionResult(result); - snapshot.putIntoGetSet(get, Optional.of(result)); - snapshot.putIntoWriteSet(new Snapshot.Key(put), put); - DistributedStorage storage = mock(DistributedStorage.class); - Get getForStorage = Get.newBuilder(get).consistency(Consistency.LINEARIZABLE).build(); - when(storage.get(getForStorage)).thenReturn(Optional.of(txResult)); - - // Act Assert - assertThatCode(() -> snapshot.toSerializable(storage)).doesNotThrowAnyException(); - - // Assert - verify(storage).get(getForStorage); - } - - @Test - public void toSerializable_NullMetadataInReadSetChanged_ShouldThrowValidationConflictException() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Get get = prepareAnotherGet(); - Put put = preparePut(); - TransactionResult result = prepareResultWithNullMetadata(); - TransactionResult changedResult = prepareResult(ANY_ID); - snapshot.putIntoGetSet(get, Optional.of(result)); - snapshot.putIntoWriteSet(new Snapshot.Key(put), put); - DistributedStorage storage = mock(DistributedStorage.class); - Get getForStorage = Get.newBuilder(get).consistency(Consistency.LINEARIZABLE).build(); - when(storage.get(getForStorage)).thenReturn(Optional.of(changedResult)); - - // Act Assert - assertThatThrownBy(() -> snapshot.toSerializable(storage)) - .isInstanceOf(ValidationConflictException.class); - - // Assert - verify(storage).get(getForStorage); - } - - @Test - public void toSerializable_ScanWithLimitInScanSet_ShouldProcessWithoutExceptions() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Scan scan = prepareScanWithLimit(1); - TransactionResult result1 = prepareResult(ANY_ID + "x"); - TransactionResult result2 = prepareResult(ANY_ID + "x"); - Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); - snapshot.putIntoScanSet(scan, Maps.newLinkedHashMap(Collections.singletonMap(key1, result1))); - DistributedStorage storage = mock(DistributedStorage.class); - Scan scanForStorage = - Scan.newBuilder(scan).limit(0).consistency(Consistency.LINEARIZABLE).build(); - Scanner scanner = mock(Scanner.class); - when(scanner.one()) - .thenReturn(Optional.of(result1)) - .thenReturn(Optional.of(result2)) - .thenReturn(Optional.empty()); - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatCode(() -> snapshot.toSerializable(storage)).doesNotThrowAnyException(); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void - 
toSerializable_ScanWithLimitInScanSet_WhenInsertingFirstRecordIntoScanRange_ShouldThrowValidationConflictException() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Scan scan = prepareScanWithLimit(1); - TransactionResult result1 = prepareResult(ANY_ID + "x", ANY_TEXT_1, ANY_TEXT_3); - TransactionResult result2 = prepareResult(ANY_ID + "x", ANY_TEXT_1, ANY_TEXT_4); - TransactionResult insertedResult = prepareResult(ANY_ID + "xx", ANY_TEXT_1, ANY_TEXT_2); - Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); - snapshot.putIntoScanSet(scan, Maps.newLinkedHashMap(ImmutableMap.of(key1, result1))); - DistributedStorage storage = mock(DistributedStorage.class); - Scan scanForStorage = - Scan.newBuilder(scan).limit(0).consistency(Consistency.LINEARIZABLE).build(); - Scanner scanner = mock(Scanner.class); - when(scanner.one()) - .thenReturn(Optional.of(insertedResult)) - .thenReturn(Optional.of(result1)) - .thenReturn(Optional.of(result2)) - .thenReturn(Optional.empty()); - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatThrownBy(() -> snapshot.toSerializable(storage)) - .isInstanceOf(ValidationConflictException.class); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void - toSerializable_ScanWithLimitInScanSet_WhenInsertingFirstRecordIntoScanRangeByMyself_ShouldProcessWithoutExceptions() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Scan scan = prepareScanWithLimit(1); - TransactionResult result1 = prepareResult(ANY_ID + "x", ANY_TEXT_1, ANY_TEXT_3); - TransactionResult result2 = prepareResult(ANY_ID + "x", ANY_TEXT_1, ANY_TEXT_4); - TransactionResult insertedResult = prepareResult(ANY_ID, ANY_TEXT_1, ANY_TEXT_2); - Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); - snapshot.putIntoScanSet(scan, Maps.newLinkedHashMap(ImmutableMap.of(key1, result1))); - DistributedStorage storage = mock(DistributedStorage.class); - Scan scanForStorage = - Scan.newBuilder(scan).limit(0).consistency(Consistency.LINEARIZABLE).build(); - Scanner scanner = mock(Scanner.class); - when(scanner.one()) - .thenReturn(Optional.of(insertedResult)) - .thenReturn(Optional.of(result1)) - .thenReturn(Optional.of(result2)) - .thenReturn(Optional.empty()); - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatCode(() -> snapshot.toSerializable(storage)).doesNotThrowAnyException(); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void - toSerializable_ScanWithLimitInScanSet_WhenInsertingLastRecordIntoScanRange_ShouldThrowValidationConflictException() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Scan scan = prepareScanWithLimit(3); - TransactionResult result1 = prepareResult(ANY_ID + "x", ANY_TEXT_1, ANY_TEXT_2); - TransactionResult result2 = prepareResult(ANY_ID + "x", ANY_TEXT_1, ANY_TEXT_3); - TransactionResult insertedResult = prepareResult(ANY_ID + "xx", ANY_TEXT_1, ANY_TEXT_4); - Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); - Snapshot.Key key2 = new Snapshot.Key(scan, result2, TABLE_METADATA); - snapshot.putIntoScanSet( - scan, Maps.newLinkedHashMap(ImmutableMap.of(key1, result1, key2, result2))); - DistributedStorage storage = mock(DistributedStorage.class); - Scan scanForStorage = - Scan.newBuilder(scan).limit(0).consistency(Consistency.LINEARIZABLE).build(); - Scanner scanner = 
mock(Scanner.class); - when(scanner.one()) - .thenReturn(Optional.of(result1)) - .thenReturn(Optional.of(result2)) - .thenReturn(Optional.of(insertedResult)) - .thenReturn(Optional.empty()); - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatThrownBy(() -> snapshot.toSerializable(storage)) - .isInstanceOf(ValidationConflictException.class); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void - toSerializable_ScanWithLimitInScanSet_WhenInsertingLastRecordIntoScanRangeByMyself_ShouldProcessWithoutExceptions() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Scan scan = prepareScanWithLimit(3); - TransactionResult result1 = prepareResult(ANY_ID + "x", ANY_TEXT_1, ANY_TEXT_2); - TransactionResult result2 = prepareResult(ANY_ID + "x", ANY_TEXT_1, ANY_TEXT_3); - TransactionResult insertedResult = prepareResult(ANY_ID, ANY_TEXT_1, ANY_TEXT_4); - Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); - Snapshot.Key key2 = new Snapshot.Key(scan, result2, TABLE_METADATA); - snapshot.putIntoScanSet( - scan, Maps.newLinkedHashMap(ImmutableMap.of(key1, result1, key2, result2))); - DistributedStorage storage = mock(DistributedStorage.class); - Scan scanForStorage = - Scan.newBuilder(scan).limit(0).consistency(Consistency.LINEARIZABLE).build(); - - Scanner scanner = mock(Scanner.class); - when(scanner.one()) - .thenReturn(Optional.of(result1)) - .thenReturn(Optional.of(result2)) - .thenReturn(Optional.of(insertedResult)) - .thenReturn(Optional.empty()); - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatCode(() -> snapshot.toSerializable(storage)).doesNotThrowAnyException(); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void - toSerializable_ScanWithIndexInScanSet_WhenUpdatingRecords_ShouldThrowValidationConflictException() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Scan scan = prepareScanWithIndex(); - TransactionResult result1 = prepareResult(ANY_ID + "x", ANY_TEXT_1, ANY_TEXT_1); - TransactionResult result2 = prepareResult(ANY_ID + "x", ANY_TEXT_2, ANY_TEXT_1); - TransactionResult result3 = prepareResult(ANY_ID + "x", ANY_TEXT_3, ANY_TEXT_1); - Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); - Snapshot.Key key2 = new Snapshot.Key(scan, result2, TABLE_METADATA); - Snapshot.Key key3 = new Snapshot.Key(scan, result3, TABLE_METADATA); - snapshot.putIntoScanSet( - scan, Maps.newLinkedHashMap(ImmutableMap.of(key1, result1, key2, result2, key3, result3))); - - // Simulate that the first and third records were updated by another transaction - Scanner scanner = mock(Scanner.class); - when(scanner.one()).thenReturn(Optional.of(result2)).thenReturn(Optional.empty()); - - DistributedStorage storage = mock(DistributedStorage.class); - Scan scanForStorage = - Scan.newBuilder(scan).limit(0).consistency(Consistency.LINEARIZABLE).build(); - - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatThrownBy(() -> snapshot.toSerializable(storage)) - .isInstanceOf(ValidationConflictException.class); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void - toSerializable_ScanWithIndexInScanSet_WhenUpdatingRecordsByMyself_ShouldProcessWithoutExceptions() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Scan scan = prepareScanWithIndex(); - 
TransactionResult result1 = prepareResult(ANY_ID + "x", ANY_TEXT_1, ANY_TEXT_1); - TransactionResult result2 = prepareResult(ANY_ID + "x", ANY_TEXT_2, ANY_TEXT_1); - TransactionResult result3 = prepareResult(ANY_ID + "x", ANY_TEXT_3, ANY_TEXT_1); - Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); - Snapshot.Key key2 = new Snapshot.Key(scan, result2, TABLE_METADATA); - Snapshot.Key key3 = new Snapshot.Key(scan, result3, TABLE_METADATA); - snapshot.putIntoScanSet( - scan, Maps.newLinkedHashMap(ImmutableMap.of(key1, result1, key2, result2, key3, result3))); - - // Simulate that the first and third records were updated by myself - snapshot.putIntoWriteSet(key1, preparePut(ANY_TEXT_1, ANY_TEXT_1)); - snapshot.putIntoWriteSet(key3, preparePut(ANY_TEXT_3, ANY_TEXT_1)); - Scanner scanner = mock(Scanner.class); - when(scanner.one()).thenReturn(Optional.of(result2)).thenReturn(Optional.empty()); - - DistributedStorage storage = mock(DistributedStorage.class); - Scan scanForStorage = - Scan.newBuilder(scan).limit(0).consistency(Consistency.LINEARIZABLE).build(); - - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatCode(() -> snapshot.toSerializable(storage)).doesNotThrowAnyException(); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void - toSerializable_ScanWithIndexInScanSet_WhenDeletingRecordsByMyself_ShouldProcessWithoutExceptions() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Scan scan = prepareScanWithIndex(); - TransactionResult result1 = prepareResult(ANY_ID + "x", ANY_TEXT_1, ANY_TEXT_1); - TransactionResult result2 = prepareResult(ANY_ID + "x", ANY_TEXT_2, ANY_TEXT_1); - TransactionResult result3 = prepareResult(ANY_ID + "x", ANY_TEXT_3, ANY_TEXT_1); - Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); - Snapshot.Key key2 = new Snapshot.Key(scan, result2, TABLE_METADATA); - Snapshot.Key key3 = new Snapshot.Key(scan, result3, TABLE_METADATA); - snapshot.putIntoScanSet( - scan, Maps.newLinkedHashMap(ImmutableMap.of(key1, result1, key2, result2, key3, result3))); - - // Simulate that the first and third records were deleted by myself - snapshot.putIntoDeleteSet(key1, prepareDelete(ANY_TEXT_1, ANY_TEXT_1)); - snapshot.putIntoDeleteSet(key3, prepareDelete(ANY_TEXT_3, ANY_TEXT_1)); - Scanner scanner = mock(Scanner.class); - when(scanner.one()).thenReturn(Optional.of(result2)).thenReturn(Optional.empty()); - - DistributedStorage storage = mock(DistributedStorage.class); - Scan scanForStorage = - Scan.newBuilder(scan).limit(0).consistency(Consistency.LINEARIZABLE).build(); - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatCode(() -> snapshot.toSerializable(storage)).doesNotThrowAnyException(); - - // Assert - verify(storage).scan(scanForStorage); - } - - @Test - public void toSerializable_ScannerSetNotChanged_ShouldProcessWithoutExceptions() - throws ExecutionException { - // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); - Scan scan = prepareScan(); - TransactionResult result1 = prepareResult(ANY_ID + "x", ANY_TEXT_1, ANY_TEXT_2); - TransactionResult result2 = prepareResult(ANY_ID + "x", ANY_TEXT_1, ANY_TEXT_3); - Snapshot.Key key1 = new Snapshot.Key(scan, result1, TABLE_METADATA); - snapshot.putIntoScannerSet(scan, Maps.newLinkedHashMap(ImmutableMap.of(key1, result1))); - DistributedStorage storage = mock(DistributedStorage.class); - Scan scanForStorage = 
Scan.newBuilder(scan).consistency(Consistency.LINEARIZABLE).build(); - Scanner scanner = mock(Scanner.class); - when(scanner.one()) - .thenReturn(Optional.of(result1)) - .thenReturn(Optional.of(result2)) - .thenReturn(Optional.empty()); - when(storage.scan(scanForStorage)).thenReturn(scanner); - - // Act Assert - assertThatCode(() -> snapshot.toSerializable(storage)).doesNotThrowAnyException(); - - // Assert - verify(storage).scan(scanForStorage); - } - @Test public void verifyNoOverlap_ScanGivenAndDeleteKeyAlreadyPresentInDeleteSet_ShouldThrowIllegalArgumentException() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Delete delete = prepareDelete(); Snapshot.Key deleteKey = new Snapshot.Key(delete); snapshot.putIntoDeleteSet(deleteKey, delete); @@ -1777,7 +924,7 @@ public void toSerializable_ScannerSetNotChanged_ShouldProcessWithoutExceptions() public void verifyNoOverlap_ScanGivenAndPutKeyAlreadyPresentInScanSet_ShouldThrowIllegalArgumentException() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Put put = preparePut(); Snapshot.Key putKey = new Snapshot.Key(put); snapshot.putIntoWriteSet(putKey, put); @@ -1797,7 +944,7 @@ public void toSerializable_ScannerSetNotChanged_ShouldProcessWithoutExceptions() public void verifyNoOverlap_ScanGivenAndPutWithSamePartitionKeyWithoutClusteringKeyInWriteSet_ShouldThrowIllegalArgumentException() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Put put = preparePutWithPartitionKeyOnly(); Snapshot.Key putKey = new Snapshot.Key(put); snapshot.putIntoWriteSet(putKey, put); @@ -1814,7 +961,7 @@ public void toSerializable_ScannerSetNotChanged_ShouldProcessWithoutExceptions() public void verifyNoOverlap_ScanWithNoRangeGivenAndPutInWriteSetOverlappedWithScan_ShouldThrowIllegalArgumentException() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); // "text2" Put put = preparePut(); Snapshot.Key putKey = new Snapshot.Key(put); @@ -1838,7 +985,7 @@ public void toSerializable_ScannerSetNotChanged_ShouldProcessWithoutExceptions() public void verifyNoOverlap_ScanWithNoRangeGivenButPutInWriteSetNotOverlappedWithScanWithConjunctions_ShouldNotThrowException() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); Put put = preparePut(); Snapshot.Key putKey = new Snapshot.Key(put); snapshot.putIntoWriteSet(putKey, put); @@ -1861,7 +1008,7 @@ public void toSerializable_ScannerSetNotChanged_ShouldProcessWithoutExceptions() public void verifyNoOverlap_ScanWithRangeGivenAndPutInWriteSetOverlappedWithScan_ShouldThrowIllegalArgumentException() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); // "text2" Put put = preparePut(); Snapshot.Key putKey = new Snapshot.Key(put); @@ -1921,7 +1068,7 @@ public void toSerializable_ScannerSetNotChanged_ShouldProcessWithoutExceptions() public void verifyNoOverlap_ScanWithEndSideInfiniteRangeGivenAndPutInWriteSetOverlappedWithScan_ShouldThrowIllegalArgumentException() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); // "text2" Put put = preparePut(); Snapshot.Key putKey = new Snapshot.Key(put); @@ -1969,7 +1116,7 @@ public void toSerializable_ScannerSetNotChanged_ShouldProcessWithoutExceptions() public void verifyNoOverlap_ScanWithStartSideInfiniteRangeGivenAndPutInWriteSetOverlappedWithScan_ShouldThrowIllegalArgumentException() { 
// Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); // "text2" Put put = preparePut(); Snapshot.Key putKey = new Snapshot.Key(put); @@ -2016,7 +1163,7 @@ public void toSerializable_ScannerSetNotChanged_ShouldProcessWithoutExceptions() @Test public void verifyNoOverlap_ScanWithIndexGivenAndPutInWriteSetInSameTable_ShouldThrowException() { // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); + snapshot = prepareSnapshot(); Put put = preparePut(); Snapshot.Key putKey = new Snapshot.Key(put); snapshot.putIntoWriteSet(putKey, put); @@ -2041,7 +1188,7 @@ public void verifyNoOverlap_ScanWithIndexGivenAndPutInWriteSetInSameTable_Should public void verifyNoOverlap_ScanWithIndexGivenAndPutInWriteSetInDifferentTable_ShouldNotThrowException() { // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); + snapshot = prepareSnapshot(); Put put = Put.newBuilder() .namespace(ANY_NAMESPACE_NAME) @@ -2071,7 +1218,7 @@ public void verifyNoOverlap_ScanWithIndexGivenAndPutInWriteSetInSameTable_Should @Test public void verifyNoOverlap_ScanWithIndexAndPutWithSameIndexKeyGiven_ShouldThrowException() { // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); + snapshot = prepareSnapshot(); Put put1 = Put.newBuilder() .namespace(ANY_NAMESPACE_NAME) @@ -2113,7 +1260,7 @@ public void verifyNoOverlap_ScanWithIndexAndPutWithSameIndexKeyGiven_ShouldThrow public void verifyNoOverlap_ScanWithIndexAndPutWithSameIndexKeyGivenButNotOverlappedWithScanWithConjunctions_ShouldNotThrowException() { // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); + snapshot = prepareSnapshot(); Put put1 = Put.newBuilder() .namespace(ANY_NAMESPACE_NAME) @@ -2156,7 +1303,7 @@ public void verifyNoOverlap_ScanWithIndexAndPutWithSameIndexKeyGiven_ShouldThrow @Test public void verifyNoOverlap_ScanAllGivenAndPutInWriteSetInSameTable_ShouldThrowException() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); // "text2" Put put = preparePut(); Snapshot.Key putKey = new Snapshot.Key(put); @@ -2179,7 +1326,7 @@ public void verifyNoOverlap_ScanAllGivenAndPutInWriteSetInSameTable_ShouldThrowE public void verifyNoOverlap_ScanAllGivenAndPutInWriteSetNotOverlappingWithScanAll_ShouldNotThrowException() { // Arrange - snapshot = prepareSnapshot(Isolation.SNAPSHOT); + snapshot = prepareSnapshot(); // "text2" Put put = preparePut(); Snapshot.Key putKey = new Snapshot.Key(put); @@ -2201,7 +1348,7 @@ public void verifyNoOverlap_ScanAllGivenAndPutInWriteSetInSameTable_ShouldThrowE @Test public void verifyNoOverlap_CrossPartitionScanGivenAndPutInSameTable_ShouldThrowException() { // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); + snapshot = prepareSnapshot(); Put put = preparePut(); Snapshot.Key putKey = new Snapshot.Key(put); snapshot.putIntoWriteSet(putKey, put); @@ -2221,7 +1368,7 @@ public void verifyNoOverlap_CrossPartitionScanGivenAndPutInSameTable_ShouldThrow public void verifyNoOverlap_CrossPartitionScanGivenAndPutInDifferentNamespace_ShouldNotThrowException() { // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); + snapshot = prepareSnapshot(); Put put = preparePut(); Snapshot.Key putKey = new Snapshot.Key(put); snapshot.putIntoWriteSet(putKey, put); @@ -2241,7 +1388,7 @@ public void verifyNoOverlap_CrossPartitionScanGivenAndPutInSameTable_ShouldThrow public void verifyNoOverlap_CrossPartitionScanGivenAndPutInDifferentTable_ShouldNotThrowException() { // Arrange - snapshot = 
prepareSnapshot(Isolation.SERIALIZABLE); + snapshot = prepareSnapshot(); Put put = preparePut(); Snapshot.Key putKey = new Snapshot.Key(put); snapshot.putIntoWriteSet(putKey, put); @@ -2261,7 +1408,7 @@ public void verifyNoOverlap_CrossPartitionScanGivenAndPutInSameTable_ShouldThrow public void verifyNoOverlap_CrossPartitionScanGivenAndNewPutInSameTableAndAllConditionsMatch_ShouldThrowException() { // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); + snapshot = prepareSnapshot(); Put put = preparePutWithIntColumns(); Snapshot.Key putKey = new Snapshot.Key(put); snapshot.putIntoWriteSet(putKey, put); @@ -2294,7 +1441,7 @@ public void verifyNoOverlap_CrossPartitionScanGivenAndPutInSameTable_ShouldThrow public void verifyNoOverlap_CrossPartitionScanGivenAndNewPutInSameTableAndAnyConjunctionMatch_ShouldThrowException() { // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); + snapshot = prepareSnapshot(); Put put = preparePut(); Snapshot.Key putKey = new Snapshot.Key(put); snapshot.putIntoWriteSet(putKey, put); @@ -2316,7 +1463,7 @@ public void verifyNoOverlap_CrossPartitionScanGivenAndPutInSameTable_ShouldThrow public void verifyNoOverlap_CrossPartitionScanGivenAndNewPutInSameTableAndLikeConditionsMatch_ShouldThrowException() { // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); + snapshot = prepareSnapshot(); Put put = preparePut(); Snapshot.Key putKey = new Snapshot.Key(put); snapshot.putIntoWriteSet(putKey, put); @@ -2338,7 +1485,7 @@ public void verifyNoOverlap_CrossPartitionScanGivenAndPutInSameTable_ShouldThrow public void verifyNoOverlap_CrossPartitionScanGivenAndNewPutInSameTableButConditionNotMatch_ShouldNotThrowException() { // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); + snapshot = prepareSnapshot(); Put put = preparePut(); Snapshot.Key putKey = new Snapshot.Key(put); snapshot.putIntoWriteSet(putKey, put); @@ -2360,7 +1507,7 @@ public void verifyNoOverlap_CrossPartitionScanGivenAndPutInSameTable_ShouldThrow public void verifyNoOverlap_CrossPartitionScanWithoutConjunctionGivenAndNewPutInSameTable_ShouldThrowException() { // Arrange - snapshot = prepareSnapshot(Isolation.SERIALIZABLE); + snapshot = prepareSnapshot(); Put put = preparePutWithIntColumns(); Snapshot.Key putKey = new Snapshot.Key(put); snapshot.putIntoWriteSet(putKey, put); diff --git a/integration-test/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitSpecificIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitSpecificIntegrationTestBase.java index 716533854d..2c011f358f 100644 --- a/integration-test/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitSpecificIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitSpecificIntegrationTestBase.java @@ -7067,7 +7067,7 @@ void manager_mutate_ShouldMutateRecords(Isolation isolation) throws TransactionE case TABLE: case NAMESPACE: case STORAGE: - if (onePhaseCommitEnabled && isolation != Isolation.SERIALIZABLE) { + if (onePhaseCommitEnabled) { // one-phase commit, so only one mutation call verify(storage).mutate(anyList()); @@ -7172,7 +7172,7 @@ void manager_mutate_ShouldMutateRecords(Isolation isolation) throws TransactionE case TABLE: case NAMESPACE: case STORAGE: - if (onePhaseCommitEnabled && isolation != Isolation.SERIALIZABLE) { + if (onePhaseCommitEnabled) { // one-phase commit, so only one mutation call verify(storage).mutate(anyList()); @@ -7293,7 
+7293,7 @@ void manager_mutate_ShouldMutateRecords(Isolation isolation) throws TransactionE } break; case STORAGE: - if (onePhaseCommitEnabled && isolation != Isolation.SERIALIZABLE) { + if (onePhaseCommitEnabled) { // one-phase commit, so only one mutation call verify(storage).mutate(anyList()); @@ -7345,6 +7345,521 @@ void manager_mutate_ShouldMutateRecords(Isolation isolation) throws TransactionE assertThat(result2.get().getInt(BALANCE)).isEqualTo(200); } + @ParameterizedTest + @MethodSource("isolationAndOnePhaseCommitEnabled") + public void + updateAndCommit_SinglePartitionMutationsGiven_ShouldBehaveCorrectlyBasedOnStorageAtomicityUnit( + Isolation isolation, boolean onePhaseCommitEnabled) + throws TransactionException, ExecutionException, CoordinatorException { + if (isGroupCommitEnabled() && onePhaseCommitEnabled) { + // Enabling both one-phase commit and group commit is not supported + return; + } + + // Arrange + + // Prepare initial records + createConsensusCommitManager(isolation) + .mutate( + Arrays.asList( + Insert.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) + .intValue(BALANCE, INITIAL_BALANCE) + .build(), + Insert.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 1)) + .intValue(BALANCE, INITIAL_BALANCE) + .build())); + + ConsensusCommitManager manager = createConsensusCommitManager(isolation, onePhaseCommitEnabled); + DistributedTransaction transaction = manager.begin(); + + // Act + transaction.update( + Update.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) + .intValue(BALANCE, 100) + .build()); + transaction.update( + Update.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 1)) + .intValue(BALANCE, 200) + .build()); + transaction.commit(); + + // Assert + verify(storage, times(2)).get(any(Get.class)); + + StorageInfo storageInfo = admin.getStorageInfo(namespace1); + switch (storageInfo.getMutationAtomicityUnit()) { + case RECORD: + // twice for prepare, twice for commit + verify(storage, times(4)).mutate(anyList()); + + // commit-state should occur + if (isGroupCommitEnabled()) { + verify(coordinator) + .putStateForGroupCommit( + anyString(), anyList(), any(TransactionState.class), anyLong()); + return; + } + verify(coordinator).putState(any(Coordinator.State.class)); + break; + case PARTITION: + case TABLE: + case NAMESPACE: + case STORAGE: + if (onePhaseCommitEnabled) { + // one-phase commit, so only one mutation call + verify(storage).mutate(anyList()); + + // no commit-state should occur + verify(coordinator, never()).putState(any(Coordinator.State.class)); + } else { + // one for prepare, one for commit + verify(storage, times(2)).mutate(anyList()); + + // commit-state should occur + if (isGroupCommitEnabled()) { + verify(coordinator) + .putStateForGroupCommit( + anyString(), anyList(), any(TransactionState.class), anyLong()); + } else { + verify(coordinator).putState(any(Coordinator.State.class)); + } + } + break; + default: + throw new AssertionError(); + } + + Optional result1 = + manager.get( + Get.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) + .build()); + assertThat(result1.isPresent()).isTrue(); + 
assertThat(result1.get().getInt(ACCOUNT_ID)).isEqualTo(0); + assertThat(result1.get().getInt(ACCOUNT_TYPE)).isEqualTo(0); + assertThat(result1.get().getInt(BALANCE)).isEqualTo(100); + + Optional result2 = + manager.get( + Get.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 1)) + .build()); + assertThat(result2.isPresent()).isTrue(); + assertThat(result2.get().getInt(ACCOUNT_ID)).isEqualTo(0); + assertThat(result2.get().getInt(ACCOUNT_TYPE)).isEqualTo(1); + assertThat(result2.get().getInt(BALANCE)).isEqualTo(200); + } + + @ParameterizedTest + @MethodSource("isolationAndOnePhaseCommitEnabled") + public void + updateAndCommit_TwoPartitionsMutationsGiven_ShouldBehaveCorrectlyBasedOnStorageAtomicityUnit( + Isolation isolation, boolean onePhaseCommitEnabled) + throws TransactionException, ExecutionException, CoordinatorException { + if (isGroupCommitEnabled() && onePhaseCommitEnabled) { + // Enabling both one-phase commit and group commit is not supported + return; + } + + // Arrange + + // Prepare initial records + createConsensusCommitManager(isolation) + .mutate( + Arrays.asList( + Insert.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) + .intValue(BALANCE, INITIAL_BALANCE) + .build(), + Insert.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 1)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) + .intValue(BALANCE, INITIAL_BALANCE) + .build())); + + ConsensusCommitManager manager = createConsensusCommitManager(isolation, onePhaseCommitEnabled); + DistributedTransaction transaction = manager.begin(); + + // Act + transaction.update( + Update.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) + .intValue(BALANCE, 100) + .build()); + transaction.update( + Update.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 1)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) + .intValue(BALANCE, 200) + .build()); + transaction.commit(); + + // Assert + verify(storage, times(2)).get(any(Get.class)); + + StorageInfo storageInfo = admin.getStorageInfo(namespace1); + switch (storageInfo.getMutationAtomicityUnit()) { + case RECORD: + case PARTITION: + // twice for prepare, twice for commit + verify(storage, times(4)).mutate(anyList()); + + // commit-state should occur + if (isGroupCommitEnabled()) { + verify(coordinator) + .putStateForGroupCommit( + anyString(), anyList(), any(TransactionState.class), anyLong()); + } else { + verify(coordinator).putState(any(Coordinator.State.class)); + } + break; + case TABLE: + case NAMESPACE: + case STORAGE: + if (onePhaseCommitEnabled) { + // one-phase commit, so only one mutation call + verify(storage).mutate(anyList()); + + // no commit-state should occur + verify(coordinator, never()).putState(any(Coordinator.State.class)); + } else { + // one for prepare, one for commit + verify(storage, times(2)).mutate(anyList()); + + // commit-state should occur + if (isGroupCommitEnabled()) { + verify(coordinator) + .putStateForGroupCommit( + anyString(), anyList(), any(TransactionState.class), anyLong()); + } else { + verify(coordinator).putState(any(Coordinator.State.class)); + } + } + break; + default: + throw new AssertionError(); + } + + Optional result1 = + manager.get( + Get.newBuilder() + .namespace(namespace1) + 
.table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) + .build()); + assertThat(result1.isPresent()).isTrue(); + assertThat(result1.get().getInt(ACCOUNT_ID)).isEqualTo(0); + assertThat(result1.get().getInt(ACCOUNT_TYPE)).isEqualTo(0); + assertThat(result1.get().getInt(BALANCE)).isEqualTo(100); + + Optional result2 = + manager.get( + Get.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 1)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) + .build()); + assertThat(result2.isPresent()).isTrue(); + assertThat(result2.get().getInt(ACCOUNT_ID)).isEqualTo(1); + assertThat(result2.get().getInt(ACCOUNT_TYPE)).isEqualTo(0); + assertThat(result2.get().getInt(BALANCE)).isEqualTo(200); + } + + @ParameterizedTest + @MethodSource("isolationAndOnePhaseCommitEnabled") + public void + updateAndCommit_TwoNamespacesMutationsGiven_ShouldBehaveCorrectlyBasedOnStorageAtomicityUnit( + Isolation isolation, boolean onePhaseCommitEnabled) + throws TransactionException, ExecutionException, CoordinatorException { + if (isGroupCommitEnabled() && onePhaseCommitEnabled) { + // Enabling both one-phase commit and group commit is not supported + return; + } + + // Arrange + + // Prepare initial records + createConsensusCommitManager(isolation) + .mutate( + Arrays.asList( + Insert.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) + .intValue(BALANCE, INITIAL_BALANCE) + .build(), + Insert.newBuilder() + .namespace(namespace2) + .table(TABLE_2) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) + .intValue(BALANCE, INITIAL_BALANCE) + .build())); + + ConsensusCommitManager manager = createConsensusCommitManager(isolation, onePhaseCommitEnabled); + DistributedTransaction transaction = manager.begin(); + + // Act + transaction.update( + Update.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) + .intValue(BALANCE, 100) + .build()); + transaction.update( + Update.newBuilder() + .namespace(namespace2) + .table(TABLE_2) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) + .intValue(BALANCE, 200) + .build()); + transaction.commit(); + + // Assert + verify(storage, times(2)).get(any(Get.class)); + + StorageInfo storageInfo1 = admin.getStorageInfo(namespace1); + StorageInfo storageInfo2 = admin.getStorageInfo(namespace2); + if (!storageInfo1.getStorageName().equals(storageInfo2.getStorageName())) { + // different storages + + // twice for prepare, twice for commit + verify(storage, times(4)).mutate(anyList()); + + // commit-state should occur + if (isGroupCommitEnabled()) { + verify(coordinator) + .putStateForGroupCommit(anyString(), anyList(), any(TransactionState.class), anyLong()); + } else { + verify(coordinator).putState(any(Coordinator.State.class)); + } + } else { + // same storage + switch (storageInfo1.getMutationAtomicityUnit()) { + case RECORD: + case PARTITION: + case TABLE: + case NAMESPACE: + // twice for prepare, twice for commit + verify(storage, times(4)).mutate(anyList()); + + // commit-state should occur + if (isGroupCommitEnabled()) { + verify(coordinator) + .putStateForGroupCommit( + anyString(), anyList(), any(TransactionState.class), anyLong()); + } else { + verify(coordinator).putState(any(Coordinator.State.class)); + } + break; + case STORAGE: + if 
(onePhaseCommitEnabled) { + // one-phase commit, so only one mutation call + verify(storage).mutate(anyList()); + + // no commit-state should occur + verify(coordinator, never()).putState(any(Coordinator.State.class)); + } else { + // one for prepare, one for commit + verify(storage, times(2)).mutate(anyList()); + + // commit-state should occur + if (isGroupCommitEnabled()) { + verify(coordinator) + .putStateForGroupCommit( + anyString(), anyList(), any(TransactionState.class), anyLong()); + } else { + verify(coordinator).putState(any(Coordinator.State.class)); + } + } + break; + default: + throw new AssertionError(); + } + } + + Optional result1 = + manager.get( + Get.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) + .build()); + assertThat(result1.isPresent()).isTrue(); + assertThat(result1.get().getInt(ACCOUNT_ID)).isEqualTo(0); + assertThat(result1.get().getInt(ACCOUNT_TYPE)).isEqualTo(0); + assertThat(result1.get().getInt(BALANCE)).isEqualTo(100); + + Optional result2 = + manager.get( + Get.newBuilder() + .namespace(namespace2) + .table(TABLE_2) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) + .build()); + assertThat(result2.isPresent()).isTrue(); + assertThat(result2.get().getInt(ACCOUNT_ID)).isEqualTo(0); + assertThat(result2.get().getInt(ACCOUNT_TYPE)).isEqualTo(0); + assertThat(result2.get().getInt(BALANCE)).isEqualTo(200); + } + + @ParameterizedTest + @MethodSource("isolationAndOnePhaseCommitEnabled") + public void getAndInsertAndCommit_ShouldBehaveCorrectly( + Isolation isolation, boolean onePhaseCommitEnabled) + throws TransactionException, ExecutionException, CoordinatorException { + if (isGroupCommitEnabled() && onePhaseCommitEnabled) { + // Enabling both one-phase commit and group commit is not supported + return; + } + + // Arrange + + // Prepare initial record + createConsensusCommitManager(isolation) + .insert( + Insert.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) + .intValue(BALANCE, INITIAL_BALANCE) + .build()); + + ConsensusCommitManager manager = createConsensusCommitManager(isolation, onePhaseCommitEnabled); + DistributedTransaction transaction = manager.begin(); + + // Act + Optional result = + transaction.get( + Get.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) + .build()); + assertThat(result.isPresent()).isTrue(); + assertThat(result.get().getInt(ACCOUNT_ID)).isEqualTo(0); + assertThat(result.get().getInt(ACCOUNT_TYPE)).isEqualTo(0); + assertThat(result.get().getInt(BALANCE)).isEqualTo(INITIAL_BALANCE); + + transaction.insert( + Insert.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 1)) + .intValue(BALANCE, 100) + .build()); + transaction.commit(); + + // Assert + if (isolation == Isolation.SERIALIZABLE) { + // one for transaction read, one for validation read + verify(storage, times(2)).get(any(Get.class)); + + // one for prepare, one for commit + verify(storage, times(2)).mutate(anyList()); + + // commit-state should occur + if (isGroupCommitEnabled()) { + verify(coordinator) + .putStateForGroupCommit(anyString(), anyList(), any(TransactionState.class), anyLong()); + return; + } + verify(coordinator).putState(any(Coordinator.State.class)); + } else 
if (onePhaseCommitEnabled) { + // only one transaction read, no validation read + verify(storage).get(any(Get.class)); + + // one-phase commit, so only one mutation call + verify(storage).mutate(anyList()); + + // no commit-state should occur + verify(coordinator, never()).putState(any(Coordinator.State.class)); + } else { + // only one transaction read, no validation read + verify(storage).get(any(Get.class)); + + // one for prepare, one for commit + verify(storage, times(2)).mutate(anyList()); + + // commit-state should occur + if (isGroupCommitEnabled()) { + verify(coordinator) + .putStateForGroupCommit(anyString(), anyList(), any(TransactionState.class), anyLong()); + } else { + verify(coordinator).putState(any(Coordinator.State.class)); + } + } + + Optional result1 = + manager.get( + Get.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 0)) + .build()); + assertThat(result1.isPresent()).isTrue(); + assertThat(result1.get().getInt(ACCOUNT_ID)).isEqualTo(0); + assertThat(result1.get().getInt(ACCOUNT_TYPE)).isEqualTo(0); + assertThat(result1.get().getInt(BALANCE)).isEqualTo(INITIAL_BALANCE); + + Optional result2 = + manager.get( + Get.newBuilder() + .namespace(namespace1) + .table(TABLE_1) + .partitionKey(Key.ofInt(ACCOUNT_ID, 0)) + .clusteringKey(Key.ofInt(ACCOUNT_TYPE, 1)) + .build()); + assertThat(result2.isPresent()).isTrue(); + assertThat(result2.get().getInt(ACCOUNT_ID)).isEqualTo(0); + assertThat(result2.get().getInt(ACCOUNT_TYPE)).isEqualTo(1); + assertThat(result2.get().getInt(BALANCE)).isEqualTo(100); + } + @Test @EnabledIf("isGroupCommitEnabled") void put_WhenTheOtherTransactionsIsDelayed_ShouldBeCommittedWithoutBlocked() throws Exception { From 0e6538ea066435b3305f8fbac1ec1d62ad119ed4 Mon Sep 17 00:00:00 2001 From: brfrn169 Date: Tue, 25 Nov 2025 15:34:48 +0900 Subject: [PATCH 2/2] Fix --- .../scalar/db/transaction/consensuscommit/CommitHandler.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/com/scalar/db/transaction/consensuscommit/CommitHandler.java b/core/src/main/java/com/scalar/db/transaction/consensuscommit/CommitHandler.java index 3885544220..b0cc4ecd07 100644 --- a/core/src/main/java/com/scalar/db/transaction/consensuscommit/CommitHandler.java +++ b/core/src/main/java/com/scalar/db/transaction/consensuscommit/CommitHandler.java @@ -848,7 +848,7 @@ public boolean equals(Object o) { return Iterables.elementsEqual(getSet, that.getSet) && Iterables.elementsEqual(scanSet, that.scanSet) && Iterables.elementsEqual(scannerSet, that.scannerSet) - && Iterables.elementsEqual(updatedRecordKeys, that.updatedRecordKeys); + && Objects.equals(updatedRecordKeys, that.updatedRecordKeys); } @Override
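Note on the one-line follow-up fix above: `ValidationInfo.equals` now compares `updatedRecordKeys` with `Objects.equals` instead of Guava's `Iterables.elementsEqual`. Because `updatedRecordKeys` is a `Set`, an order-sensitive, element-wise comparison can report two equal sets as different when their iteration orders differ (and `Objects.equals` is also null-safe, delegating to `Set.equals`). A minimal, standalone sketch of the difference follows; the class name and key values are illustrative only, not ScalarDB code:

```java
import com.google.common.collect.Iterables;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Objects;
import java.util.Set;

public class SetComparisonSketch {
  public static void main(String[] args) {
    // Two sets with the same elements but different insertion (and thus iteration) order.
    Set<String> a = new LinkedHashSet<>(Arrays.asList("key-1", "key-2"));
    Set<String> b = new LinkedHashSet<>(Arrays.asList("key-2", "key-1"));

    // Order-sensitive: compares elements pairwise in iteration order -> false here.
    System.out.println(Iterables.elementsEqual(a, b)); // false

    // Order-insensitive and null-safe: delegates to Set.equals -> true here.
    System.out.println(Objects.equals(a, b)); // true
  }
}
```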
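For reference, the `updateAndCommit_*` integration tests added above all assert the same counting rule: without one-phase commit, each phase (prepare, then commit) issues one `storage.mutate(...)` call per distinct group that the written records form at the storage's mutation atomicity unit, and one-phase commit collapses everything into a single call only when all writes fall into one such group (and no validation is actually required). The sketch below merely encodes that counting rule as the test assertions state it; `RecordId`, `groupKey`, and the sample values are hypothetical helpers, not part of ScalarDB's API or of `CommitHandler` itself.

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class MutationGroupingSketch {

  enum AtomicityUnit { RECORD, PARTITION, TABLE, NAMESPACE, STORAGE }

  /** Hypothetical, simplified location of a written record. */
  static final class RecordId {
    final String storage, namespace, table, partition, clustering;

    RecordId(String storage, String namespace, String table, String partition, String clustering) {
      this.storage = storage;
      this.namespace = namespace;
      this.table = table;
      this.partition = partition;
      this.clustering = clustering;
    }

    // The grouping key coarsens as the atomicity unit widens.
    String groupKey(AtomicityUnit unit) {
      switch (unit) {
        case RECORD:    return storage + "/" + namespace + "/" + table + "/" + partition + "/" + clustering;
        case PARTITION: return storage + "/" + namespace + "/" + table + "/" + partition;
        case TABLE:     return storage + "/" + namespace + "/" + table;
        case NAMESPACE: return storage + "/" + namespace;
        case STORAGE:   return storage;
        default:        throw new AssertionError();
      }
    }
  }

  /** One storage.mutate(...) call per distinct group, per phase (prepare or commit). */
  static int expectedMutateCallsPerPhase(List<RecordId> writes, AtomicityUnit unit) {
    Set<String> groups = new HashSet<>();
    for (RecordId r : writes) {
      groups.add(r.groupKey(unit));
    }
    return groups.size();
  }

  public static void main(String[] args) {
    // Two records in the same partition (cf. the single-partition test above).
    List<RecordId> samePartition = Arrays.asList(
        new RecordId("s1", "ns1", "tbl1", "id=0", "type=0"),
        new RecordId("s1", "ns1", "tbl1", "id=0", "type=1"));
    System.out.println(expectedMutateCallsPerPhase(samePartition, AtomicityUnit.RECORD));    // 2
    System.out.println(expectedMutateCallsPerPhase(samePartition, AtomicityUnit.PARTITION)); // 1

    // Two records in different namespaces on the same storage (cf. the two-namespace test above).
    List<RecordId> twoNamespaces = Arrays.asList(
        new RecordId("s1", "ns1", "tbl1", "id=0", "type=0"),
        new RecordId("s1", "ns2", "tbl2", "id=0", "type=0"));
    System.out.println(expectedMutateCallsPerPhase(twoNamespaces, AtomicityUnit.NAMESPACE)); // 2
    System.out.println(expectedMutateCallsPerPhase(twoNamespaces, AtomicityUnit.STORAGE));   // 1
  }
}
```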