Skip to content

Commit 1ed9c98

Browse files
DaveCTurner and mridula-s109
authored and committed
Fix testFailIfAlreadyExists (elastic#134716)
This test was only waiting for a second for the tasks to complete, which is too short to be reliable in CI. This commit reworks the test to use `runInParallel`, which waits more tenaciously for completion. Closes elastic#134669.
1 parent 64fa87d commit 1ed9c98

File tree

2 files changed

+46
-36
lines changed

2 files changed

+46
-36
lines changed

modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java

Lines changed: 46 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -83,12 +83,12 @@
8383
import java.util.Objects;
8484
import java.util.Set;
8585
import java.util.concurrent.CountDownLatch;
86-
import java.util.concurrent.Executors;
86+
import java.util.concurrent.CyclicBarrier;
8787
import java.util.concurrent.Semaphore;
88-
import java.util.concurrent.TimeUnit;
8988
import java.util.concurrent.atomic.AtomicBoolean;
9089
import java.util.concurrent.atomic.AtomicInteger;
9190
import java.util.concurrent.atomic.AtomicLong;
91+
import java.util.concurrent.atomic.AtomicReference;
9292
import java.util.stream.Collectors;
9393
import java.util.stream.StreamSupport;
9494

@@ -572,52 +572,65 @@ public void match(LogEvent event) {
572572
}
573573
}
574574

575-
public void testFailIfAlreadyExists() throws IOException, InterruptedException {
575+
public void testFailIfAlreadyExists() throws IOException {
576576
try (BlobStore store = newBlobStore()) {
577577
final BlobContainer container = store.blobContainer(BlobPath.EMPTY);
578578
final String blobName = randomAlphaOfLengthBetween(8, 12);
579579

580-
final byte[] data;
581-
if (randomBoolean()) {
582-
// single upload
583-
data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
584-
} else {
585-
// multipart upload
586-
int thresholdInBytes = Math.toIntExact(((S3BlobContainer) container).getLargeBlobThresholdInBytes());
587-
data = randomBytes(randomIntBetween(thresholdInBytes, thresholdInBytes + scaledRandomIntBetween(1024, 1 << 16)));
588-
}
589-
590-
// initial write blob
591-
AtomicInteger exceptionCount = new AtomicInteger(0);
592-
try (var executor = Executors.newFixedThreadPool(2)) {
593-
for (int i = 0; i < 2; i++) {
594-
executor.submit(() -> {
595-
try {
596-
writeBlob(container, blobName, new BytesArray(data), true);
597-
} catch (IOException e) {
598-
exceptionCount.incrementAndGet();
599-
}
600-
});
580+
// initial write: exactly one of these may succeed
581+
final var parallelWrites = between(1, 4);
582+
final var exceptionCount = new AtomicInteger(0);
583+
final var successData = new AtomicReference<byte[]>();
584+
final var writeBarrier = new CyclicBarrier(parallelWrites);
585+
runInParallel(parallelWrites, ignored -> {
586+
final byte[] data = getRandomData(container);
587+
safeAwait(writeBarrier);
588+
try {
589+
writeBlob(container, blobName, new BytesArray(data), true);
590+
assertTrue(successData.compareAndSet(null, data));
591+
} catch (IOException e) {
592+
exceptionCount.incrementAndGet();
601593
}
602-
executor.shutdown();
603-
var done = executor.awaitTermination(1, TimeUnit.SECONDS);
604-
assertTrue(done);
605-
}
594+
});
606595

607-
assertEquals(1, exceptionCount.get());
596+
// the data in the blob comes from the successful write
597+
assertNotNull(successData.get());
598+
assertArrayEquals(successData.get(), readBlobFully(container, blobName, successData.get().length));
608599

609-
// overwrite if failIfAlreadyExists is set to false
610-
writeBlob(container, blobName, new BytesArray(data), false);
600+
// all the other writes failed with an IOException
601+
assertEquals(parallelWrites - 1, exceptionCount.get());
611602

612-
// throw exception if failIfAlreadyExists is set to true
613-
var exception = expectThrows(IOException.class, () -> writeBlob(container, blobName, new BytesArray(data), true));
603+
// verify that another write succeeds
604+
final var overwriteData = getRandomData(container);
605+
writeBlob(container, blobName, new BytesArray(overwriteData), false);
606+
assertArrayEquals(overwriteData, readBlobFully(container, blobName, overwriteData.length));
614607

608+
// throw exception if failIfAlreadyExists is set to true
609+
var exception = expectThrows(
610+
IOException.class,
611+
() -> writeBlob(container, blobName, new BytesArray(getRandomData(container)), true)
612+
);
615613
assertThat(exception.getMessage(), startsWith("Unable to upload"));
616614

615+
assertArrayEquals(overwriteData, readBlobFully(container, blobName, overwriteData.length));
616+
617617
container.delete(randomPurpose());
618618
}
619619
}
620620

621+
private static byte[] getRandomData(BlobContainer container) {
622+
final byte[] data;
623+
if (randomBoolean()) {
624+
// single upload
625+
data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
626+
} else {
627+
// multipart upload
628+
int thresholdInBytes = Math.toIntExact(((S3BlobContainer) container).getLargeBlobThresholdInBytes());
629+
data = randomBytes(randomIntBetween(thresholdInBytes, thresholdInBytes + scaledRandomIntBetween(1024, 1 << 16)));
630+
}
631+
return data;
632+
}
633+
621634
/**
622635
* S3RepositoryPlugin that allows to disable chunked encoding and to set a low threshold between single upload and multipart upload.
623636
*/

muted-tests.yml

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -540,9 +540,6 @@ tests:
540540
- class: org.elasticsearch.aggregations.bucket.AggregationReductionCircuitBreakingIT
541541
method: testCBTrippingOnReduction
542542
issue: https://github.com/elastic/elasticsearch/issues/134667
543-
- class: org.elasticsearch.repositories.s3.S3BlobStoreRepositoryTests
544-
method: testFailIfAlreadyExists
545-
issue: https://github.com/elastic/elasticsearch/issues/134669
546543
- class: org.elasticsearch.xpack.esql.expression.function.scalar.score.DecayTests
547544
method: "testEvaluateBlockWithNulls {TestCase=<integer>, <integer>, <integer>, <_source> #2}"
548545
issue: https://github.com/elastic/elasticsearch/issues/134679

0 commit comments

Comments
 (0)