Skip to content

Commit 6b85c33

Browse files
committed
address review comments
1 parent 2a9f23b commit 6b85c33

File tree

2 files changed

+29
-27
lines changed

2 files changed

+29
-27
lines changed

modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java

Lines changed: 21 additions & 19 deletions
Original file line number | Diff line number | Diff line change
@@ -52,7 +52,6 @@
5252
import java.util.concurrent.ConcurrentHashMap;
5353
import java.util.concurrent.Executor;
5454
import java.util.concurrent.TimeUnit;
55-
import java.util.concurrent.atomic.AtomicReference;
5655
import java.util.concurrent.atomic.LongAdder;
5756
import java.util.stream.Collectors;
5857

@@ -342,7 +341,17 @@ public BlobContainer blobContainer(BlobPath path) {
342341
return new S3BlobContainer(path, this);
343342
}
344343

345-
record DeletionExceptions(Exception exception, int count) {}
344+
private static class DeletionExceptions {
345+
private Exception exception = null;
346+
private int count = 0;
347+
348+
void useOrMaybeSuppress(Exception e) {
349+
if (count < MAX_DELETE_EXCEPTIONS) {
350+
exception = ExceptionsHelper.useOrSuppress(exception, e);
351+
count++;
352+
}
353+
}
354+
}
346355

347356
void deleteBlobs(OperationPurpose purpose, Iterator<String> blobNames) throws IOException {
348357
if (blobNames.hasNext() == false) {
@@ -352,19 +361,19 @@ void deleteBlobs(OperationPurpose purpose, Iterator<String> blobNames) throws IO
352361
final List<String> partition = new ArrayList<>();
353362
try {
354363
// S3 API only allows 1k blobs per delete so we split up the given blobs into requests of max. 1k deletes
355-
final AtomicReference<DeletionExceptions> aex = new AtomicReference<>(new DeletionExceptions(null, 0));
364+
final var deletionExceptions = new DeletionExceptions();
356365
blobNames.forEachRemaining(key -> {
357366
partition.add(key);
358367
if (partition.size() == bulkDeletionBatchSize) {
359-
deletePartition(purpose, partition, aex);
368+
deletePartition(purpose, partition, deletionExceptions);
360369
partition.clear();
361370
}
362371
});
363372
if (partition.isEmpty() == false) {
364-
deletePartition(purpose, partition, aex);
373+
deletePartition(purpose, partition, deletionExceptions);
365374
}
366-
if (aex.get().exception != null) {
367-
throw aex.get().exception;
375+
if (deletionExceptions.exception != null) {
376+
throw deletionExceptions.exception;
368377
}
369378
} catch (Exception e) {
370379
throw new IOException("Failed to delete blobs " + partition.stream().limit(10).toList(), e);
@@ -376,9 +385,9 @@ void deleteBlobs(OperationPurpose purpose, Iterator<String> blobNames) throws IO
376385
*
377386
* @param purpose The {@link OperationPurpose} of the deletion
378387
* @param partition The list of blobs to delete
379-
* @param aex A holder for any exception(s) thrown during the deletion
388+
* @param deletionExceptions A holder for any exception(s) thrown during the deletion
380389
*/
381-
private void deletePartition(OperationPurpose purpose, List<String> partition, AtomicReference<DeletionExceptions> aex) {
390+
private void deletePartition(OperationPurpose purpose, List<String> partition, DeletionExceptions deletionExceptions) {
382391
final Iterator<TimeValue> retries = retryThrottledDeleteBackoffPolicy.iterator();
383392
int retryCounter = 0;
384393
while (true) {
@@ -399,7 +408,7 @@ private void deletePartition(OperationPurpose purpose, List<String> partition, A
399408
),
400409
e
401410
);
402-
useOrMaybeSuppress(aex, e);
411+
deletionExceptions.useOrMaybeSuppress(e);
403412
return;
404413
} catch (AmazonClientException e) {
405414
if (shouldRetryDelete(purpose) && RetryUtils.isThrottlingException(e)) {
@@ -408,26 +417,19 @@ private void deletePartition(OperationPurpose purpose, List<String> partition, A
408417
retryCounter++;
409418
} else {
410419
s3RepositoriesMetrics.retryDeletesHistogram().record(retryCounter);
411-
useOrMaybeSuppress(aex, e);
420+
deletionExceptions.useOrMaybeSuppress(e);
412421
return;
413422
}
414423
} else {
415424
// The AWS client threw any unexpected exception and did not execute the request at all so we do not
416425
// remove any keys from the outstanding deletes set.
417-
useOrMaybeSuppress(aex, e);
426+
deletionExceptions.useOrMaybeSuppress(e);
418427
return;
419428
}
420429
}
421430
}
422431
}
423432

424-
private void useOrMaybeSuppress(AtomicReference<DeletionExceptions> aex, Exception e) {
425-
var deletionExceptions = aex.get();
426-
if (deletionExceptions.count < MAX_DELETE_EXCEPTIONS) {
427-
aex.set(new DeletionExceptions(ExceptionsHelper.useOrSuppress(deletionExceptions.exception, e), deletionExceptions.count + 1));
428-
}
429-
}
430-
431433
/**
432434
* If there are remaining retries, pause for the configured interval then return true
433435
*

modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java

Lines changed: 8 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -1105,14 +1105,14 @@ public void testSuppressedDeletionErrorsAreCapped() {
11051105
fail("expected only deletions");
11061106
}
11071107
});
1108-
try {
1109-
var maxNoOfDeletions = 2 * S3BlobStore.MAX_DELETE_EXCEPTIONS;
1110-
var blobs = randomList(1, maxNoOfDeletions * maxBulkDeleteSize, ESTestCase::randomIdentifier);
1111-
blobContainer.deleteBlobsIgnoringIfNotExists(randomPurpose(), blobs.iterator());
1112-
fail("deletion should not succeed");
1113-
} catch (IOException e) {
1114-
assertThat(e.getCause().getSuppressed().length, lessThan(S3BlobStore.MAX_DELETE_EXCEPTIONS));
1115-
}
1108+
var maxNoOfDeletions = 2 * S3BlobStore.MAX_DELETE_EXCEPTIONS;
1109+
var blobs = randomList(1, maxNoOfDeletions * maxBulkDeleteSize, ESTestCase::randomIdentifier);
1110+
var exception = expectThrows(
1111+
IOException.class,
1112+
"deletion should not succeed",
1113+
() -> blobContainer.deleteBlobsIgnoringIfNotExists(randomPurpose(), blobs.iterator())
1114+
);
1115+
assertThat(exception.getCause().getSuppressed().length, lessThan(S3BlobStore.MAX_DELETE_EXCEPTIONS));
11161116
}
11171117

11181118
@Override

0 commit comments

Comments (0)