@@ -10,13 +10,20 @@
 import fixture.s3.S3HttpHandler;
 
 import com.amazonaws.http.AmazonHttpClient;
+import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
+import com.amazonaws.services.s3.model.ListMultipartUploadsRequest;
+import com.amazonaws.services.s3.model.MultipartUpload;
 import com.sun.net.httpserver.Headers;
 import com.sun.net.httpserver.HttpExchange;
 import com.sun.net.httpserver.HttpHandler;
 
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.core.LogEvent;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.cluster.metadata.RepositoryMetadata;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.BlobStore;
@@ -54,6 +61,7 @@
 import org.elasticsearch.telemetry.TestTelemetryPlugin;
 import org.elasticsearch.test.BackgroundIndexer;
 import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestIssueLogging;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.xcontent.NamedXContentRegistry;
@@ -70,6 +78,7 @@
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.stream.Collectors;
@@ -81,6 +90,7 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.hasEntry;
@@ -451,6 +461,106 @@ private Map<S3BlobStore.StatsKey, Long> getServerMetrics() {
         return Collections.emptyMap();
     }
 
+    public void testMultipartUploadCleanup() {
+        final String repoName = randomRepositoryName();
+        createRepository(repoName, repositorySettings(repoName), true);
+
+        createIndex("test-idx-1");
+        for (int i = 0; i < 100; i++) {
+            prepareIndex("test-idx-1").setId(Integer.toString(i)).setSource("foo", "bar" + i).get();
+        }
+        client().admin().indices().prepareRefresh().get();
+
+        final String snapshotName = randomIdentifier();
+        CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName)
+            .setWaitForCompletion(true)
+            .get();
+        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+        assertThat(
+            createSnapshotResponse.getSnapshotInfo().successfulShards(),
+            equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())
+        );
+
+        final var repository = asInstanceOf(
+            S3Repository.class,
+            internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName)
+        );
+        final var blobStore = asInstanceOf(S3BlobStore.class, asInstanceOf(BlobStoreWrapper.class, repository.blobStore()).delegate());
+
+        try (var clientRef = blobStore.clientReference()) {
+            final var danglingBlobName = randomIdentifier();
+            final var initiateMultipartUploadRequest = new InitiateMultipartUploadRequest(
+                blobStore.bucket(),
+                blobStore.blobContainer(repository.basePath().add("test-multipart-upload")).path().buildAsString() + danglingBlobName
+            );
+            initiateMultipartUploadRequest.putCustomQueryParameter(
+                S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE,
+                OperationPurpose.SNAPSHOT_DATA.getKey()
+            );
+            final var multipartUploadResult = clientRef.client().initiateMultipartUpload(initiateMultipartUploadRequest);
+
+            final var listMultipartUploadsRequest = new ListMultipartUploadsRequest(blobStore.bucket()).withPrefix(
+                repository.basePath().buildAsString()
+            );
+            listMultipartUploadsRequest.putCustomQueryParameter(
+                S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE,
+                OperationPurpose.SNAPSHOT_DATA.getKey()
+            );
+            assertEquals(
+                List.of(multipartUploadResult.getUploadId()),
+                clientRef.client()
+                    .listMultipartUploads(listMultipartUploadsRequest)
+                    .getMultipartUploads()
+                    .stream()
+                    .map(MultipartUpload::getUploadId)
+                    .toList()
+            );
+
+            final var seenCleanupLogLatch = new CountDownLatch(1);
+            MockLog.assertThatLogger(() -> {
+                assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName));
+                safeAwait(seenCleanupLogLatch);
+            },
+                S3BlobContainer.class,
+                new MockLog.SeenEventExpectation(
+                    "found-dangling",
+                    S3BlobContainer.class.getCanonicalName(),
+                    Level.INFO,
+                    "found [1] possibly-dangling multipart uploads; will clean them up after finalizing the current snapshot deletions"
+                ),
+                new MockLog.SeenEventExpectation(
+                    "cleaned-dangling",
+                    S3BlobContainer.class.getCanonicalName(),
+                    Level.INFO,
+                    Strings.format(
+                        "cleaned up dangling multipart upload [%s] of blob [%s]*test-multipart-upload/%s]",
+                        multipartUploadResult.getUploadId(),
+                        repoName,
+                        danglingBlobName
+                    )
+                ) {
+                    @Override
+                    public void match(LogEvent event) {
+                        super.match(event);
+                        if (Regex.simpleMatch(message, event.getMessage().getFormattedMessage())) {
+                            seenCleanupLogLatch.countDown();
+                        }
+                    }
+                }
+            );
+
+            assertThat(
+                clientRef.client()
+                    .listMultipartUploads(listMultipartUploadsRequest)
+                    .getMultipartUploads()
+                    .stream()
+                    .map(MultipartUpload::getUploadId)
+                    .toList(),
+                empty()
+            );
+        }
+    }
+
     /**
      * S3RepositoryPlugin that allows to disable chunked encoding and to set a low threshold between single upload and multipart upload.
      */
@@ -592,6 +702,9 @@ public void maybeTrack(final String rawRequest, Headers requestHeaders) {
             trackRequest("ListObjects");
             metricsCount.computeIfAbsent(new S3BlobStore.StatsKey(S3BlobStore.Operation.LIST_OBJECTS, purpose), k -> new AtomicLong())
                 .incrementAndGet();
+        } else if (Regex.simpleMatch("GET /*/?uploads&*", request)) {
+            // TODO track ListMultipartUploads requests
+            logger.info("--> ListMultipartUploads not tracked [{}] with parsed purpose [{}]", request, purpose.getKey());
         } else if (Regex.simpleMatch("GET /*/*", request)) {
             trackRequest("GetObject");
             metricsCount.computeIfAbsent(new S3BlobStore.StatsKey(S3BlobStore.Operation.GET_OBJECT, purpose), k -> new AtomicLong())
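
The added branch above deliberately stops at logging: ListMultipartUploads requests are matched but not yet counted in metricsCount (see the TODO). If tracking were added later it would presumably mirror the neighbouring branches; a minimal sketch, assuming a hypothetical S3BlobStore.Operation.LIST_MULTIPART_UPLOADS constant that is not part of this change:

        } else if (Regex.simpleMatch("GET /*/?uploads&*", request)) {
            // Hypothetical sketch only: LIST_MULTIPART_UPLOADS is not an existing S3BlobStore.Operation value in this diff.
            trackRequest("ListMultipartUploads");
            metricsCount.computeIfAbsent(
                new S3BlobStore.StatsKey(S3BlobStore.Operation.LIST_MULTIPART_UPLOADS, purpose),
                k -> new AtomicLong()
            ).incrementAndGet();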