Implement failIfAlreadyExists in S3 repositories #133030
S3BlobStoreRepositoryTests.java
@@ -105,6 +105,7 @@
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.lessThan;
 import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.startsWith;

 @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint")
 // Need to set up a new cluster for each test because cluster settings use randomized authentication settings
@@ -425,7 +426,7 @@ public void testEnforcedCooldownPeriod() throws IOException {
         if (randomBoolean()) {
             repository.blobStore()
                 .blobContainer(repository.basePath())
-                .writeBlobAtomic(randomNonDataPurpose(), getRepositoryDataBlobName(modifiedRepositoryData.getGenId()), serialized, true);
+                .writeBlobAtomic(randomNonDataPurpose(), getRepositoryDataBlobName(modifiedRepositoryData.getGenId()), serialized, false);
         } else {
             repository.blobStore()
                 .blobContainer(repository.basePath())
@@ -434,7 +435,7 @@ public void testEnforcedCooldownPeriod() throws IOException {
                     getRepositoryDataBlobName(modifiedRepositoryData.getGenId()),
                     serialized.streamInput(),
                     serialized.length(),
-                    true
+                    false
                 );
         }
@@ -568,6 +569,27 @@ public void match(LogEvent event) {
         }
     }

+    public void testFailIfAlreadyExists() throws IOException {
+        try (BlobStore store = newBlobStore()) {
+            final BlobContainer container = store.blobContainer(BlobPath.EMPTY);
+            final String blobName = randomAlphaOfLengthBetween(8, 12);
+            byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
+
+            // initial write blob
+            writeBlob(container, blobName, new BytesArray(data), true);
+
+            // overwrite succeeds if failIfAlreadyExists is set to false
+            writeBlob(container, blobName, new BytesArray(data), false);
+
+            // throw exception if failIfAlreadyExists is set to true
+            var exception = expectThrows(IOException.class, () -> writeBlob(container, blobName, new BytesArray(data), true));
+            assertThat(exception.getMessage(), startsWith("Unable to upload object"));
+
+            container.delete(randomPurpose());
+        }
+    }
+
     /**
      * S3RepositoryPlugin that allows to disable chunked encoding and to set a low threshold between single upload and multipart upload.
      */
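The new test exercises writeBlob end-to-end against the emulated S3 endpoint. The S3 feature it ultimately relies on is the conditional-write precondition If-None-Match: *, which the AWS SDK v2 exposes on PutObjectRequest and which this PR sets whenever failIfAlreadyExists is true. A minimal standalone sketch of that primitive — the bucket name, key, and default-credentials client are assumptions, not part of this PR:

import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.services.s3.model.S3Exception;

public class ConditionalPutSketch {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) { // region/credentials from the default provider chain
            PutObjectRequest request = PutObjectRequest.builder()
                .bucket("my-test-bucket") // hypothetical bucket
                .key("my-blob")           // hypothetical key
                .ifNoneMatch("*")         // only succeed if the key does not exist yet
                .build();
            s3.putObject(request, RequestBody.fromString("payload"));
        } catch (S3Exception e) {
            if (e.statusCode() == 412) {
                System.out.println("object already exists"); // 412 Precondition Failed
            } else {
                throw e;
            }
        }
    }
}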
S3BlobContainer.java
@@ -137,18 +137,15 @@ public long readBlobPreferredLength() {
         return ByteSizeValue.of(32, ByteSizeUnit.MB).getBytes();
     }

-    /**
-     * This implementation ignores the failIfAlreadyExists flag as the S3 API has no way to enforce this due to its weak consistency model.
-     */
     @Override
     public void writeBlob(OperationPurpose purpose, String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists)
         throws IOException {
         assert BlobContainer.assertPurposeConsistency(purpose, blobName);
         assert inputStream.markSupported() : "No mark support on inputStream breaks the S3 SDK's ability to retry requests";
         if (blobSize <= getLargeBlobThresholdInBytes()) {
-            executeSingleUpload(purpose, blobStore, buildKey(blobName), inputStream, blobSize);
+            executeSingleUpload(purpose, blobStore, buildKey(blobName), inputStream, blobSize, failIfAlreadyExists);
         } else {
-            executeMultipartUpload(purpose, blobStore, buildKey(blobName), inputStream, blobSize);
+            executeMultipartUpload(purpose, blobStore, buildKey(blobName), inputStream, blobSize, failIfAlreadyExists);
         }
     }
@@ -366,7 +363,7 @@ public void copyBlob(
         try {
             if (blobSize > getMaxCopySizeBeforeMultipart()) {
-                executeMultipartCopy(purpose, s3SourceBlobContainer, sourceBlobName, blobName, blobSize);
+                executeMultipartCopy(purpose, s3SourceBlobContainer, sourceBlobName, blobName, blobSize, false);
             } else {
                 // metadata is inherited from source, but not canned ACL or storage class
                 final var blobKey = buildKey(blobName);
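Note that copyBlob hard-codes failIfAlreadyExists to false when delegating to executeMultipartCopy, so in this change server-side copies keep their existing overwrite semantics; only the writeBlob paths can opt into the new precondition.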
@@ -545,7 +542,8 @@ void executeSingleUpload(
         final S3BlobStore s3BlobStore,
         final String blobName,
         final InputStream input,
-        final long blobSize
+        final long blobSize,
+        final boolean failIfAlreadyExists
     ) throws IOException {
         try (var clientReference = s3BlobStore.clientReference()) {
             // Extra safety checks
@@ -565,6 +563,9 @@ void executeSingleUpload(
             if (s3BlobStore.serverSideEncryption()) {
                 putRequestBuilder.serverSideEncryption(ServerSideEncryption.AES256);
             }
+            if (failIfAlreadyExists) {
+                putRequestBuilder.ifNoneMatch("*");
+            }
             S3BlobStore.configureRequestForMetrics(putRequestBuilder, blobStore, Operation.PUT_OBJECT, purpose);

             final var putRequest = putRequestBuilder.build();
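When the precondition fires, S3 answers 412 Precondition Failed, which the SDK raises as an S3Exception; the surrounding upload code (outside this hunk) wraps SDK failures into the IOException whose "Unable to upload object" prefix the new test asserts on. A hypothetical sketch of such a mapping — the helper name and message suffixes are illustrative, not the PR's exact code:

import java.io.IOException;
import software.amazon.awssdk.core.exception.SdkException;
import software.amazon.awssdk.services.s3.model.S3Exception;

final class S3UploadErrorMapping {
    // Hypothetical helper: turn an SDK failure into the IOException that
    // BlobContainer callers see; a 412 means the If-None-Match precondition fired.
    static IOException toIOException(String blobName, SdkException e) {
        if (e instanceof S3Exception s3e && s3e.statusCode() == 412) {
            return new IOException("Unable to upload object [" + blobName + "]: object already exists", e);
        }
        return new IOException("Unable to upload object [" + blobName + "]", e);
    }
}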
@@ -586,7 +587,8 @@ private void executeMultipart(
         final String blobName,
         final long partSize,
         final long blobSize,
-        final PartOperation partOperation
+        final PartOperation partOperation,
+        final boolean failIfAlreadyExists
     ) throws IOException {

         ensureMultiPartUploadSize(blobSize);
@@ -639,6 +641,11 @@ private void executeMultipart(
             .key(blobName)
             .uploadId(uploadId)
             .multipartUpload(b -> b.parts(parts));
+
+        if (failIfAlreadyExists) {
+            completeMultipartUploadRequestBuilder.ifNoneMatch("*");
+        }
+
         S3BlobStore.configureRequestForMetrics(completeMultipartUploadRequestBuilder, blobStore, operation, purpose);
         final var completeMultipartUploadRequest = completeMultipartUploadRequestBuilder.build();
         try (var clientReference = s3BlobStore.clientReference()) {
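For multipart uploads the precondition is attached to CompleteMultipartUpload, the only atomic step: individual parts can still be uploaded, and a conflict only surfaces when the upload is completed. A minimal sketch of that behaviour, with placeholder bucket, key, and part values:

import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest;
import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload;
import software.amazon.awssdk.services.s3.model.CompletedPart;
import software.amazon.awssdk.services.s3.model.S3Exception;

class ConditionalCompleteSketch {
    static void complete(S3Client s3, String uploadId) {
        CompletedPart part = CompletedPart.builder().partNumber(1).eTag("etag-1").build(); // placeholder part
        CompleteMultipartUploadRequest request = CompleteMultipartUploadRequest.builder()
            .bucket("my-test-bucket")
            .key("my-blob")
            .uploadId(uploadId)
            .multipartUpload(CompletedMultipartUpload.builder().parts(part).build())
            .ifNoneMatch("*") // fail with 412 if "my-blob" already exists at completion time
            .build();
        try {
            s3.completeMultipartUpload(request);
        } catch (S3Exception e) {
            if (e.statusCode() != 412) throw e;
            // another writer created the object first; the parts already uploaded are wasted
        }
    }
}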
@@ -663,7 +670,8 @@ void executeMultipartUpload(
         final S3BlobStore s3BlobStore,
         final String blobName,
         final InputStream input,
-        final long blobSize
+        final long blobSize,
+        final boolean failIfAlreadyExists
     ) throws IOException {
         executeMultipart(
             purpose,

@@ -680,7 +688,8 @@ void executeMultipartUpload(
                         .uploadPart(uploadRequest, RequestBody.fromInputStream(input, partSize));
                     return CompletedPart.builder().partNumber(partNum).eTag(uploadResponse.eTag()).build();
                 }
-            }
+            },
+            failIfAlreadyExists
         );
     }
@@ -699,7 +708,8 @@ void executeMultipartCopy(
         final S3BlobContainer sourceContainer,
         final String sourceBlobName,
         final String destinationBlobName,
-        final long blobSize
+        final long blobSize,
+        final boolean failIfAlreadyExists
     ) throws IOException {
         final long copyPartSize = MAX_FILE_SIZE.getBytes();
         final var destinationKey = buildKey(destinationBlobName);
@@ -727,7 +737,8 @@ void executeMultipartCopy(
                     final var uploadPartCopyResponse = clientReference.client().uploadPartCopy(uploadPartCopyRequest);
                     return CompletedPart.builder().partNumber(partNum).eTag(uploadPartCopyResponse.copyPartResult().eTag()).build();
                 }
-            })
+            }),
+            failIfAlreadyExists
         );
     }
S3HttpHandler.java (S3 test fixture)
@@ -189,6 +189,7 @@ public void handle(final HttpExchange exchange) throws IOException {

             } else if (request.isCompleteMultipartUploadRequest()) {
                 final byte[] responseBody;
+                boolean preconditionFailed = false;
                 synchronized (uploads) {
                     final var upload = removeUpload(request.getQueryParamOnce("uploadId"));
                     if (upload == null) {
@@ -206,19 +207,27 @@ public void handle(final HttpExchange exchange) throws IOException {
                     }
                 } else {
                     final var blobContents = upload.complete(extractPartEtags(Streams.readFully(exchange.getRequestBody())));
-                    blobs.put(request.path(), blobContents);
-                    responseBody = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
-                        + "<CompleteMultipartUploadResult>\n"
-                        + "<Bucket>"
-                        + bucket
-                        + "</Bucket>\n"
-                        + "<Key>"
-                        + request.path()
-                        + "</Key>\n"
-                        + "</CompleteMultipartUploadResult>").getBytes(StandardCharsets.UTF_8);
+                    if (isProtectOverwrite(exchange) && blobs.containsKey(request.path())) {
+                        preconditionFailed = true;
+                        responseBody = null;
+                    } else {
+                        blobs.put(request.path(), blobContents);
+                        responseBody = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+                            + "<CompleteMultipartUploadResult>\n"
+                            + "<Bucket>"
+                            + bucket
+                            + "</Bucket>\n"
+                            + "<Key>"
+                            + request.path()
+                            + "</Key>\n"
+                            + "</CompleteMultipartUploadResult>").getBytes(StandardCharsets.UTF_8);
+                    }
                 }
             }
-            if (responseBody == null) {
+            if (preconditionFailed) {
+                exchange.sendResponseHeaders(RestStatus.PRECONDITION_FAILED.getStatus(), -1);
+            } else if (responseBody == null) {
                 exchange.sendResponseHeaders(RestStatus.NOT_FOUND.getStatus(), -1);
             } else {
                 exchange.getResponseHeaders().add("Content-Type", "application/xml");
@@ -232,7 +241,9 @@ public void handle(final HttpExchange exchange) throws IOException {
         } else if (request.isPutObjectRequest()) {
             // a copy request is a put request with an X-amz-copy-source header
             final var copySource = copySourceName(exchange);
-            if (copySource != null) {
+            if (isProtectOverwrite(exchange) && blobs.containsKey(request.path())) {
+                exchange.sendResponseHeaders(RestStatus.PRECONDITION_FAILED.getStatus(), -1);
+            } else if (copySource != null) {
                 var sourceBlob = blobs.get(copySource);
                 if (sourceBlob == null) {
                     exchange.sendResponseHeaders(RestStatus.NOT_FOUND.getStatus(), -1);
@@ -540,6 +551,18 @@ private static HttpHeaderParser.Range parsePartRange(final HttpExchange exchange
         return parseRangeHeader(sourceRangeHeaders.getFirst());
     }

+    private static boolean isProtectOverwrite(final HttpExchange exchange) {
+        final var ifNoneMatch = exchange.getRequestHeaders().getFirst("If-None-Match");
+
+        if (ifNoneMatch == null) {
+            return false;
+        } else if (ifNoneMatch.equals("*")) {
+            return true;
+        }
+
+        throw new AssertionError("invalid If-None-Match header: " + ifNoneMatch);
+    }
+
     MultipartUpload putUpload(String path) {
         final var upload = new MultipartUpload(UUIDs.randomBase64UUID(), path);
         synchronized (uploads) {