|
13 | 13 | import com.amazonaws.DnsResolver; |
14 | 14 | import com.amazonaws.SdkClientException; |
15 | 15 | import com.amazonaws.services.s3.AmazonS3ClientBuilder; |
16 | | -import com.amazonaws.services.s3.internal.MD5DigestCalculatingInputStream; |
17 | | -import com.amazonaws.util.Base16; |
18 | 16 | import com.sun.net.httpserver.HttpExchange; |
19 | 17 | import com.sun.net.httpserver.HttpHandler; |
20 | 18 |
|
|
25 | 23 | import org.elasticsearch.common.blobstore.BlobContainer; |
26 | 24 | import org.elasticsearch.common.blobstore.BlobPath; |
27 | 25 | import org.elasticsearch.common.blobstore.OperationPurpose; |
| 26 | +import org.elasticsearch.common.bytes.BytesArray; |
28 | 27 | import org.elasticsearch.common.bytes.BytesReference; |
| 28 | +import org.elasticsearch.common.hash.MessageDigests; |
29 | 29 | import org.elasticsearch.common.io.Streams; |
30 | 30 | import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; |
31 | 31 | import org.elasticsearch.common.lucene.store.InputStreamIndexInput; |
@@ -365,13 +365,12 @@ public void testWriteLargeBlob() throws Exception { |
365 | 365 | } |
366 | 366 | } else if (s3Request.isUploadPartRequest()) { |
367 | 367 | // upload part request |
368 | | - MD5DigestCalculatingInputStream md5 = new MD5DigestCalculatingInputStream(exchange.getRequestBody()); |
369 | | - BytesReference bytes = Streams.readFully(md5); |
| 368 | + BytesReference bytes = Streams.readFully(exchange.getRequestBody()); |
370 | 369 | assertThat((long) bytes.length(), anyOf(equalTo(lastPartSize), equalTo(bufferSize.getBytes()))); |
371 | 370 | assertThat(contentLength, anyOf(equalTo(lastPartSize), equalTo(bufferSize.getBytes()))); |
372 | 371 |
|
373 | 372 | if (countDownUploads.decrementAndGet() % 2 == 0) { |
374 | | - exchange.getResponseHeaders().add("ETag", Base16.encodeAsString(md5.getMd5Digest())); |
| 373 | + exchange.getResponseHeaders().add("ETag", getBase16MD5Digest(bytes)); |
375 | 374 | exchange.sendResponseHeaders(HttpStatus.SC_OK, -1); |
376 | 375 | exchange.close(); |
377 | 376 | return; |
@@ -463,12 +462,11 @@ public void testWriteLargeBlobStreaming() throws Exception { |
463 | 462 | } |
464 | 463 | } else if (s3Request.isUploadPartRequest()) { |
465 | 464 | // upload part request |
466 | | - MD5DigestCalculatingInputStream md5 = new MD5DigestCalculatingInputStream(exchange.getRequestBody()); |
467 | | - BytesReference bytes = Streams.readFully(md5); |
| 465 | + BytesReference bytes = Streams.readFully(exchange.getRequestBody()); |
468 | 466 |
|
469 | 467 | if (counterUploads.incrementAndGet() % 2 == 0) { |
470 | 468 | bytesReceived.addAndGet(bytes.length()); |
471 | | - exchange.getResponseHeaders().add("ETag", Base16.encodeAsString(md5.getMd5Digest())); |
| 469 | + exchange.getResponseHeaders().add("ETag", getBase16MD5Digest(bytes)); |
472 | 470 | exchange.sendResponseHeaders(HttpStatus.SC_OK, -1); |
473 | 471 | exchange.close(); |
474 | 472 | return; |
@@ -859,6 +857,21 @@ public void testTrimmedLogAndCappedSuppressedErrorOnMultiObjectDeletionException |
859 | 857 | } |
860 | 858 | } |
861 | 859 |
|
| 860 | + private static String getBase16MD5Digest(BytesReference bytesReference) { |
| 861 | + return MessageDigests.toHexString(MessageDigests.digest(bytesReference, MessageDigests.md5())); |
| 862 | + } |
| 863 | + |
| 864 | + public void testGetBase16MD5Digest() { |
| 865 | + // from Wikipedia, see also org.elasticsearch.common.hash.MessageDigestsTests.testMd5 |
| 866 | + assertBase16MD5Digest("", "d41d8cd98f00b204e9800998ecf8427e"); |
| 867 | + assertBase16MD5Digest("The quick brown fox jumps over the lazy dog", "9e107d9d372bb6826bd81d3542a419d6"); |
| 868 | + assertBase16MD5Digest("The quick brown fox jumps over the lazy dog.", "e4d909c290d0fb1ca068ffaddf22cbd0"); |
| 869 | + } |
| 870 | + |
| 871 | + private static void assertBase16MD5Digest(String input, String expectedDigestString) { |
| 872 | + assertEquals(expectedDigestString, getBase16MD5Digest(new BytesArray(input))); |
| 873 | + } |
| 874 | + |
862 | 875 | @Override |
863 | 876 | protected Matcher<Integer> getMaxRetriesMatcher(int maxRetries) { |
864 | 877 | // some attempts make meaningful progress and do not count towards the max retry limit |
|
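The new `getBase16MD5Digest` helper replaces the AWS SDK's `MD5DigestCalculatingInputStream` and `Base16` with Elasticsearch's own `MessageDigests` utilities; the mock upload-part handlers still answer with the hex-encoded MD5 of the received part bytes as the `ETag`. Below is a minimal standalone sketch of the same computation using only the JDK (Java 17+); the class and method names here are illustrative and not part of the change:

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.HexFormat;

public class Md5EtagSketch {

    // Hypothetical stand-in for the test helper: hex-encode (Base16) the MD5 digest of the given bytes.
    static String base16Md5(byte[] bytes) throws NoSuchAlgorithmException {
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        return HexFormat.of().formatHex(md5.digest(bytes));
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        // Same Wikipedia test vector asserted in testGetBase16MD5Digest above.
        String input = "The quick brown fox jumps over the lazy dog";
        System.out.println(base16Md5(input.getBytes(StandardCharsets.UTF_8)));
        // expected: 9e107d9d372bb6826bd81d3542a419d6
    }
}
```

Running the sketch prints `9e107d9d372bb6826bd81d3542a419d6`, matching the vector checked by `testGetBase16MD5Digest`, which is also what the ETag header of a mock upload-part response would contain for that body.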