diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties
index 1d4e4e9dcc72c..29cd82ca60d81 100644
--- a/build-tools-internal/version.properties
+++ b/build-tools-internal/version.properties
@@ -17,7 +17,6 @@ jna = 5.12.1
netty = 4.1.118.Final
commons_lang3 = 3.9
google_oauth_client = 1.34.1
-awsv1sdk = 1.12.746
awsv2sdk = 2.30.38
reactive_streams = 1.0.4
diff --git a/docs/changelog/126843.yaml b/docs/changelog/126843.yaml
new file mode 100644
index 0000000000000..77d3916c31955
--- /dev/null
+++ b/docs/changelog/126843.yaml
@@ -0,0 +1,90 @@
+pr: 126843
+summary: Upgrade `repository-s3` to AWS SDK v2
+area: Snapshot/Restore
+type: breaking
+issues:
+ - 120993
+highlight:
+ title: Upgrade `repository-s3` to AWS SDK v2
+ body: >-
+ In earlier versions of {es} the `repository-s3` plugin was based on the AWS
+ SDK v1. AWS will withdraw support for this SDK before the end of the life
+ of {es} {minor-version} so we have migrated this plugin to the newer AWS SDK v2.
+
+ The two SDKs are not quite compatible, so please check the breaking changes
+ documentation and test the new version thoroughly before upgrading any
+ production workloads.
+ notable: true
+breaking:
+ title: Upgrade `repository-s3` to AWS SDK v2
+ area: Cluster and node setting
+ details: >-
+ In earlier versions of {es} the `repository-s3` plugin was based on the AWS
+ SDK v1. AWS will withdraw support for this SDK before the end of the life
+ of {es} {minor-version} so we must migrate to the newer AWS SDK v2.
+
+ Unfortunately there are several differences between the two AWS SDK
+ versions which may require you to adjust your system configuration when
+ upgrading to {es} {minor-version} or later. These differences include, but
+ may not be limited to, the following items.
+
+ * AWS SDK v2 requires users to specify the region to use for signing
+ requests, or else to run in an environment in which it can determine the
+ correct region automatically. The older SDK would try to determine the
+ region based on the endpoint URL as specified with the
+ `s3.client.${CLIENT_NAME}.endpoint` setting, together with other data
+ drawn from the operating environment, and would ultimately fall back to
+ `us-east-1` if no better value could be found.
+
+ * AWS SDK v2 does not support the EC2 IMDSv1 protocol.
+
+ * AWS SDK v2 does not support the
+ `com.amazonaws.sdk.ec2MetadataServiceEndpointOverride` system property.
+
+ * AWS SDK v2 does not permit specifying a choice between HTTP and HTTPS so
+ the `s3.client.${CLIENT_NAME}.protocol` setting is deprecated and no longer
+ has any effect.
+
+ * AWS SDK v2 does not permit control over throttling for retries, so
+ the `s3.client.${CLIENT_NAME}.use_throttle_retries` setting is deprecated
+ and no longer has any effect.
+
+ * AWS SDK v2 requires the use of the V4 signature algorithm, so the
+ `s3.client.${CLIENT_NAME}.signer_override` setting is deprecated and no
+ longer has any effect.
+
+ * AWS SDK v2 does not support the `log-delivery-write` canned ACL.
+
+ * AWS SDK v2 counts 4xx responses differently in its metrics reporting.
+
+ * AWS SDK v2 always uses the regional STS endpoint, whereas AWS SDK v1
+ could use either a regional endpoint or the global
+ `https://sts.amazonaws.com` one.
+
+ impact: >-
+ If you use the `repository-s3` module, test your upgrade thoroughly before
+ upgrading any production workloads.
+
+ Adapt your configuration to the new SDK functionality. This includes, but
+ may not be limited to, the following items.
+
+ * Specify the correct signing region using the
+ `s3.client.${CLIENT_NAME}.region` setting on each node. {es} will try to
+ determine the correct region based on the endpoint URL and other data
+ drawn from the operating environment but cannot guarantee to do so
+ correctly in all cases.
+
+ * If you use IMDS to determine the availability zone of a node or to obtain
+ credentials for accessing the EC2 API, ensure that it supports the IMDSv2
+ protocol.
+
+ * If applicable, discontinue use of the
+ `com.amazonaws.sdk.ec2MetadataServiceEndpointOverride` system property.
+
+ * If applicable, specify that you wish to use the insecure HTTP protocol to
+ access the S3 API by setting `s3.client.${CLIENT_NAME}.endpoint` to a URL
+ which starts with `http://`.
+
+ * If applicable, discontinue use of the `log-delivery-write` canned ACL.
+
+ notable: true
diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml
index a2321fc1f81ee..d546e80d1a8a4 100644
--- a/gradle/verification-metadata.xml
+++ b/gradle/verification-metadata.xml
@@ -86,36 +86,6 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
@@ -4792,6 +4762,11 @@
+
+
+
+
+
@@ -4812,6 +4787,11 @@
+
+
+
+
+
@@ -4912,11 +4892,21 @@
+
+
+
+
+
+
+
+
+
+
diff --git a/modules/repository-s3/build.gradle b/modules/repository-s3/build.gradle
index 7e1dd05f1f289..9f77d2489cb81 100644
--- a/modules/repository-s3/build.gradle
+++ b/modules/repository-s3/build.gradle
@@ -19,27 +19,49 @@ esplugin {
}
dependencies {
- api "com.amazonaws:aws-java-sdk-s3:${versions.awsv1sdk}"
- api "com.amazonaws:aws-java-sdk-core:${versions.awsv1sdk}"
- api "com.amazonaws:aws-java-sdk-sts:${versions.awsv1sdk}"
- api "com.amazonaws:jmespath-java:${versions.awsv1sdk}"
- api "org.apache.httpcomponents:httpclient:${versions.httpclient}"
- api "org.apache.httpcomponents:httpcore:${versions.httpcore}"
- api "commons-logging:commons-logging:${versions.commonslogging}"
- api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}"
- api "commons-codec:commons-codec:${versions.commonscodec}"
- api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
- api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}"
- api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}"
- api "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"
- api "joda-time:joda-time:2.10.14"
-
- // HACK: javax.xml.bind was removed from default modules in java 9, so we pull the api in here,
- // and whitelist this hack in JarHell
- api 'javax.xml.bind:jaxb-api:2.2.2'
+ implementation "software.amazon.awssdk:annotations:${versions.awsv2sdk}"
+ implementation "software.amazon.awssdk:apache-client:${versions.awsv2sdk}"
+ implementation "software.amazon.awssdk:auth:${versions.awsv2sdk}"
+ implementation "software.amazon.awssdk:aws-core:${versions.awsv2sdk}"
+ implementation "software.amazon.awssdk:aws-xml-protocol:${versions.awsv2sdk}"
+ implementation "software.amazon.awssdk:http-client-spi:${versions.awsv2sdk}"
+ implementation "software.amazon.awssdk:identity-spi:${versions.awsv2sdk}"
+ implementation "software.amazon.awssdk:metrics-spi:${versions.awsv2sdk}"
+ implementation "software.amazon.awssdk:regions:${versions.awsv2sdk}"
+ implementation "software.amazon.awssdk:retries-spi:${versions.awsv2sdk}"
+ implementation "software.amazon.awssdk:retries:${versions.awsv2sdk}"
+ implementation "software.amazon.awssdk:s3:${versions.awsv2sdk}"
+ implementation "software.amazon.awssdk:sdk-core:${versions.awsv2sdk}"
+ implementation "software.amazon.awssdk:services:${versions.awsv2sdk}"
+ implementation "software.amazon.awssdk:sts:${versions.awsv2sdk}"
+ implementation "software.amazon.awssdk:utils:${versions.awsv2sdk}"
+
+ implementation "org.apache.httpcomponents:httpclient:${versions.httpclient}"
+
+ runtimeOnly "commons-codec:commons-codec:${versions.commonscodec}"
+ runtimeOnly "commons-logging:commons-logging:${versions.commonslogging}"
+ runtimeOnly "joda-time:joda-time:2.10.14"
+ runtimeOnly "org.apache.httpcomponents:httpcore:${versions.httpcore}"
+ runtimeOnly "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}"
+ runtimeOnly "org.reactivestreams:reactive-streams:${versions.reactive_streams}"
+ runtimeOnly "org.slf4j:slf4j-api:${versions.slf4j}"
+ runtimeOnly "software.amazon.awssdk:arns:${versions.awsv2sdk}"
+ runtimeOnly "software.amazon.awssdk:aws-query-protocol:${versions.awsv2sdk}"
+ runtimeOnly "software.amazon.awssdk:checksums-spi:${versions.awsv2sdk}"
+ runtimeOnly "software.amazon.awssdk:checksums:${versions.awsv2sdk}"
+ runtimeOnly "software.amazon.awssdk:endpoints-spi:${versions.awsv2sdk}"
+ runtimeOnly "software.amazon.awssdk:http-auth:${versions.awsv2sdk}"
+ runtimeOnly "software.amazon.awssdk:http-auth-aws:${versions.awsv2sdk}"
+ runtimeOnly "software.amazon.awssdk:http-auth-spi:${versions.awsv2sdk}"
+ runtimeOnly "software.amazon.awssdk:json-utils:${versions.awsv2sdk}"
+ runtimeOnly "software.amazon.awssdk:profiles:${versions.awsv2sdk}"
+ runtimeOnly "software.amazon.awssdk:protocol-core:${versions.awsv2sdk}"
+ runtimeOnly "software.amazon.awssdk:third-party-jackson-core:${versions.awsv2sdk}"
testImplementation project(':test:fixtures:s3-fixture')
+ testImplementation "software.amazon.awssdk:endpoints-spi:${versions.awsv2sdk}"
+ internalClusterTestImplementation project(':test:fixtures:aws-fixture-utils')
internalClusterTestImplementation project(':test:fixtures:minio-fixture')
internalClusterTestRuntimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}"
@@ -68,10 +90,34 @@ restResources {
}
tasks.named("dependencyLicenses").configure {
- mapping from: /aws-java-sdk-.*/, to: 'aws-java-sdk'
- mapping from: /jmespath-java.*/, to: 'aws-java-sdk'
- mapping from: /jackson-.*/, to: 'jackson'
- mapping from: /jaxb-.*/, to: 'jaxb'
+ mapping from: 'annotations', to: 'aws-sdk-2'
+ mapping from: 'apache-client', to: 'aws-sdk-2'
+ mapping from: 'arns', to: 'aws-sdk-2'
+ mapping from: 'auth', to: 'aws-sdk-2'
+ mapping from: 'aws-core', to: 'aws-sdk-2'
+ mapping from: 'aws-query-protocol', to: 'aws-sdk-2'
+ mapping from: 'aws-xml-protocol', to: 'aws-sdk-2'
+ mapping from: 'checksums', to: 'aws-sdk-2'
+ mapping from: 'checksums-spi', to: 'aws-sdk-2'
+ mapping from: 'endpoints-spi', to: 'aws-sdk-2'
+ mapping from: 'http-auth', to: 'aws-sdk-2'
+ mapping from: 'http-auth-aws', to: 'aws-sdk-2'
+ mapping from: 'http-auth-spi', to: 'aws-sdk-2'
+ mapping from: 'http-client-spi', to: 'aws-sdk-2'
+ mapping from: 'identity-spi', to: 'aws-sdk-2'
+ mapping from: 'json-utils', to: 'aws-sdk-2'
+ mapping from: 'metrics-spi', to: 'aws-sdk-2'
+ mapping from: 'profiles', to: 'aws-sdk-2'
+ mapping from: 'protocol-core', to: 'aws-sdk-2'
+ mapping from: 'regions', to: 'aws-sdk-2'
+ mapping from: 'retries', to: 'aws-sdk-2'
+ mapping from: 'retries-spi', to: 'aws-sdk-2'
+ mapping from: 's3', to: 'aws-sdk-2'
+ mapping from: 'sdk-core', to: 'aws-sdk-2'
+ mapping from: 'services', to: 'aws-sdk-2'
+ mapping from: 'sts', to: 'aws-sdk-2'
+ mapping from: 'third-party-jackson-core', to: 'aws-sdk-2'
+ mapping from: 'utils', to: 'aws-sdk-2'
}
esplugin.bundleSpec.from('config/repository-s3') {
@@ -85,23 +131,61 @@ tasks.named("internalClusterTest").configure {
tasks.named("thirdPartyAudit").configure {
ignoreMissingClasses(
- // classes are missing
- 'javax.servlet.ServletContextEvent',
- 'javax.servlet.ServletContextListener',
- 'org.apache.avalon.framework.logger.Logger',
- 'org.apache.log.Hierarchy',
- 'org.apache.log.Logger',
- 'javax.jms.Message',
- // We don't use the kms dependency
- 'com.amazonaws.services.kms.AWSKMS',
- 'com.amazonaws.services.kms.AWSKMSClient',
- 'com.amazonaws.services.kms.AWSKMSClientBuilder',
- 'com.amazonaws.services.kms.model.DecryptRequest',
- 'com.amazonaws.services.kms.model.DecryptResult',
- 'com.amazonaws.services.kms.model.EncryptRequest',
- 'com.amazonaws.services.kms.model.EncryptResult',
- 'com.amazonaws.services.kms.model.GenerateDataKeyRequest',
- 'com.amazonaws.services.kms.model.GenerateDataKeyResult',
- 'javax.activation.DataHandler'
+ // missing/unused classes
+ 'javax.servlet.ServletContextEvent',
+ 'javax.servlet.ServletContextListener',
+ 'org.apache.avalon.framework.logger.Logger',
+ 'org.apache.log.Hierarchy',
+ 'org.apache.log.Logger',
+ 'javax.jms.Message',
+
+ // We use the Apache HTTP client rather than an AWS common runtime (CRT) one, so we don't need any of these classes:
+ 'software.amazon.awssdk.crt.CRT',
+ 'software.amazon.awssdk.crt.auth.credentials.Credentials',
+ 'software.amazon.awssdk.crt.auth.credentials.CredentialsProvider',
+ 'software.amazon.awssdk.crt.auth.credentials.DelegateCredentialsProvider$DelegateCredentialsProviderBuilder',
+ 'software.amazon.awssdk.crt.auth.signing.AwsSigner',
+ 'software.amazon.awssdk.crt.auth.signing.AwsSigningConfig$AwsSignatureType',
+ 'software.amazon.awssdk.crt.auth.signing.AwsSigningConfig$AwsSignedBodyHeaderType',
+ 'software.amazon.awssdk.crt.auth.signing.AwsSigningConfig$AwsSigningAlgorithm',
+ 'software.amazon.awssdk.crt.auth.signing.AwsSigningConfig',
+ 'software.amazon.awssdk.crt.auth.signing.AwsSigningResult',
+ 'software.amazon.awssdk.crt.http.HttpHeader',
+ 'software.amazon.awssdk.crt.http.HttpMonitoringOptions',
+ 'software.amazon.awssdk.crt.http.HttpProxyEnvironmentVariableSetting$HttpProxyEnvironmentVariableType',
+ 'software.amazon.awssdk.crt.http.HttpProxyEnvironmentVariableSetting',
+ 'software.amazon.awssdk.crt.http.HttpProxyOptions',
+ 'software.amazon.awssdk.crt.http.HttpRequest',
+ 'software.amazon.awssdk.crt.http.HttpRequestBodyStream',
+ 'software.amazon.awssdk.crt.io.ClientBootstrap',
+ 'software.amazon.awssdk.crt.io.ExponentialBackoffRetryOptions',
+ 'software.amazon.awssdk.crt.io.StandardRetryOptions',
+ 'software.amazon.awssdk.crt.io.TlsCipherPreference',
+ 'software.amazon.awssdk.crt.io.TlsContext',
+ 'software.amazon.awssdk.crt.io.TlsContextOptions',
+ 'software.amazon.awssdk.crt.s3.ChecksumAlgorithm',
+ 'software.amazon.awssdk.crt.s3.ChecksumConfig$ChecksumLocation',
+ 'software.amazon.awssdk.crt.s3.ChecksumConfig',
+ 'software.amazon.awssdk.crt.s3.ResumeToken',
+ 'software.amazon.awssdk.crt.s3.S3Client',
+ 'software.amazon.awssdk.crt.s3.S3ClientOptions',
+ 'software.amazon.awssdk.crt.s3.S3FinishedResponseContext',
+ 'software.amazon.awssdk.crt.s3.S3MetaRequest',
+ 'software.amazon.awssdk.crt.s3.S3MetaRequestOptions$MetaRequestType',
+ 'software.amazon.awssdk.crt.s3.S3MetaRequestOptions',
+ 'software.amazon.awssdk.crt.s3.S3MetaRequestProgress',
+ 'software.amazon.awssdk.crt.s3.S3MetaRequestResponseHandler',
+ 'software.amazon.awssdk.crtcore.CrtConfigurationUtils',
+ 'software.amazon.awssdk.crtcore.CrtConnectionHealthConfiguration$Builder',
+ 'software.amazon.awssdk.crtcore.CrtConnectionHealthConfiguration$DefaultBuilder',
+ 'software.amazon.awssdk.crtcore.CrtConnectionHealthConfiguration',
+ 'software.amazon.awssdk.crtcore.CrtProxyConfiguration$Builder',
+ 'software.amazon.awssdk.crtcore.CrtProxyConfiguration$DefaultBuilder',
+ 'software.amazon.awssdk.crtcore.CrtProxyConfiguration',
+
+ // We don't use anything eventstream-based so these classes are not needed:
+ 'software.amazon.eventstream.HeaderValue',
+ 'software.amazon.eventstream.Message',
+ 'software.amazon.eventstream.MessageDecoder'
)
}
diff --git a/modules/repository-s3/licenses/aws-java-sdk-LICENSE.txt b/modules/repository-s3/licenses/aws-java-sdk-LICENSE.txt
deleted file mode 100644
index 98d1f9319f374..0000000000000
--- a/modules/repository-s3/licenses/aws-java-sdk-LICENSE.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Apache License
-Version 2.0, January 2004
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
-
-"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
-
- 1. You must give any other recipients of the Work or Derivative Works a copy of this License; and
- 2. You must cause any modified files to carry prominent notices stating that You changed the files; and
- 3. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
- 4. If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
-
-You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-Note: Other license terms may apply to certain, identified software files contained within or distributed with the accompanying software if such terms are included in the directory containing the accompanying software. Such other license terms will then apply in lieu of the terms of the software license above.
-
-JSON processing code subject to the JSON License from JSON.org:
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-The Software shall be used for Good, not Evil.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/modules/repository-s3/licenses/aws-java-sdk-NOTICE.txt b/modules/repository-s3/licenses/aws-java-sdk-NOTICE.txt
deleted file mode 100644
index 565bd6085c71a..0000000000000
--- a/modules/repository-s3/licenses/aws-java-sdk-NOTICE.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-AWS SDK for Java
-Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-
-This product includes software developed by
-Amazon Technologies, Inc (http://www.amazon.com/).
-
-**********************
-THIRD PARTY COMPONENTS
-**********************
-This software includes third party software subject to the following copyrights:
-- XML parsing and utility functions from JetS3t - Copyright 2006-2009 James Murty.
-- JSON parsing and utility functions from JSON.org - Copyright 2002 JSON.org.
-- PKCS#1 PEM encoded private key parsing and utility functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc.
-
-The licenses for these third party components are included in LICENSE.txt
diff --git a/modules/repository-s3/licenses/aws-sdk-2-LICENSE.txt b/modules/repository-s3/licenses/aws-sdk-2-LICENSE.txt
new file mode 100644
index 0000000000000..1eef70a9b9f42
--- /dev/null
+++ b/modules/repository-s3/licenses/aws-sdk-2-LICENSE.txt
@@ -0,0 +1,206 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ Note: Other license terms may apply to certain, identified software files contained within or distributed
+ with the accompanying software if such terms are included in the directory containing the accompanying software.
+ Such other license terms will then apply in lieu of the terms of the software license above.
diff --git a/modules/repository-s3/licenses/aws-sdk-2-NOTICE.txt b/modules/repository-s3/licenses/aws-sdk-2-NOTICE.txt
new file mode 100644
index 0000000000000..f3c4db7d1724e
--- /dev/null
+++ b/modules/repository-s3/licenses/aws-sdk-2-NOTICE.txt
@@ -0,0 +1,26 @@
+AWS SDK for Java 2.0
+Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+
+This product includes software developed by
+Amazon Technologies, Inc (http://www.amazon.com/).
+
+**********************
+THIRD PARTY COMPONENTS
+**********************
+This software includes third party software subject to the following copyrights:
+- XML parsing and utility functions from JetS3t - Copyright 2006-2009 James Murty.
+- PKCS#1 PEM encoded private key parsing and utility functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc.
+- Apache Commons Lang - https://github.com/apache/commons-lang
+- Netty Reactive Streams - https://github.com/playframework/netty-reactive-streams
+- Jackson-core - https://github.com/FasterXML/jackson-core
+- Jackson-dataformat-cbor - https://github.com/FasterXML/jackson-dataformats-binary
+
+The licenses for these third party components are included in LICENSE.txt
+
+- For Apache Commons Lang see also this required NOTICE:
+ Apache Commons Lang
+ Copyright 2001-2020 The Apache Software Foundation
+
+ This product includes software developed at
+ The Apache Software Foundation (https://www.apache.org/).
+
diff --git a/modules/repository-s3/licenses/jackson-LICENSE b/modules/repository-s3/licenses/jackson-LICENSE
deleted file mode 100644
index f5f45d26a49d6..0000000000000
--- a/modules/repository-s3/licenses/jackson-LICENSE
+++ /dev/null
@@ -1,8 +0,0 @@
-This copy of Jackson JSON processor streaming parser/generator is licensed under the
-Apache (Software) License, version 2.0 ("the License").
-See the License for details about distribution rights, and the
-specific rights regarding derivate works.
-
-You may obtain a copy of the License at:
-
-http://www.apache.org/licenses/LICENSE-2.0
diff --git a/modules/repository-s3/licenses/jackson-NOTICE b/modules/repository-s3/licenses/jackson-NOTICE
deleted file mode 100644
index 4c976b7b4cc58..0000000000000
--- a/modules/repository-s3/licenses/jackson-NOTICE
+++ /dev/null
@@ -1,20 +0,0 @@
-# Jackson JSON processor
-
-Jackson is a high-performance, Free/Open Source JSON processing library.
-It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has
-been in development since 2007.
-It is currently developed by a community of developers, as well as supported
-commercially by FasterXML.com.
-
-## Licensing
-
-Jackson core and extension components may licensed under different licenses.
-To find the details that apply to this artifact see the accompanying LICENSE file.
-For more information, including possible other licensing options, contact
-FasterXML.com (http://fasterxml.com).
-
-## Credits
-
-A list of contributors may be found from CREDITS file, which is included
-in some artifacts (usually source distributions); but is always available
-from the source code management (SCM) system project uses.
diff --git a/modules/repository-s3/licenses/jaxb-LICENSE.txt b/modules/repository-s3/licenses/jaxb-LICENSE.txt
deleted file mode 100644
index 833a843cfeee1..0000000000000
--- a/modules/repository-s3/licenses/jaxb-LICENSE.txt
+++ /dev/null
@@ -1,274 +0,0 @@
-COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)Version 1.1
-
-1. Definitions.
-
- 1.1. "Contributor" means each individual or entity that creates or contributes to the creation of Modifications.
-
- 1.2. "Contributor Version" means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor.
-
- 1.3. "Covered Software" means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof.
-
- 1.4. "Executable" means the Covered Software in any form other than Source Code.
-
- 1.5. "Initial Developer" means the individual or entity that first makes Original Software available under this License.
-
- 1.6. "Larger Work" means a work which combines Covered Software or portions thereof with code not governed by the terms of this License.
-
- 1.7. "License" means this document.
-
- 1.8. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein.
-
- 1.9. "Modifications" means the Source Code and Executable form of any of the following:
-
- A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications;
-
- B. Any new file that contains any part of the Original Software or previous Modification; or
-
- C. Any new file that is contributed or otherwise made available under the terms of this License.
-
- 1.10. "Original Software" means the Source Code and Executable form of computer software code that is originally released under this License.
-
- 1.11. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor.
-
- 1.12. "Source Code" means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code.
-
- 1.13. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.
-
-2. License Grants.
-
- 2.1. The Initial Developer Grant.
-
- Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license:
-
- (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and
-
- (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof).
-
- (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License.
-
- (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices.
-
- 2.2. Contributor Grant.
-
- Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:
-
- (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and
-
- (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination).
-
- (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party.
-
- (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor.
-
-3. Distribution Obligations.
-
- 3.1. Availability of Source Code.
-
- Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange.
-
- 3.2. Modifications.
-
- The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License.
-
- 3.3. Required Notices.
-
- You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer.
-
- 3.4. Application of Additional Terms.
-
- You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer.
-
- 3.5. Distribution of Executable Versions.
-
- You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer.
-
- 3.6. Larger Works.
-
- You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software.
-
-4. Versions of the License.
-
- 4.1. New Versions.
-
- Oracle is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License.
-
- 4.2. Effect of New Versions.
-
- You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward.
-
- 4.3. Modified Versions.
-
- When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License.
-
-5. DISCLAIMER OF WARRANTY.
-
- COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-6. TERMINATION.
-
- 6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive.
-
- 6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant.
-
- 6.3. If You assert a patent infringement claim against Participant alleging that the Participant Software directly or indirectly infringes any patent where such claim is resolved (such as by license or settlement) prior to the initiation of patent infringement litigation, then the reasonable value of the licenses granted by such Participant under Sections 2.1 or 2.2 shall be taken into account in determining the amount or value of any payment or license.
-
- 6.4. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination.
-
-7. LIMITATION OF LIABILITY.
-
- UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
-
-8. U.S. GOVERNMENT END USERS.
-
- The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. ? 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License.
-
-9. MISCELLANEOUS.
-
- This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software.
-
-10. RESPONSIBILITY FOR CLAIMS.
-
- As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability.
-
-----------
-NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)
-The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California.
-
-
-
-
-The GNU General Public License (GPL) Version 2, June 1991
-
-
-Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
-
-Preamble
-
-The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too.
-
-When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things.
-
-To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it.
-
-For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.
-
-We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software.
-
-Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations.
-
-Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all.
-
-The precise terms and conditions for copying, distribution and modification follow.
-
-
-TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does.
-
-1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee.
-
-2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions:
-
- a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change.
-
- b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License.
-
- c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License.
-
-3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following:
-
- a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
-
- b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
-
- c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable.
-
-If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code.
-
-4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance.
-
-5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it.
-
-6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License.
-
-7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances.
-
-It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice.
-
-This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.
-
-8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License.
-
-9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation.
-
-10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally.
-
-NO WARRANTY
-
-11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-END OF TERMS AND CONDITIONS
-
-
-How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.
-
-To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.
-
- One line to give the program's name and a brief idea of what it does.
-
- Copyright (C)
-
- This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this when it starts in an interactive mode:
-
- Gnomovision version 69, Copyright (C) year name of author
- Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names:
-
- Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
- signature of Ty Coon, 1 April 1989
- Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License.
-
-
-"CLASSPATH" EXCEPTION TO THE GPL VERSION 2
-
-Certain source files distributed by Oracle are subject to the following clarification and special exception to the GPL Version 2, but only where Oracle has expressly included in the particular source file's header the words "Oracle designates this particular file as subject to the "Classpath" exception as provided by Oracle in the License file that accompanied this code."
-
-Linking this library statically or dynamically with other modules is making a combined work based on this library. Thus, the terms and conditions of the GNU General Public License Version 2 cover the whole combination.
-
-As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version.
diff --git a/modules/repository-s3/licenses/jaxb-NOTICE.txt b/modules/repository-s3/licenses/jaxb-NOTICE.txt
deleted file mode 100644
index 8d1c8b69c3fce..0000000000000
--- a/modules/repository-s3/licenses/jaxb-NOTICE.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/modules/repository-s3/licenses/reactive-streams-LICENSE.txt b/modules/repository-s3/licenses/reactive-streams-LICENSE.txt
new file mode 100644
index 0000000000000..1e141c13ddba2
--- /dev/null
+++ b/modules/repository-s3/licenses/reactive-streams-LICENSE.txt
@@ -0,0 +1,7 @@
+MIT No Attribution
+
+Copyright 2014 Reactive Streams
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/modules/repository-s3/licenses/reactive-streams-NOTICE.txt b/modules/repository-s3/licenses/reactive-streams-NOTICE.txt
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/modules/repository-s3/licenses/slf4j-api-LICENSE.txt b/modules/repository-s3/licenses/slf4j-api-LICENSE.txt
new file mode 100644
index 0000000000000..8fda22f4d72f6
--- /dev/null
+++ b/modules/repository-s3/licenses/slf4j-api-LICENSE.txt
@@ -0,0 +1,21 @@
+Copyright (c) 2004-2014 QOS.ch
+All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/modules/repository-s3/licenses/slf4j-api-NOTICE.txt b/modules/repository-s3/licenses/slf4j-api-NOTICE.txt
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/modules/repository-s3/qa/insecure-credentials/build.gradle b/modules/repository-s3/qa/insecure-credentials/build.gradle
index 4346e1f4547e1..bac3e00f5dadb 100644
--- a/modules/repository-s3/qa/insecure-credentials/build.gradle
+++ b/modules/repository-s3/qa/insecure-credentials/build.gradle
@@ -11,6 +11,15 @@ dependencies {
testImplementation project(':modules:repository-s3')
testImplementation project(':test:framework')
testImplementation project(':server')
+
+ testImplementation "software.amazon.awssdk:auth:${versions.awsv2sdk}"
+ testImplementation "software.amazon.awssdk:aws-core:${versions.awsv2sdk}"
+ testImplementation "software.amazon.awssdk:http-client-spi:${versions.awsv2sdk}"
+ testImplementation "software.amazon.awssdk:identity-spi:${versions.awsv2sdk}"
+ testImplementation "software.amazon.awssdk:regions:${versions.awsv2sdk}"
+ testImplementation "software.amazon.awssdk:s3:${versions.awsv2sdk}"
+ testImplementation "software.amazon.awssdk:sdk-core:${versions.awsv2sdk}"
+ testImplementation "software.amazon.awssdk:utils:${versions.awsv2sdk}"
}
tasks.named("test").configure {
diff --git a/modules/repository-s3/qa/insecure-credentials/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java b/modules/repository-s3/qa/insecure-credentials/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java
index 17b56131938d8..022fe15c03b05 100644
--- a/modules/repository-s3/qa/insecure-credentials/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java
+++ b/modules/repository-s3/qa/insecure-credentials/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java
@@ -9,778 +9,1710 @@
package org.elasticsearch.repositories.s3;
-import com.amazonaws.AmazonClientException;
-import com.amazonaws.AmazonServiceException;
-import com.amazonaws.AmazonWebServiceRequest;
-import com.amazonaws.HttpMethod;
-import com.amazonaws.regions.Region;
-import com.amazonaws.services.s3.AbstractAmazonS3;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.S3ClientOptions;
-import com.amazonaws.services.s3.S3ResponseMetadata;
-import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
-import com.amazonaws.services.s3.model.AccessControlList;
-import com.amazonaws.services.s3.model.Bucket;
-import com.amazonaws.services.s3.model.BucketCrossOriginConfiguration;
-import com.amazonaws.services.s3.model.BucketLifecycleConfiguration;
-import com.amazonaws.services.s3.model.BucketLoggingConfiguration;
-import com.amazonaws.services.s3.model.BucketNotificationConfiguration;
-import com.amazonaws.services.s3.model.BucketPolicy;
-import com.amazonaws.services.s3.model.BucketReplicationConfiguration;
-import com.amazonaws.services.s3.model.BucketTaggingConfiguration;
-import com.amazonaws.services.s3.model.BucketVersioningConfiguration;
-import com.amazonaws.services.s3.model.BucketWebsiteConfiguration;
-import com.amazonaws.services.s3.model.CannedAccessControlList;
-import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
-import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
-import com.amazonaws.services.s3.model.CopyObjectRequest;
-import com.amazonaws.services.s3.model.CopyObjectResult;
-import com.amazonaws.services.s3.model.CopyPartRequest;
-import com.amazonaws.services.s3.model.CopyPartResult;
-import com.amazonaws.services.s3.model.CreateBucketRequest;
-import com.amazonaws.services.s3.model.DeleteBucketCrossOriginConfigurationRequest;
-import com.amazonaws.services.s3.model.DeleteBucketLifecycleConfigurationRequest;
-import com.amazonaws.services.s3.model.DeleteBucketPolicyRequest;
-import com.amazonaws.services.s3.model.DeleteBucketReplicationConfigurationRequest;
-import com.amazonaws.services.s3.model.DeleteBucketRequest;
-import com.amazonaws.services.s3.model.DeleteBucketTaggingConfigurationRequest;
-import com.amazonaws.services.s3.model.DeleteBucketWebsiteConfigurationRequest;
-import com.amazonaws.services.s3.model.DeleteObjectRequest;
-import com.amazonaws.services.s3.model.DeleteObjectsRequest;
-import com.amazonaws.services.s3.model.DeleteObjectsResult;
-import com.amazonaws.services.s3.model.DeleteVersionRequest;
-import com.amazonaws.services.s3.model.GeneratePresignedUrlRequest;
-import com.amazonaws.services.s3.model.GetBucketAclRequest;
-import com.amazonaws.services.s3.model.GetBucketCrossOriginConfigurationRequest;
-import com.amazonaws.services.s3.model.GetBucketLifecycleConfigurationRequest;
-import com.amazonaws.services.s3.model.GetBucketLocationRequest;
-import com.amazonaws.services.s3.model.GetBucketLoggingConfigurationRequest;
-import com.amazonaws.services.s3.model.GetBucketNotificationConfigurationRequest;
-import com.amazonaws.services.s3.model.GetBucketPolicyRequest;
-import com.amazonaws.services.s3.model.GetBucketReplicationConfigurationRequest;
-import com.amazonaws.services.s3.model.GetBucketTaggingConfigurationRequest;
-import com.amazonaws.services.s3.model.GetBucketVersioningConfigurationRequest;
-import com.amazonaws.services.s3.model.GetBucketWebsiteConfigurationRequest;
-import com.amazonaws.services.s3.model.GetObjectAclRequest;
-import com.amazonaws.services.s3.model.GetObjectMetadataRequest;
-import com.amazonaws.services.s3.model.GetObjectRequest;
-import com.amazonaws.services.s3.model.GetS3AccountOwnerRequest;
-import com.amazonaws.services.s3.model.HeadBucketRequest;
-import com.amazonaws.services.s3.model.HeadBucketResult;
-import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
-import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
-import com.amazonaws.services.s3.model.ListBucketsRequest;
-import com.amazonaws.services.s3.model.ListMultipartUploadsRequest;
-import com.amazonaws.services.s3.model.ListNextBatchOfObjectsRequest;
-import com.amazonaws.services.s3.model.ListNextBatchOfVersionsRequest;
-import com.amazonaws.services.s3.model.ListObjectsRequest;
-import com.amazonaws.services.s3.model.ListPartsRequest;
-import com.amazonaws.services.s3.model.ListVersionsRequest;
-import com.amazonaws.services.s3.model.MultipartUploadListing;
-import com.amazonaws.services.s3.model.ObjectListing;
-import com.amazonaws.services.s3.model.ObjectMetadata;
-import com.amazonaws.services.s3.model.Owner;
-import com.amazonaws.services.s3.model.PartListing;
-import com.amazonaws.services.s3.model.PutObjectRequest;
-import com.amazonaws.services.s3.model.PutObjectResult;
-import com.amazonaws.services.s3.model.RestoreObjectRequest;
-import com.amazonaws.services.s3.model.S3Object;
-import com.amazonaws.services.s3.model.SetBucketAclRequest;
-import com.amazonaws.services.s3.model.SetBucketCrossOriginConfigurationRequest;
-import com.amazonaws.services.s3.model.SetBucketLifecycleConfigurationRequest;
-import com.amazonaws.services.s3.model.SetBucketLoggingConfigurationRequest;
-import com.amazonaws.services.s3.model.SetBucketNotificationConfigurationRequest;
-import com.amazonaws.services.s3.model.SetBucketPolicyRequest;
-import com.amazonaws.services.s3.model.SetBucketReplicationConfigurationRequest;
-import com.amazonaws.services.s3.model.SetBucketTaggingConfigurationRequest;
-import com.amazonaws.services.s3.model.SetBucketVersioningConfigurationRequest;
-import com.amazonaws.services.s3.model.SetBucketWebsiteConfigurationRequest;
-import com.amazonaws.services.s3.model.SetObjectAclRequest;
-import com.amazonaws.services.s3.model.StorageClass;
-import com.amazonaws.services.s3.model.UploadPartRequest;
-import com.amazonaws.services.s3.model.UploadPartResult;
-import com.amazonaws.services.s3.model.VersionListing;
+import software.amazon.awssdk.awscore.exception.AwsServiceException;
+import software.amazon.awssdk.core.ResponseBytes;
+import software.amazon.awssdk.core.ResponseInputStream;
+import software.amazon.awssdk.core.exception.SdkClientException;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.core.sync.ResponseTransformer;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.S3ServiceClientConfiguration;
+import software.amazon.awssdk.services.s3.S3Utilities;
+import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest;
+import software.amazon.awssdk.services.s3.model.AbortMultipartUploadResponse;
+import software.amazon.awssdk.services.s3.model.BucketAlreadyExistsException;
+import software.amazon.awssdk.services.s3.model.BucketAlreadyOwnedByYouException;
+import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest;
+import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse;
+import software.amazon.awssdk.services.s3.model.CopyObjectRequest;
+import software.amazon.awssdk.services.s3.model.CopyObjectResponse;
+import software.amazon.awssdk.services.s3.model.CreateBucketMetadataTableConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.CreateBucketMetadataTableConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
+import software.amazon.awssdk.services.s3.model.CreateBucketResponse;
+import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
+import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse;
+import software.amazon.awssdk.services.s3.model.CreateSessionRequest;
+import software.amazon.awssdk.services.s3.model.CreateSessionResponse;
+import software.amazon.awssdk.services.s3.model.DeleteBucketAnalyticsConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.DeleteBucketAnalyticsConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.DeleteBucketCorsRequest;
+import software.amazon.awssdk.services.s3.model.DeleteBucketCorsResponse;
+import software.amazon.awssdk.services.s3.model.DeleteBucketEncryptionRequest;
+import software.amazon.awssdk.services.s3.model.DeleteBucketEncryptionResponse;
+import software.amazon.awssdk.services.s3.model.DeleteBucketIntelligentTieringConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.DeleteBucketIntelligentTieringConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.DeleteBucketInventoryConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.DeleteBucketInventoryConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.DeleteBucketLifecycleRequest;
+import software.amazon.awssdk.services.s3.model.DeleteBucketLifecycleResponse;
+import software.amazon.awssdk.services.s3.model.DeleteBucketMetadataTableConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.DeleteBucketMetadataTableConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.DeleteBucketMetricsConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.DeleteBucketMetricsConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.DeleteBucketOwnershipControlsRequest;
+import software.amazon.awssdk.services.s3.model.DeleteBucketOwnershipControlsResponse;
+import software.amazon.awssdk.services.s3.model.DeleteBucketPolicyRequest;
+import software.amazon.awssdk.services.s3.model.DeleteBucketPolicyResponse;
+import software.amazon.awssdk.services.s3.model.DeleteBucketReplicationRequest;
+import software.amazon.awssdk.services.s3.model.DeleteBucketReplicationResponse;
+import software.amazon.awssdk.services.s3.model.DeleteBucketRequest;
+import software.amazon.awssdk.services.s3.model.DeleteBucketResponse;
+import software.amazon.awssdk.services.s3.model.DeleteBucketTaggingRequest;
+import software.amazon.awssdk.services.s3.model.DeleteBucketTaggingResponse;
+import software.amazon.awssdk.services.s3.model.DeleteBucketWebsiteRequest;
+import software.amazon.awssdk.services.s3.model.DeleteBucketWebsiteResponse;
+import software.amazon.awssdk.services.s3.model.DeleteObjectRequest;
+import software.amazon.awssdk.services.s3.model.DeleteObjectResponse;
+import software.amazon.awssdk.services.s3.model.DeleteObjectTaggingRequest;
+import software.amazon.awssdk.services.s3.model.DeleteObjectTaggingResponse;
+import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest;
+import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse;
+import software.amazon.awssdk.services.s3.model.DeletePublicAccessBlockRequest;
+import software.amazon.awssdk.services.s3.model.DeletePublicAccessBlockResponse;
+import software.amazon.awssdk.services.s3.model.EncryptionTypeMismatchException;
+import software.amazon.awssdk.services.s3.model.GetBucketAccelerateConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketAccelerateConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketAclRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketAclResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketAnalyticsConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketAnalyticsConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketCorsRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketCorsResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketEncryptionRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketEncryptionResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketIntelligentTieringConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketIntelligentTieringConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketInventoryConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketInventoryConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketLifecycleConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketLifecycleConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketLocationRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketLocationResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketLoggingRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketLoggingResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketMetadataTableConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketMetadataTableConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketMetricsConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketMetricsConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketNotificationConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketNotificationConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketOwnershipControlsRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketOwnershipControlsResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketPolicyRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketPolicyResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketPolicyStatusRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketPolicyStatusResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketReplicationRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketReplicationResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketRequestPaymentRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketRequestPaymentResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketTaggingRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketTaggingResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketVersioningRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketVersioningResponse;
+import software.amazon.awssdk.services.s3.model.GetBucketWebsiteRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketWebsiteResponse;
+import software.amazon.awssdk.services.s3.model.GetObjectAclRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectAclResponse;
+import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse;
+import software.amazon.awssdk.services.s3.model.GetObjectLegalHoldRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectLegalHoldResponse;
+import software.amazon.awssdk.services.s3.model.GetObjectLockConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectLockConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.GetObjectRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectResponse;
+import software.amazon.awssdk.services.s3.model.GetObjectRetentionRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectRetentionResponse;
+import software.amazon.awssdk.services.s3.model.GetObjectTaggingRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectTaggingResponse;
+import software.amazon.awssdk.services.s3.model.GetObjectTorrentRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectTorrentResponse;
+import software.amazon.awssdk.services.s3.model.GetPublicAccessBlockRequest;
+import software.amazon.awssdk.services.s3.model.GetPublicAccessBlockResponse;
+import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
+import software.amazon.awssdk.services.s3.model.HeadBucketResponse;
+import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
+import software.amazon.awssdk.services.s3.model.HeadObjectResponse;
+import software.amazon.awssdk.services.s3.model.InvalidObjectStateException;
+import software.amazon.awssdk.services.s3.model.InvalidRequestException;
+import software.amazon.awssdk.services.s3.model.InvalidWriteOffsetException;
+import software.amazon.awssdk.services.s3.model.ListBucketAnalyticsConfigurationsRequest;
+import software.amazon.awssdk.services.s3.model.ListBucketAnalyticsConfigurationsResponse;
+import software.amazon.awssdk.services.s3.model.ListBucketIntelligentTieringConfigurationsRequest;
+import software.amazon.awssdk.services.s3.model.ListBucketIntelligentTieringConfigurationsResponse;
+import software.amazon.awssdk.services.s3.model.ListBucketInventoryConfigurationsRequest;
+import software.amazon.awssdk.services.s3.model.ListBucketInventoryConfigurationsResponse;
+import software.amazon.awssdk.services.s3.model.ListBucketMetricsConfigurationsRequest;
+import software.amazon.awssdk.services.s3.model.ListBucketMetricsConfigurationsResponse;
+import software.amazon.awssdk.services.s3.model.ListBucketsRequest;
+import software.amazon.awssdk.services.s3.model.ListBucketsResponse;
+import software.amazon.awssdk.services.s3.model.ListDirectoryBucketsRequest;
+import software.amazon.awssdk.services.s3.model.ListDirectoryBucketsResponse;
+import software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest;
+import software.amazon.awssdk.services.s3.model.ListMultipartUploadsResponse;
+import software.amazon.awssdk.services.s3.model.ListObjectVersionsRequest;
+import software.amazon.awssdk.services.s3.model.ListObjectVersionsResponse;
+import software.amazon.awssdk.services.s3.model.ListObjectsRequest;
+import software.amazon.awssdk.services.s3.model.ListObjectsResponse;
+import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
+import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;
+import software.amazon.awssdk.services.s3.model.ListPartsRequest;
+import software.amazon.awssdk.services.s3.model.ListPartsResponse;
+import software.amazon.awssdk.services.s3.model.NoSuchBucketException;
+import software.amazon.awssdk.services.s3.model.NoSuchKeyException;
+import software.amazon.awssdk.services.s3.model.NoSuchUploadException;
+import software.amazon.awssdk.services.s3.model.ObjectAlreadyInActiveTierErrorException;
+import software.amazon.awssdk.services.s3.model.ObjectNotInActiveTierErrorException;
+import software.amazon.awssdk.services.s3.model.PutBucketAccelerateConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.PutBucketAccelerateConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.PutBucketAclRequest;
+import software.amazon.awssdk.services.s3.model.PutBucketAclResponse;
+import software.amazon.awssdk.services.s3.model.PutBucketAnalyticsConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.PutBucketAnalyticsConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.PutBucketCorsRequest;
+import software.amazon.awssdk.services.s3.model.PutBucketCorsResponse;
+import software.amazon.awssdk.services.s3.model.PutBucketEncryptionRequest;
+import software.amazon.awssdk.services.s3.model.PutBucketEncryptionResponse;
+import software.amazon.awssdk.services.s3.model.PutBucketIntelligentTieringConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.PutBucketIntelligentTieringConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.PutBucketInventoryConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.PutBucketInventoryConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.PutBucketLifecycleConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.PutBucketLifecycleConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.PutBucketLoggingRequest;
+import software.amazon.awssdk.services.s3.model.PutBucketLoggingResponse;
+import software.amazon.awssdk.services.s3.model.PutBucketMetricsConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.PutBucketMetricsConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.PutBucketNotificationConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.PutBucketNotificationConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.PutBucketOwnershipControlsRequest;
+import software.amazon.awssdk.services.s3.model.PutBucketOwnershipControlsResponse;
+import software.amazon.awssdk.services.s3.model.PutBucketPolicyRequest;
+import software.amazon.awssdk.services.s3.model.PutBucketPolicyResponse;
+import software.amazon.awssdk.services.s3.model.PutBucketReplicationRequest;
+import software.amazon.awssdk.services.s3.model.PutBucketReplicationResponse;
+import software.amazon.awssdk.services.s3.model.PutBucketRequestPaymentRequest;
+import software.amazon.awssdk.services.s3.model.PutBucketRequestPaymentResponse;
+import software.amazon.awssdk.services.s3.model.PutBucketTaggingRequest;
+import software.amazon.awssdk.services.s3.model.PutBucketTaggingResponse;
+import software.amazon.awssdk.services.s3.model.PutBucketVersioningRequest;
+import software.amazon.awssdk.services.s3.model.PutBucketVersioningResponse;
+import software.amazon.awssdk.services.s3.model.PutBucketWebsiteRequest;
+import software.amazon.awssdk.services.s3.model.PutBucketWebsiteResponse;
+import software.amazon.awssdk.services.s3.model.PutObjectAclRequest;
+import software.amazon.awssdk.services.s3.model.PutObjectAclResponse;
+import software.amazon.awssdk.services.s3.model.PutObjectLegalHoldRequest;
+import software.amazon.awssdk.services.s3.model.PutObjectLegalHoldResponse;
+import software.amazon.awssdk.services.s3.model.PutObjectLockConfigurationRequest;
+import software.amazon.awssdk.services.s3.model.PutObjectLockConfigurationResponse;
+import software.amazon.awssdk.services.s3.model.PutObjectRequest;
+import software.amazon.awssdk.services.s3.model.PutObjectResponse;
+import software.amazon.awssdk.services.s3.model.PutObjectRetentionRequest;
+import software.amazon.awssdk.services.s3.model.PutObjectRetentionResponse;
+import software.amazon.awssdk.services.s3.model.PutObjectTaggingRequest;
+import software.amazon.awssdk.services.s3.model.PutObjectTaggingResponse;
+import software.amazon.awssdk.services.s3.model.PutPublicAccessBlockRequest;
+import software.amazon.awssdk.services.s3.model.PutPublicAccessBlockResponse;
+import software.amazon.awssdk.services.s3.model.RestoreObjectRequest;
+import software.amazon.awssdk.services.s3.model.RestoreObjectResponse;
+import software.amazon.awssdk.services.s3.model.S3Exception;
+import software.amazon.awssdk.services.s3.model.TooManyPartsException;
+import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest;
+import software.amazon.awssdk.services.s3.model.UploadPartCopyResponse;
+import software.amazon.awssdk.services.s3.model.UploadPartRequest;
+import software.amazon.awssdk.services.s3.model.UploadPartResponse;
+import software.amazon.awssdk.services.s3.model.WriteGetObjectResponseRequest;
+import software.amazon.awssdk.services.s3.model.WriteGetObjectResponseResponse;
+import software.amazon.awssdk.services.s3.paginators.ListBucketsIterable;
+import software.amazon.awssdk.services.s3.paginators.ListDirectoryBucketsIterable;
+import software.amazon.awssdk.services.s3.paginators.ListMultipartUploadsIterable;
+import software.amazon.awssdk.services.s3.paginators.ListObjectVersionsIterable;
+import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable;
+import software.amazon.awssdk.services.s3.paginators.ListPartsIterable;
+import software.amazon.awssdk.services.s3.waiters.S3Waiter;
import org.elasticsearch.core.SuppressForbidden;
-import java.io.File;
-import java.io.InputStream;
-import java.net.URL;
-import java.util.Date;
-import java.util.List;
+import java.nio.file.Path;
+import java.util.function.Consumer;
@SuppressForbidden(reason = "implements AWS api that uses java.io.File!")
-public class AmazonS3Wrapper extends AbstractAmazonS3 {
+public class AmazonS3Wrapper implements S3Client {
- protected AmazonS3 delegate;
+ protected S3Client delegate;
- public AmazonS3Wrapper(AmazonS3 delegate) {
+ public AmazonS3Wrapper(S3Client delegate) {
this.delegate = delegate;
}
@Override
- public void setEndpoint(String endpoint) {
- delegate.setEndpoint(endpoint);
+ public void close() {
+ delegate.close();
}
@Override
- public void setRegion(Region region) throws IllegalArgumentException {
- delegate.setRegion(region);
+ public String serviceName() {
+ return "AmazonS3Wrapper";
}
@Override
- public void setS3ClientOptions(S3ClientOptions clientOptions) {
- delegate.setS3ClientOptions(clientOptions);
+ public AbortMultipartUploadResponse abortMultipartUpload(AbortMultipartUploadRequest abortMultipartUploadRequest)
+ throws NoSuchUploadException, AwsServiceException, SdkClientException, S3Exception {
+ return delegate.abortMultipartUpload(abortMultipartUploadRequest);
}
@Override
- public void changeObjectStorageClass(String bucketName, String key, StorageClass newStorageClass) throws AmazonClientException,
- AmazonServiceException {
- delegate.changeObjectStorageClass(bucketName, key, newStorageClass);
+    public AbortMultipartUploadResponse abortMultipartUpload(Consumer<AbortMultipartUploadRequest.Builder> abortMultipartUploadRequest)
+ throws NoSuchUploadException, AwsServiceException, SdkClientException, S3Exception {
+ return delegate.abortMultipartUpload(abortMultipartUploadRequest);
}
@Override
- public void setObjectRedirectLocation(String bucketName, String key, String newRedirectLocation) throws AmazonClientException,
- AmazonServiceException {
- delegate.setObjectRedirectLocation(bucketName, key, newRedirectLocation);
+ public CompleteMultipartUploadResponse completeMultipartUpload(CompleteMultipartUploadRequest completeMultipartUploadRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.completeMultipartUpload(completeMultipartUploadRequest);
}
@Override
- public ObjectListing listObjects(String bucketName) throws AmazonClientException, AmazonServiceException {
- return delegate.listObjects(bucketName);
+ public CompleteMultipartUploadResponse completeMultipartUpload(
+        Consumer<CompleteMultipartUploadRequest.Builder> completeMultipartUploadRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.completeMultipartUpload(completeMultipartUploadRequest);
}
@Override
- public ObjectListing listObjects(String bucketName, String prefix) throws AmazonClientException, AmazonServiceException {
- return delegate.listObjects(bucketName, prefix);
+ public CopyObjectResponse copyObject(CopyObjectRequest copyObjectRequest) throws ObjectNotInActiveTierErrorException,
+ AwsServiceException, SdkClientException, S3Exception {
+ return delegate.copyObject(copyObjectRequest);
}
@Override
- public ObjectListing listObjects(ListObjectsRequest listObjectsRequest) throws AmazonClientException, AmazonServiceException {
- return delegate.listObjects(listObjectsRequest);
+    public CopyObjectResponse copyObject(Consumer<CopyObjectRequest.Builder> copyObjectRequest) throws ObjectNotInActiveTierErrorException,
+ AwsServiceException, SdkClientException, S3Exception {
+ return delegate.copyObject(copyObjectRequest);
}
@Override
- public ObjectListing listNextBatchOfObjects(ObjectListing previousObjectListing) throws AmazonClientException, AmazonServiceException {
- return delegate.listNextBatchOfObjects(previousObjectListing);
+ public CreateBucketResponse createBucket(CreateBucketRequest createBucketRequest) throws BucketAlreadyExistsException,
+ BucketAlreadyOwnedByYouException, AwsServiceException, SdkClientException, S3Exception {
+ return delegate.createBucket(createBucketRequest);
}
@Override
- public VersionListing listVersions(String bucketName, String prefix) throws AmazonClientException, AmazonServiceException {
- return delegate.listVersions(bucketName, prefix);
+    public CreateBucketResponse createBucket(Consumer<CreateBucketRequest.Builder> createBucketRequest) throws BucketAlreadyExistsException,
+ BucketAlreadyOwnedByYouException, AwsServiceException, SdkClientException, S3Exception {
+ return delegate.createBucket(createBucketRequest);
}
@Override
- public VersionListing listNextBatchOfVersions(VersionListing previousVersionListing) throws AmazonClientException,
- AmazonServiceException {
- return delegate.listNextBatchOfVersions(previousVersionListing);
+ public CreateBucketMetadataTableConfigurationResponse createBucketMetadataTableConfiguration(
+ CreateBucketMetadataTableConfigurationRequest createBucketMetadataTableConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.createBucketMetadataTableConfiguration(createBucketMetadataTableConfigurationRequest);
}
@Override
- public VersionListing listVersions(
- String bucketName,
- String prefix,
- String keyMarker,
- String versionIdMarker,
- String delimiter,
- Integer maxResults
- ) throws AmazonClientException, AmazonServiceException {
- return delegate.listVersions(bucketName, prefix, keyMarker, versionIdMarker, delimiter, maxResults);
+ public CreateBucketMetadataTableConfigurationResponse createBucketMetadataTableConfiguration(
+        Consumer<CreateBucketMetadataTableConfigurationRequest.Builder> createBucketMetadataTableConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.createBucketMetadataTableConfiguration(createBucketMetadataTableConfigurationRequest);
}
@Override
- public VersionListing listVersions(ListVersionsRequest listVersionsRequest) throws AmazonClientException, AmazonServiceException {
- return delegate.listVersions(listVersionsRequest);
+ public CreateMultipartUploadResponse createMultipartUpload(CreateMultipartUploadRequest createMultipartUploadRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.createMultipartUpload(createMultipartUploadRequest);
}
@Override
- public Owner getS3AccountOwner() throws AmazonClientException, AmazonServiceException {
- return delegate.getS3AccountOwner();
+    public CreateMultipartUploadResponse createMultipartUpload(Consumer<CreateMultipartUploadRequest.Builder> createMultipartUploadRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.createMultipartUpload(createMultipartUploadRequest);
}
@Override
- public boolean doesBucketExist(String bucketName) throws AmazonClientException, AmazonServiceException {
- return delegate.doesBucketExist(bucketName);
+ public CreateSessionResponse createSession(CreateSessionRequest createSessionRequest) throws NoSuchBucketException, AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.createSession(createSessionRequest);
}
@Override
- public List listBuckets() throws AmazonClientException, AmazonServiceException {
- return delegate.listBuckets();
+    public CreateSessionResponse createSession(Consumer<CreateSessionRequest.Builder> createSessionRequest) throws NoSuchBucketException,
+ AwsServiceException, SdkClientException, S3Exception {
+ return delegate.createSession(createSessionRequest);
}
@Override
- public List listBuckets(ListBucketsRequest listBucketsRequest) throws AmazonClientException, AmazonServiceException {
- return delegate.listBuckets(listBucketsRequest);
+ public DeleteBucketResponse deleteBucket(DeleteBucketRequest deleteBucketRequest) throws AwsServiceException, SdkClientException,
+ S3Exception {
+ return delegate.deleteBucket(deleteBucketRequest);
}
@Override
- public String getBucketLocation(String bucketName) throws AmazonClientException, AmazonServiceException {
- return delegate.getBucketLocation(bucketName);
+    public DeleteBucketResponse deleteBucket(Consumer<DeleteBucketRequest.Builder> deleteBucketRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.deleteBucket(deleteBucketRequest);
}
@Override
- public String getBucketLocation(GetBucketLocationRequest getBucketLocationRequest) throws AmazonClientException,
- AmazonServiceException {
- return delegate.getBucketLocation(getBucketLocationRequest);
+ public DeleteBucketAnalyticsConfigurationResponse deleteBucketAnalyticsConfiguration(
+ DeleteBucketAnalyticsConfigurationRequest deleteBucketAnalyticsConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketAnalyticsConfiguration(deleteBucketAnalyticsConfigurationRequest);
}
@Override
- public Bucket createBucket(CreateBucketRequest createBucketRequest) throws AmazonClientException, AmazonServiceException {
- return delegate.createBucket(createBucketRequest);
+ public DeleteBucketAnalyticsConfigurationResponse deleteBucketAnalyticsConfiguration(
+        Consumer<DeleteBucketAnalyticsConfigurationRequest.Builder> deleteBucketAnalyticsConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketAnalyticsConfiguration(deleteBucketAnalyticsConfigurationRequest);
}
@Override
- public Bucket createBucket(String bucketName) throws AmazonClientException, AmazonServiceException {
- return delegate.createBucket(bucketName);
+ public DeleteBucketCorsResponse deleteBucketCors(DeleteBucketCorsRequest deleteBucketCorsRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.deleteBucketCors(deleteBucketCorsRequest);
}
@Override
- public Bucket createBucket(String bucketName, com.amazonaws.services.s3.model.Region region) throws AmazonClientException,
- AmazonServiceException {
- return delegate.createBucket(bucketName, region);
+    public DeleteBucketCorsResponse deleteBucketCors(Consumer<DeleteBucketCorsRequest.Builder> deleteBucketCorsRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketCors(deleteBucketCorsRequest);
}
@Override
- public Bucket createBucket(String bucketName, String region) throws AmazonClientException, AmazonServiceException {
- return delegate.createBucket(bucketName, region);
+ public DeleteBucketEncryptionResponse deleteBucketEncryption(DeleteBucketEncryptionRequest deleteBucketEncryptionRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketEncryption(deleteBucketEncryptionRequest);
}
@Override
- public AccessControlList getObjectAcl(String bucketName, String key) throws AmazonClientException, AmazonServiceException {
- return delegate.getObjectAcl(bucketName, key);
+ public DeleteBucketEncryptionResponse deleteBucketEncryption(
+        Consumer<DeleteBucketEncryptionRequest.Builder> deleteBucketEncryptionRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketEncryption(deleteBucketEncryptionRequest);
}
@Override
- public AccessControlList getObjectAcl(String bucketName, String key, String versionId) throws AmazonClientException,
- AmazonServiceException {
- return delegate.getObjectAcl(bucketName, key, versionId);
+ public DeleteBucketIntelligentTieringConfigurationResponse deleteBucketIntelligentTieringConfiguration(
+ DeleteBucketIntelligentTieringConfigurationRequest deleteBucketIntelligentTieringConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketIntelligentTieringConfiguration(deleteBucketIntelligentTieringConfigurationRequest);
}
@Override
- public AccessControlList getObjectAcl(GetObjectAclRequest getObjectAclRequest) throws AmazonClientException, AmazonServiceException {
- return delegate.getObjectAcl(getObjectAclRequest);
+ public DeleteBucketIntelligentTieringConfigurationResponse deleteBucketIntelligentTieringConfiguration(
+        Consumer<DeleteBucketIntelligentTieringConfigurationRequest.Builder> deleteBucketIntelligentTieringConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketIntelligentTieringConfiguration(deleteBucketIntelligentTieringConfigurationRequest);
+ }
+
+ @Override
+ public DeleteBucketInventoryConfigurationResponse deleteBucketInventoryConfiguration(
+ DeleteBucketInventoryConfigurationRequest deleteBucketInventoryConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketInventoryConfiguration(deleteBucketInventoryConfigurationRequest);
+ }
+
+ @Override
+ public DeleteBucketInventoryConfigurationResponse deleteBucketInventoryConfiguration(
+        Consumer<DeleteBucketInventoryConfigurationRequest.Builder> deleteBucketInventoryConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketInventoryConfiguration(deleteBucketInventoryConfigurationRequest);
+ }
+
+ @Override
+ public DeleteBucketLifecycleResponse deleteBucketLifecycle(DeleteBucketLifecycleRequest deleteBucketLifecycleRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketLifecycle(deleteBucketLifecycleRequest);
+ }
+
+ @Override
+    public DeleteBucketLifecycleResponse deleteBucketLifecycle(Consumer<DeleteBucketLifecycleRequest.Builder> deleteBucketLifecycleRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketLifecycle(deleteBucketLifecycleRequest);
}
@Override
- public void setObjectAcl(String bucketName, String key, AccessControlList acl) throws AmazonClientException, AmazonServiceException {
- delegate.setObjectAcl(bucketName, key, acl);
+ public DeleteBucketMetadataTableConfigurationResponse deleteBucketMetadataTableConfiguration(
+ DeleteBucketMetadataTableConfigurationRequest deleteBucketMetadataTableConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketMetadataTableConfiguration(deleteBucketMetadataTableConfigurationRequest);
}
@Override
- public void setObjectAcl(String bucketName, String key, CannedAccessControlList acl) throws AmazonClientException,
- AmazonServiceException {
- delegate.setObjectAcl(bucketName, key, acl);
+ public DeleteBucketMetadataTableConfigurationResponse deleteBucketMetadataTableConfiguration(
+        Consumer<DeleteBucketMetadataTableConfigurationRequest.Builder> deleteBucketMetadataTableConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketMetadataTableConfiguration(deleteBucketMetadataTableConfigurationRequest);
}
@Override
- public void setObjectAcl(String bucketName, String key, String versionId, AccessControlList acl) throws AmazonClientException,
- AmazonServiceException {
- delegate.setObjectAcl(bucketName, key, versionId, acl);
+ public DeleteBucketMetricsConfigurationResponse deleteBucketMetricsConfiguration(
+ DeleteBucketMetricsConfigurationRequest deleteBucketMetricsConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketMetricsConfiguration(deleteBucketMetricsConfigurationRequest);
}
@Override
- public void setObjectAcl(String bucketName, String key, String versionId, CannedAccessControlList acl) throws AmazonClientException,
- AmazonServiceException {
- delegate.setObjectAcl(bucketName, key, versionId, acl);
+ public DeleteBucketMetricsConfigurationResponse deleteBucketMetricsConfiguration(
+        Consumer<DeleteBucketMetricsConfigurationRequest.Builder> deleteBucketMetricsConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketMetricsConfiguration(deleteBucketMetricsConfigurationRequest);
}
@Override
- public void setObjectAcl(SetObjectAclRequest setObjectAclRequest) throws AmazonClientException, AmazonServiceException {
- delegate.setObjectAcl(setObjectAclRequest);
+ public DeleteBucketOwnershipControlsResponse deleteBucketOwnershipControls(
+ DeleteBucketOwnershipControlsRequest deleteBucketOwnershipControlsRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketOwnershipControls(deleteBucketOwnershipControlsRequest);
}
@Override
- public AccessControlList getBucketAcl(String bucketName) throws AmazonClientException, AmazonServiceException {
- return delegate.getBucketAcl(bucketName);
+ public DeleteBucketOwnershipControlsResponse deleteBucketOwnershipControls(
+        Consumer<DeleteBucketOwnershipControlsRequest.Builder> deleteBucketOwnershipControlsRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketOwnershipControls(deleteBucketOwnershipControlsRequest);
}
@Override
- public void setBucketAcl(SetBucketAclRequest setBucketAclRequest) throws AmazonClientException, AmazonServiceException {
- delegate.setBucketAcl(setBucketAclRequest);
+ public DeleteBucketPolicyResponse deleteBucketPolicy(DeleteBucketPolicyRequest deleteBucketPolicyRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.deleteBucketPolicy(deleteBucketPolicyRequest);
}
@Override
- public AccessControlList getBucketAcl(GetBucketAclRequest getBucketAclRequest) throws AmazonClientException, AmazonServiceException {
+ public DeleteBucketPolicyResponse deleteBucketPolicy(Consumer<DeleteBucketPolicyRequest.Builder> deleteBucketPolicyRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketPolicy(deleteBucketPolicyRequest);
+ }
+
+ @Override
+ public DeleteBucketReplicationResponse deleteBucketReplication(DeleteBucketReplicationRequest deleteBucketReplicationRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketReplication(deleteBucketReplicationRequest);
+ }
+
+ @Override
+ public DeleteBucketReplicationResponse deleteBucketReplication(
+ Consumer<DeleteBucketReplicationRequest.Builder> deleteBucketReplicationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketReplication(deleteBucketReplicationRequest);
+ }
+
+ @Override
+ public DeleteBucketTaggingResponse deleteBucketTagging(DeleteBucketTaggingRequest deleteBucketTaggingRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketTagging(deleteBucketTaggingRequest);
+ }
+
+ @Override
+ public DeleteBucketTaggingResponse deleteBucketTagging(Consumer<DeleteBucketTaggingRequest.Builder> deleteBucketTaggingRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketTagging(deleteBucketTaggingRequest);
+ }
+
+ @Override
+ public DeleteBucketWebsiteResponse deleteBucketWebsite(DeleteBucketWebsiteRequest deleteBucketWebsiteRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketWebsite(deleteBucketWebsiteRequest);
+ }
+
+ @Override
+ public DeleteBucketWebsiteResponse deleteBucketWebsite(Consumer<DeleteBucketWebsiteRequest.Builder> deleteBucketWebsiteRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteBucketWebsite(deleteBucketWebsiteRequest);
+ }
+
+ @Override
+ public DeleteObjectResponse deleteObject(DeleteObjectRequest deleteObjectRequest) throws AwsServiceException, SdkClientException,
+ S3Exception {
+ return delegate.deleteObject(deleteObjectRequest);
+ }
+
+ @Override
+ public DeleteObjectResponse deleteObject(Consumer<DeleteObjectRequest.Builder> deleteObjectRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.deleteObject(deleteObjectRequest);
+ }
+
+ @Override
+ public DeleteObjectTaggingResponse deleteObjectTagging(DeleteObjectTaggingRequest deleteObjectTaggingRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteObjectTagging(deleteObjectTaggingRequest);
+ }
+
+ @Override
+ public DeleteObjectTaggingResponse deleteObjectTagging(Consumer<DeleteObjectTaggingRequest.Builder> deleteObjectTaggingRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deleteObjectTagging(deleteObjectTaggingRequest);
+ }
+
+ @Override
+ public DeleteObjectsResponse deleteObjects(DeleteObjectsRequest deleteObjectsRequest) throws AwsServiceException, SdkClientException,
+ S3Exception {
+ return delegate.deleteObjects(deleteObjectsRequest);
+ }
+
+ @Override
+ public DeleteObjectsResponse deleteObjects(Consumer<DeleteObjectsRequest.Builder> deleteObjectsRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.deleteObjects(deleteObjectsRequest);
+ }
+
+ @Override
+ public DeletePublicAccessBlockResponse deletePublicAccessBlock(DeletePublicAccessBlockRequest deletePublicAccessBlockRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deletePublicAccessBlock(deletePublicAccessBlockRequest);
+ }
+
+ @Override
+ public DeletePublicAccessBlockResponse deletePublicAccessBlock(
+ Consumer<DeletePublicAccessBlockRequest.Builder> deletePublicAccessBlockRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.deletePublicAccessBlock(deletePublicAccessBlockRequest);
+ }
+
+ @Override
+ public GetBucketAccelerateConfigurationResponse getBucketAccelerateConfiguration(
+ GetBucketAccelerateConfigurationRequest getBucketAccelerateConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketAccelerateConfiguration(getBucketAccelerateConfigurationRequest);
+ }
+
+ @Override
+ public GetBucketAccelerateConfigurationResponse getBucketAccelerateConfiguration(
+ Consumer<GetBucketAccelerateConfigurationRequest.Builder> getBucketAccelerateConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketAccelerateConfiguration(getBucketAccelerateConfigurationRequest);
+ }
+
+ @Override
+ public GetBucketAclResponse getBucketAcl(GetBucketAclRequest getBucketAclRequest) throws AwsServiceException, SdkClientException,
+ S3Exception {
+ return delegate.getBucketAcl(getBucketAclRequest);
+ }
+
+ @Override
+ public GetBucketAclResponse getBucketAcl(Consumer<GetBucketAclRequest.Builder> getBucketAclRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
return delegate.getBucketAcl(getBucketAclRequest);
}
@Override
- public void setBucketAcl(String bucketName, AccessControlList acl) throws AmazonClientException, AmazonServiceException {
- delegate.setBucketAcl(bucketName, acl);
+ public GetBucketAnalyticsConfigurationResponse getBucketAnalyticsConfiguration(
+ GetBucketAnalyticsConfigurationRequest getBucketAnalyticsConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketAnalyticsConfiguration(getBucketAnalyticsConfigurationRequest);
+ }
+
+ @Override
+ public GetBucketAnalyticsConfigurationResponse getBucketAnalyticsConfiguration(
+ Consumer<GetBucketAnalyticsConfigurationRequest.Builder> getBucketAnalyticsConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketAnalyticsConfiguration(getBucketAnalyticsConfigurationRequest);
+ }
+
+ @Override
+ public GetBucketCorsResponse getBucketCors(GetBucketCorsRequest getBucketCorsRequest) throws AwsServiceException, SdkClientException,
+ S3Exception {
+ return delegate.getBucketCors(getBucketCorsRequest);
+ }
+
+ @Override
+ public GetBucketCorsResponse getBucketCors(Consumer<GetBucketCorsRequest.Builder> getBucketCorsRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.getBucketCors(getBucketCorsRequest);
+ }
+
+ @Override
+ public GetBucketEncryptionResponse getBucketEncryption(GetBucketEncryptionRequest getBucketEncryptionRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketEncryption(getBucketEncryptionRequest);
+ }
+
+ @Override
+ public GetBucketEncryptionResponse getBucketEncryption(Consumer<GetBucketEncryptionRequest.Builder> getBucketEncryptionRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketEncryption(getBucketEncryptionRequest);
+ }
+
+ @Override
+ public GetBucketIntelligentTieringConfigurationResponse getBucketIntelligentTieringConfiguration(
+ GetBucketIntelligentTieringConfigurationRequest getBucketIntelligentTieringConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketIntelligentTieringConfiguration(getBucketIntelligentTieringConfigurationRequest);
+ }
+
+ @Override
+ public GetBucketIntelligentTieringConfigurationResponse getBucketIntelligentTieringConfiguration(
+ Consumer<GetBucketIntelligentTieringConfigurationRequest.Builder> getBucketIntelligentTieringConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketIntelligentTieringConfiguration(getBucketIntelligentTieringConfigurationRequest);
+ }
+
+ @Override
+ public GetBucketInventoryConfigurationResponse getBucketInventoryConfiguration(
+ GetBucketInventoryConfigurationRequest getBucketInventoryConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketInventoryConfiguration(getBucketInventoryConfigurationRequest);
+ }
+
+ @Override
+ public GetBucketInventoryConfigurationResponse getBucketInventoryConfiguration(
+ Consumer<GetBucketInventoryConfigurationRequest.Builder> getBucketInventoryConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketInventoryConfiguration(getBucketInventoryConfigurationRequest);
+ }
+
+ @Override
+ public GetBucketLifecycleConfigurationResponse getBucketLifecycleConfiguration(
+ GetBucketLifecycleConfigurationRequest getBucketLifecycleConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketLifecycleConfiguration(getBucketLifecycleConfigurationRequest);
+ }
+
+ @Override
+ public GetBucketLifecycleConfigurationResponse getBucketLifecycleConfiguration(
+ Consumer<GetBucketLifecycleConfigurationRequest.Builder> getBucketLifecycleConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketLifecycleConfiguration(getBucketLifecycleConfigurationRequest);
}
@Override
- public void setBucketAcl(String bucketName, CannedAccessControlList acl) throws AmazonClientException, AmazonServiceException {
- delegate.setBucketAcl(bucketName, acl);
+ public GetBucketLocationResponse getBucketLocation(GetBucketLocationRequest getBucketLocationRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.getBucketLocation(getBucketLocationRequest);
}
@Override
- public ObjectMetadata getObjectMetadata(String bucketName, String key) throws AmazonClientException, AmazonServiceException {
- return delegate.getObjectMetadata(bucketName, key);
+ public GetBucketLocationResponse getBucketLocation(Consumer<GetBucketLocationRequest.Builder> getBucketLocationRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketLocation(getBucketLocationRequest);
}
@Override
- public ObjectMetadata getObjectMetadata(GetObjectMetadataRequest getObjectMetadataRequest) throws AmazonClientException,
- AmazonServiceException {
- return delegate.getObjectMetadata(getObjectMetadataRequest);
+ public GetBucketLoggingResponse getBucketLogging(GetBucketLoggingRequest getBucketLoggingRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.getBucketLogging(getBucketLoggingRequest);
}
@Override
- public S3Object getObject(String bucketName, String key) throws AmazonClientException, AmazonServiceException {
- return delegate.getObject(bucketName, key);
+ public GetBucketLoggingResponse getBucketLogging(Consumer<GetBucketLoggingRequest.Builder> getBucketLoggingRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketLogging(getBucketLoggingRequest);
}
@Override
- public S3Object getObject(GetObjectRequest getObjectRequest) throws AmazonClientException, AmazonServiceException {
+ public GetBucketMetadataTableConfigurationResponse getBucketMetadataTableConfiguration(
+ GetBucketMetadataTableConfigurationRequest getBucketMetadataTableConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketMetadataTableConfiguration(getBucketMetadataTableConfigurationRequest);
+ }
+
+ @Override
+ public GetBucketMetadataTableConfigurationResponse getBucketMetadataTableConfiguration(
+ Consumer<GetBucketMetadataTableConfigurationRequest.Builder> getBucketMetadataTableConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketMetadataTableConfiguration(getBucketMetadataTableConfigurationRequest);
+ }
+
+ @Override
+ public GetBucketMetricsConfigurationResponse getBucketMetricsConfiguration(
+ GetBucketMetricsConfigurationRequest getBucketMetricsConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketMetricsConfiguration(getBucketMetricsConfigurationRequest);
+ }
+
+ @Override
+ public GetBucketMetricsConfigurationResponse getBucketMetricsConfiguration(
+ Consumer<GetBucketMetricsConfigurationRequest.Builder> getBucketMetricsConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketMetricsConfiguration(getBucketMetricsConfigurationRequest);
+ }
+
+ @Override
+ public GetBucketNotificationConfigurationResponse getBucketNotificationConfiguration(
+ GetBucketNotificationConfigurationRequest getBucketNotificationConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketNotificationConfiguration(getBucketNotificationConfigurationRequest);
+ }
+
+ @Override
+ public GetBucketNotificationConfigurationResponse getBucketNotificationConfiguration(
+ Consumer<GetBucketNotificationConfigurationRequest.Builder> getBucketNotificationConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketNotificationConfiguration(getBucketNotificationConfigurationRequest);
+ }
+
+ @Override
+ public GetBucketOwnershipControlsResponse getBucketOwnershipControls(
+ GetBucketOwnershipControlsRequest getBucketOwnershipControlsRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketOwnershipControls(getBucketOwnershipControlsRequest);
+ }
+
+ @Override
+ public GetBucketOwnershipControlsResponse getBucketOwnershipControls(
+ Consumer<GetBucketOwnershipControlsRequest.Builder> getBucketOwnershipControlsRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketOwnershipControls(getBucketOwnershipControlsRequest);
+ }
+
+ @Override
+ public GetBucketPolicyResponse getBucketPolicy(GetBucketPolicyRequest getBucketPolicyRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.getBucketPolicy(getBucketPolicyRequest);
+ }
+
+ @Override
+ public GetBucketPolicyResponse getBucketPolicy(Consumer<GetBucketPolicyRequest.Builder> getBucketPolicyRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketPolicy(getBucketPolicyRequest);
+ }
+
+ @Override
+ public GetBucketPolicyStatusResponse getBucketPolicyStatus(GetBucketPolicyStatusRequest getBucketPolicyStatusRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketPolicyStatus(getBucketPolicyStatusRequest);
+ }
+
+ @Override
+ public GetBucketPolicyStatusResponse getBucketPolicyStatus(Consumer<GetBucketPolicyStatusRequest.Builder> getBucketPolicyStatusRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketPolicyStatus(getBucketPolicyStatusRequest);
+ }
+
+ @Override
+ public GetBucketReplicationResponse getBucketReplication(GetBucketReplicationRequest getBucketReplicationRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketReplication(getBucketReplicationRequest);
+ }
+
+ @Override
+ public GetBucketReplicationResponse getBucketReplication(Consumer<GetBucketReplicationRequest.Builder> getBucketReplicationRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketReplication(getBucketReplicationRequest);
+ }
+
+ @Override
+ public GetBucketRequestPaymentResponse getBucketRequestPayment(GetBucketRequestPaymentRequest getBucketRequestPaymentRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketRequestPayment(getBucketRequestPaymentRequest);
+ }
+
+ @Override
+ public GetBucketRequestPaymentResponse getBucketRequestPayment(
+ Consumer<GetBucketRequestPaymentRequest.Builder> getBucketRequestPaymentRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketRequestPayment(getBucketRequestPaymentRequest);
+ }
+
+ @Override
+ public GetBucketTaggingResponse getBucketTagging(GetBucketTaggingRequest getBucketTaggingRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.getBucketTagging(getBucketTaggingRequest);
+ }
+
+ @Override
+ public GetBucketTaggingResponse getBucketTagging(Consumer<GetBucketTaggingRequest.Builder> getBucketTaggingRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketTagging(getBucketTaggingRequest);
+ }
+
+ @Override
+ public GetBucketVersioningResponse getBucketVersioning(GetBucketVersioningRequest getBucketVersioningRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketVersioning(getBucketVersioningRequest);
+ }
+
+ @Override
+ public GetBucketVersioningResponse getBucketVersioning(Consumer<GetBucketVersioningRequest.Builder> getBucketVersioningRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketVersioning(getBucketVersioningRequest);
+ }
+
+ @Override
+ public GetBucketWebsiteResponse getBucketWebsite(GetBucketWebsiteRequest getBucketWebsiteRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.getBucketWebsite(getBucketWebsiteRequest);
+ }
+
+ @Override
+ public GetBucketWebsiteResponse getBucketWebsite(Consumer<GetBucketWebsiteRequest.Builder> getBucketWebsiteRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getBucketWebsite(getBucketWebsiteRequest);
+ }
+
+ @Override
+ public <ReturnT> ReturnT getObject(
+ GetObjectRequest getObjectRequest,
+ ResponseTransformer<GetObjectResponse, ReturnT> responseTransformer
+ ) throws NoSuchKeyException, InvalidObjectStateException, AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObject(getObjectRequest, responseTransformer);
+ }
+
+ @Override
+ public <ReturnT> ReturnT getObject(
+ Consumer<GetObjectRequest.Builder> getObjectRequest,
+ ResponseTransformer<GetObjectResponse, ReturnT> responseTransformer
+ ) throws NoSuchKeyException, InvalidObjectStateException, AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObject(getObjectRequest, responseTransformer);
+ }
+
+ @Override
+ public GetObjectResponse getObject(GetObjectRequest getObjectRequest, Path destinationPath) throws NoSuchKeyException,
+ InvalidObjectStateException, AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObject(getObjectRequest, destinationPath);
+ }
+
+ @Override
+ public GetObjectResponse getObject(Consumer<GetObjectRequest.Builder> getObjectRequest, Path destinationPath) throws NoSuchKeyException,
+ InvalidObjectStateException, AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObject(getObjectRequest, destinationPath);
+ }
+
+ @Override
+ public ResponseInputStream<GetObjectResponse> getObject(GetObjectRequest getObjectRequest) throws NoSuchKeyException,
+ InvalidObjectStateException, AwsServiceException, SdkClientException, S3Exception {
return delegate.getObject(getObjectRequest);
}
@Override
- public ObjectMetadata getObject(GetObjectRequest getObjectRequest, File destinationFile) throws AmazonClientException,
- AmazonServiceException {
- return delegate.getObject(getObjectRequest, destinationFile);
+ public ResponseInputStream<GetObjectResponse> getObject(Consumer<GetObjectRequest.Builder> getObjectRequest) throws NoSuchKeyException,
+ InvalidObjectStateException, AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObject(getObjectRequest);
}
@Override
- public void deleteBucket(DeleteBucketRequest deleteBucketRequest) throws AmazonClientException, AmazonServiceException {
- delegate.deleteBucket(deleteBucketRequest);
+ public ResponseBytes<GetObjectResponse> getObjectAsBytes(GetObjectRequest getObjectRequest) throws NoSuchKeyException,
+ InvalidObjectStateException, AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObjectAsBytes(getObjectRequest);
}
@Override
- public void deleteBucket(String bucketName) throws AmazonClientException, AmazonServiceException {
- delegate.deleteBucket(bucketName);
+ public ResponseBytes<GetObjectResponse> getObjectAsBytes(Consumer<GetObjectRequest.Builder> getObjectRequest) throws NoSuchKeyException,
+ InvalidObjectStateException, AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObjectAsBytes(getObjectRequest);
}
@Override
- public void setBucketReplicationConfiguration(String bucketName, BucketReplicationConfiguration configuration)
- throws AmazonServiceException, AmazonClientException {
- delegate.setBucketReplicationConfiguration(bucketName, configuration);
+ public GetObjectAclResponse getObjectAcl(GetObjectAclRequest getObjectAclRequest) throws NoSuchKeyException, AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.getObjectAcl(getObjectAclRequest);
}
@Override
- public void setBucketReplicationConfiguration(SetBucketReplicationConfigurationRequest setBucketReplicationConfigurationRequest)
- throws AmazonServiceException, AmazonClientException {
- delegate.setBucketReplicationConfiguration(setBucketReplicationConfigurationRequest);
+ public GetObjectAclResponse getObjectAcl(Consumer<GetObjectAclRequest.Builder> getObjectAclRequest) throws NoSuchKeyException,
+ AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObjectAcl(getObjectAclRequest);
}
@Override
- public BucketReplicationConfiguration getBucketReplicationConfiguration(String bucketName) throws AmazonServiceException,
- AmazonClientException {
- return delegate.getBucketReplicationConfiguration(bucketName);
+ public GetObjectAttributesResponse getObjectAttributes(GetObjectAttributesRequest getObjectAttributesRequest) throws NoSuchKeyException,
+ AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObjectAttributes(getObjectAttributesRequest);
}
@Override
- public void deleteBucketReplicationConfiguration(String bucketName) throws AmazonServiceException, AmazonClientException {
- delegate.deleteBucketReplicationConfiguration(bucketName);
+ public GetObjectAttributesResponse getObjectAttributes(Consumer<GetObjectAttributesRequest.Builder> getObjectAttributesRequest)
+ throws NoSuchKeyException, AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObjectAttributes(getObjectAttributesRequest);
}
@Override
- public void deleteBucketReplicationConfiguration(DeleteBucketReplicationConfigurationRequest request) throws AmazonServiceException,
- AmazonClientException {
- delegate.deleteBucketReplicationConfiguration(request);
+ public GetObjectLegalHoldResponse getObjectLegalHold(GetObjectLegalHoldRequest getObjectLegalHoldRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.getObjectLegalHold(getObjectLegalHoldRequest);
}
@Override
- public boolean doesObjectExist(String bucketName, String objectName) throws AmazonServiceException, AmazonClientException {
- return delegate.doesObjectExist(bucketName, objectName);
+ public GetObjectLegalHoldResponse getObjectLegalHold(Consumer<GetObjectLegalHoldRequest.Builder> getObjectLegalHoldRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObjectLegalHold(getObjectLegalHoldRequest);
}
@Override
- public PutObjectResult putObject(PutObjectRequest putObjectRequest) throws AmazonClientException, AmazonServiceException {
- return delegate.putObject(putObjectRequest);
+ public GetObjectLockConfigurationResponse getObjectLockConfiguration(
+ GetObjectLockConfigurationRequest getObjectLockConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObjectLockConfiguration(getObjectLockConfigurationRequest);
}
@Override
- public PutObjectResult putObject(String bucketName, String key, File file) throws AmazonClientException, AmazonServiceException {
- return delegate.putObject(bucketName, key, file);
+ public GetObjectLockConfigurationResponse getObjectLockConfiguration(
+ Consumer<GetObjectLockConfigurationRequest.Builder> getObjectLockConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObjectLockConfiguration(getObjectLockConfigurationRequest);
}
@Override
- public PutObjectResult putObject(String bucketName, String key, InputStream input, ObjectMetadata metadata)
- throws AmazonClientException, AmazonServiceException {
- return delegate.putObject(bucketName, key, input, metadata);
+ public GetObjectRetentionResponse getObjectRetention(GetObjectRetentionRequest getObjectRetentionRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.getObjectRetention(getObjectRetentionRequest);
}
@Override
- public CopyObjectResult copyObject(String sourceBucketName, String sourceKey, String destinationBucketName, String destinationKey)
- throws AmazonClientException, AmazonServiceException {
- return delegate.copyObject(sourceBucketName, sourceKey, destinationBucketName, destinationKey);
+ public GetObjectRetentionResponse getObjectRetention(Consumer<GetObjectRetentionRequest.Builder> getObjectRetentionRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObjectRetention(getObjectRetentionRequest);
}
@Override
- public CopyObjectResult copyObject(CopyObjectRequest copyObjectRequest) throws AmazonClientException, AmazonServiceException {
- return delegate.copyObject(copyObjectRequest);
+ public GetObjectTaggingResponse getObjectTagging(GetObjectTaggingRequest getObjectTaggingRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.getObjectTagging(getObjectTaggingRequest);
}
@Override
- public CopyPartResult copyPart(CopyPartRequest copyPartRequest) throws AmazonClientException, AmazonServiceException {
- return delegate.copyPart(copyPartRequest);
+ public GetObjectTaggingResponse getObjectTagging(Consumer<GetObjectTaggingRequest.Builder> getObjectTaggingRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObjectTagging(getObjectTaggingRequest);
}
@Override
- public void deleteObject(String bucketName, String key) throws AmazonClientException, AmazonServiceException {
- delegate.deleteObject(bucketName, key);
+ public <ReturnT> ReturnT getObjectTorrent(
+ GetObjectTorrentRequest getObjectTorrentRequest,
+ ResponseTransformer<GetObjectTorrentResponse, ReturnT> responseTransformer
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObjectTorrent(getObjectTorrentRequest, responseTransformer);
}
@Override
- public void deleteObject(DeleteObjectRequest deleteObjectRequest) throws AmazonClientException, AmazonServiceException {
- delegate.deleteObject(deleteObjectRequest);
+ public <ReturnT> ReturnT getObjectTorrent(
+ Consumer<GetObjectTorrentRequest.Builder> getObjectTorrentRequest,
+ ResponseTransformer<GetObjectTorrentResponse, ReturnT> responseTransformer
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObjectTorrent(getObjectTorrentRequest, responseTransformer);
}
@Override
- public DeleteObjectsResult deleteObjects(DeleteObjectsRequest deleteObjectsRequest) throws AmazonClientException,
- AmazonServiceException {
- return delegate.deleteObjects(deleteObjectsRequest);
+ public GetObjectTorrentResponse getObjectTorrent(GetObjectTorrentRequest getObjectTorrentRequest, Path destinationPath)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObjectTorrent(getObjectTorrentRequest, destinationPath);
}
@Override
- public void deleteVersion(String bucketName, String key, String versionId) throws AmazonClientException, AmazonServiceException {
- delegate.deleteVersion(bucketName, key, versionId);
+ public GetObjectTorrentResponse getObjectTorrent(
+ Consumer<GetObjectTorrentRequest.Builder> getObjectTorrentRequest,
+ Path destinationPath
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObjectTorrent(getObjectTorrentRequest, destinationPath);
}
@Override
- public void deleteVersion(DeleteVersionRequest deleteVersionRequest) throws AmazonClientException, AmazonServiceException {
- delegate.deleteVersion(deleteVersionRequest);
+ public ResponseInputStream<GetObjectTorrentResponse> getObjectTorrent(GetObjectTorrentRequest getObjectTorrentRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObjectTorrent(getObjectTorrentRequest);
}
@Override
- public BucketLoggingConfiguration getBucketLoggingConfiguration(String bucketName) throws AmazonClientException,
- AmazonServiceException {
- return delegate.getBucketLoggingConfiguration(bucketName);
+ public ResponseInputStream<GetObjectTorrentResponse> getObjectTorrent(Consumer<GetObjectTorrentRequest.Builder> getObjectTorrentRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObjectTorrent(getObjectTorrentRequest);
}
@Override
- public void setBucketLoggingConfiguration(SetBucketLoggingConfigurationRequest setBucketLoggingConfigurationRequest)
- throws AmazonClientException, AmazonServiceException {
- delegate.setBucketLoggingConfiguration(setBucketLoggingConfigurationRequest);
+ public ResponseBytes<GetObjectTorrentResponse> getObjectTorrentAsBytes(GetObjectTorrentRequest getObjectTorrentRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObjectTorrentAsBytes(getObjectTorrentRequest);
}
@Override
- public BucketVersioningConfiguration getBucketVersioningConfiguration(String bucketName) throws AmazonClientException,
- AmazonServiceException {
- return delegate.getBucketVersioningConfiguration(bucketName);
+ public ResponseBytes<GetObjectTorrentResponse> getObjectTorrentAsBytes(
+ Consumer<GetObjectTorrentRequest.Builder> getObjectTorrentRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getObjectTorrentAsBytes(getObjectTorrentRequest);
}
@Override
- public void setBucketVersioningConfiguration(SetBucketVersioningConfigurationRequest setBucketVersioningConfigurationRequest)
- throws AmazonClientException, AmazonServiceException {
- delegate.setBucketVersioningConfiguration(setBucketVersioningConfigurationRequest);
+ public GetPublicAccessBlockResponse getPublicAccessBlock(GetPublicAccessBlockRequest getPublicAccessBlockRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getPublicAccessBlock(getPublicAccessBlockRequest);
}
@Override
- public BucketLifecycleConfiguration getBucketLifecycleConfiguration(String bucketName) {
- return delegate.getBucketLifecycleConfiguration(bucketName);
+ public GetPublicAccessBlockResponse getPublicAccessBlock(Consumer<GetPublicAccessBlockRequest.Builder> getPublicAccessBlockRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.getPublicAccessBlock(getPublicAccessBlockRequest);
}
@Override
- public void setBucketLifecycleConfiguration(String bucketName, BucketLifecycleConfiguration bucketLifecycleConfiguration) {
- delegate.setBucketLifecycleConfiguration(bucketName, bucketLifecycleConfiguration);
+ public HeadBucketResponse headBucket(HeadBucketRequest headBucketRequest) throws NoSuchBucketException, AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.headBucket(headBucketRequest);
}
@Override
- public void setBucketLifecycleConfiguration(SetBucketLifecycleConfigurationRequest setBucketLifecycleConfigurationRequest) {
- delegate.setBucketLifecycleConfiguration(setBucketLifecycleConfigurationRequest);
+ public HeadBucketResponse headBucket(Consumer<HeadBucketRequest.Builder> headBucketRequest) throws NoSuchBucketException,
+ AwsServiceException, SdkClientException, S3Exception {
+ return delegate.headBucket(headBucketRequest);
}
@Override
- public void deleteBucketLifecycleConfiguration(String bucketName) {
- delegate.deleteBucketLifecycleConfiguration(bucketName);
+ public HeadObjectResponse headObject(HeadObjectRequest headObjectRequest) throws NoSuchKeyException, AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.headObject(headObjectRequest);
}
@Override
- public void deleteBucketLifecycleConfiguration(DeleteBucketLifecycleConfigurationRequest deleteBucketLifecycleConfigurationRequest) {
- delegate.deleteBucketLifecycleConfiguration(deleteBucketLifecycleConfigurationRequest);
+ public HeadObjectResponse headObject(Consumer<HeadObjectRequest.Builder> headObjectRequest) throws NoSuchKeyException,
+ AwsServiceException, SdkClientException, S3Exception {
+ return delegate.headObject(headObjectRequest);
}
@Override
- public BucketCrossOriginConfiguration getBucketCrossOriginConfiguration(String bucketName) {
- return delegate.getBucketCrossOriginConfiguration(bucketName);
+ public ListBucketAnalyticsConfigurationsResponse listBucketAnalyticsConfigurations(
+ ListBucketAnalyticsConfigurationsRequest listBucketAnalyticsConfigurationsRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listBucketAnalyticsConfigurations(listBucketAnalyticsConfigurationsRequest);
}
@Override
- public void setBucketCrossOriginConfiguration(String bucketName, BucketCrossOriginConfiguration bucketCrossOriginConfiguration) {
- delegate.setBucketCrossOriginConfiguration(bucketName, bucketCrossOriginConfiguration);
+ public ListBucketAnalyticsConfigurationsResponse listBucketAnalyticsConfigurations(
+ Consumer<ListBucketAnalyticsConfigurationsRequest.Builder> listBucketAnalyticsConfigurationsRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listBucketAnalyticsConfigurations(listBucketAnalyticsConfigurationsRequest);
}
@Override
- public void setBucketCrossOriginConfiguration(SetBucketCrossOriginConfigurationRequest setBucketCrossOriginConfigurationRequest) {
- delegate.setBucketCrossOriginConfiguration(setBucketCrossOriginConfigurationRequest);
+ public ListBucketIntelligentTieringConfigurationsResponse listBucketIntelligentTieringConfigurations(
+ ListBucketIntelligentTieringConfigurationsRequest listBucketIntelligentTieringConfigurationsRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listBucketIntelligentTieringConfigurations(listBucketIntelligentTieringConfigurationsRequest);
}
@Override
- public void deleteBucketCrossOriginConfiguration(String bucketName) {
- delegate.deleteBucketCrossOriginConfiguration(bucketName);
+ public ListBucketIntelligentTieringConfigurationsResponse listBucketIntelligentTieringConfigurations(
+ Consumer<ListBucketIntelligentTieringConfigurationsRequest.Builder> listBucketIntelligentTieringConfigurationsRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listBucketIntelligentTieringConfigurations(listBucketIntelligentTieringConfigurationsRequest);
}
@Override
- public void deleteBucketCrossOriginConfiguration(
- DeleteBucketCrossOriginConfigurationRequest deleteBucketCrossOriginConfigurationRequest
- ) {
- delegate.deleteBucketCrossOriginConfiguration(deleteBucketCrossOriginConfigurationRequest);
+ public ListBucketInventoryConfigurationsResponse listBucketInventoryConfigurations(
+ ListBucketInventoryConfigurationsRequest listBucketInventoryConfigurationsRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listBucketInventoryConfigurations(listBucketInventoryConfigurationsRequest);
}
@Override
- public BucketTaggingConfiguration getBucketTaggingConfiguration(String bucketName) {
- return delegate.getBucketTaggingConfiguration(bucketName);
+ public ListBucketInventoryConfigurationsResponse listBucketInventoryConfigurations(
+ Consumer<ListBucketInventoryConfigurationsRequest.Builder> listBucketInventoryConfigurationsRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listBucketInventoryConfigurations(listBucketInventoryConfigurationsRequest);
}
@Override
- public void setBucketTaggingConfiguration(String bucketName, BucketTaggingConfiguration bucketTaggingConfiguration) {
- delegate.setBucketTaggingConfiguration(bucketName, bucketTaggingConfiguration);
+ public ListBucketMetricsConfigurationsResponse listBucketMetricsConfigurations(
+ ListBucketMetricsConfigurationsRequest listBucketMetricsConfigurationsRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listBucketMetricsConfigurations(listBucketMetricsConfigurationsRequest);
}
@Override
- public void setBucketTaggingConfiguration(SetBucketTaggingConfigurationRequest setBucketTaggingConfigurationRequest) {
- delegate.setBucketTaggingConfiguration(setBucketTaggingConfigurationRequest);
+ public ListBucketMetricsConfigurationsResponse listBucketMetricsConfigurations(
+ Consumer<ListBucketMetricsConfigurationsRequest.Builder> listBucketMetricsConfigurationsRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listBucketMetricsConfigurations(listBucketMetricsConfigurationsRequest);
}
@Override
- public void deleteBucketTaggingConfiguration(String bucketName) {
- delegate.deleteBucketTaggingConfiguration(bucketName);
+ public ListBucketsResponse listBuckets(ListBucketsRequest listBucketsRequest) throws AwsServiceException, SdkClientException,
+ S3Exception {
+ return delegate.listBuckets(listBucketsRequest);
}
@Override
- public void deleteBucketTaggingConfiguration(DeleteBucketTaggingConfigurationRequest deleteBucketTaggingConfigurationRequest) {
- delegate.deleteBucketTaggingConfiguration(deleteBucketTaggingConfigurationRequest);
+ public ListBucketsResponse listBuckets(Consumer<ListBucketsRequest.Builder> listBucketsRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.listBuckets(listBucketsRequest);
}
@Override
- public BucketNotificationConfiguration getBucketNotificationConfiguration(String bucketName) throws AmazonClientException,
- AmazonServiceException {
- return delegate.getBucketNotificationConfiguration(bucketName);
+ public ListBucketsResponse listBuckets() throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listBuckets();
}
@Override
- public void setBucketNotificationConfiguration(SetBucketNotificationConfigurationRequest setBucketNotificationConfigurationRequest)
- throws AmazonClientException, AmazonServiceException {
- delegate.setBucketNotificationConfiguration(setBucketNotificationConfigurationRequest);
+ public ListBucketsIterable listBucketsPaginator() throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listBucketsPaginator();
}
@Override
- public void setBucketNotificationConfiguration(String bucketName, BucketNotificationConfiguration bucketNotificationConfiguration)
- throws AmazonClientException, AmazonServiceException {
- delegate.setBucketNotificationConfiguration(bucketName, bucketNotificationConfiguration);
+ public ListBucketsIterable listBucketsPaginator(ListBucketsRequest listBucketsRequest) throws AwsServiceException, SdkClientException,
+ S3Exception {
+ return delegate.listBucketsPaginator(listBucketsRequest);
}
@Override
- public BucketWebsiteConfiguration getBucketWebsiteConfiguration(String bucketName) throws AmazonClientException,
- AmazonServiceException {
- return delegate.getBucketWebsiteConfiguration(bucketName);
+ public ListBucketsIterable listBucketsPaginator(Consumer<ListBucketsRequest.Builder> listBucketsRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.listBucketsPaginator(listBucketsRequest);
}
@Override
- public BucketWebsiteConfiguration getBucketWebsiteConfiguration(
- GetBucketWebsiteConfigurationRequest getBucketWebsiteConfigurationRequest
- ) throws AmazonClientException, AmazonServiceException {
- return delegate.getBucketWebsiteConfiguration(getBucketWebsiteConfigurationRequest);
+ public ListDirectoryBucketsResponse listDirectoryBuckets(ListDirectoryBucketsRequest listDirectoryBucketsRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listDirectoryBuckets(listDirectoryBucketsRequest);
}
@Override
- public void setBucketWebsiteConfiguration(String bucketName, BucketWebsiteConfiguration configuration) throws AmazonClientException,
- AmazonServiceException {
- delegate.setBucketWebsiteConfiguration(bucketName, configuration);
+ public ListDirectoryBucketsResponse listDirectoryBuckets(Consumer<ListDirectoryBucketsRequest.Builder> listDirectoryBucketsRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listDirectoryBuckets(listDirectoryBucketsRequest);
}
@Override
- public void setBucketWebsiteConfiguration(SetBucketWebsiteConfigurationRequest setBucketWebsiteConfigurationRequest)
- throws AmazonClientException, AmazonServiceException {
- delegate.setBucketWebsiteConfiguration(setBucketWebsiteConfigurationRequest);
+ public ListDirectoryBucketsIterable listDirectoryBucketsPaginator(ListDirectoryBucketsRequest listDirectoryBucketsRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listDirectoryBucketsPaginator(listDirectoryBucketsRequest);
}
@Override
- public void deleteBucketWebsiteConfiguration(String bucketName) throws AmazonClientException, AmazonServiceException {
- delegate.deleteBucketWebsiteConfiguration(bucketName);
+ public ListDirectoryBucketsIterable listDirectoryBucketsPaginator(
+ Consumer<ListDirectoryBucketsRequest.Builder> listDirectoryBucketsRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listDirectoryBucketsPaginator(listDirectoryBucketsRequest);
}
@Override
- public void deleteBucketWebsiteConfiguration(DeleteBucketWebsiteConfigurationRequest deleteBucketWebsiteConfigurationRequest)
- throws AmazonClientException, AmazonServiceException {
- delegate.deleteBucketWebsiteConfiguration(deleteBucketWebsiteConfigurationRequest);
+ public ListMultipartUploadsResponse listMultipartUploads(ListMultipartUploadsRequest listMultipartUploadsRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listMultipartUploads(listMultipartUploadsRequest);
}
@Override
- public BucketPolicy getBucketPolicy(String bucketName) throws AmazonClientException, AmazonServiceException {
- return delegate.getBucketPolicy(bucketName);
+ public ListMultipartUploadsResponse listMultipartUploads(Consumer<ListMultipartUploadsRequest.Builder> listMultipartUploadsRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listMultipartUploads(listMultipartUploadsRequest);
}
@Override
- public BucketPolicy getBucketPolicy(GetBucketPolicyRequest getBucketPolicyRequest) throws AmazonClientException,
- AmazonServiceException {
- return delegate.getBucketPolicy(getBucketPolicyRequest);
+ public ListMultipartUploadsIterable listMultipartUploadsPaginator(ListMultipartUploadsRequest listMultipartUploadsRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listMultipartUploadsPaginator(listMultipartUploadsRequest);
}
@Override
- public void setBucketPolicy(String bucketName, String policyText) throws AmazonClientException, AmazonServiceException {
- delegate.setBucketPolicy(bucketName, policyText);
+ public ListMultipartUploadsIterable listMultipartUploadsPaginator(
+ Consumer<ListMultipartUploadsRequest.Builder> listMultipartUploadsRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listMultipartUploadsPaginator(listMultipartUploadsRequest);
}
@Override
- public void setBucketPolicy(SetBucketPolicyRequest setBucketPolicyRequest) throws AmazonClientException, AmazonServiceException {
- delegate.setBucketPolicy(setBucketPolicyRequest);
+ public ListObjectVersionsResponse listObjectVersions(ListObjectVersionsRequest listObjectVersionsRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.listObjectVersions(listObjectVersionsRequest);
}
@Override
- public void deleteBucketPolicy(String bucketName) throws AmazonClientException, AmazonServiceException {
- delegate.deleteBucketPolicy(bucketName);
+ public ListObjectVersionsResponse listObjectVersions(Consumer<ListObjectVersionsRequest.Builder> listObjectVersionsRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listObjectVersions(listObjectVersionsRequest);
}
@Override
- public void deleteBucketPolicy(DeleteBucketPolicyRequest deleteBucketPolicyRequest) throws AmazonClientException,
- AmazonServiceException {
- delegate.deleteBucketPolicy(deleteBucketPolicyRequest);
+ public ListObjectVersionsIterable listObjectVersionsPaginator(ListObjectVersionsRequest listObjectVersionsRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listObjectVersionsPaginator(listObjectVersionsRequest);
}
@Override
- public URL generatePresignedUrl(String bucketName, String key, Date expiration) throws AmazonClientException {
- return delegate.generatePresignedUrl(bucketName, key, expiration);
+ public ListObjectVersionsIterable listObjectVersionsPaginator(Consumer<ListObjectVersionsRequest.Builder> listObjectVersionsRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listObjectVersionsPaginator(listObjectVersionsRequest);
}
@Override
- public URL generatePresignedUrl(String bucketName, String key, Date expiration, HttpMethod method) throws AmazonClientException {
- return delegate.generatePresignedUrl(bucketName, key, expiration, method);
+ public ListObjectsResponse listObjects(ListObjectsRequest listObjectsRequest) throws NoSuchBucketException, AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.listObjects(listObjectsRequest);
}
@Override
- public URL generatePresignedUrl(GeneratePresignedUrlRequest generatePresignedUrlRequest) throws AmazonClientException {
- return delegate.generatePresignedUrl(generatePresignedUrlRequest);
+ public ListObjectsResponse listObjects(Consumer<ListObjectsRequest.Builder> listObjectsRequest) throws NoSuchBucketException,
+ AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listObjects(listObjectsRequest);
}
@Override
- public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest request) throws AmazonClientException,
- AmazonServiceException {
- return delegate.initiateMultipartUpload(request);
+ public ListObjectsV2Response listObjectsV2(ListObjectsV2Request listObjectsV2Request) throws NoSuchBucketException, AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.listObjectsV2(listObjectsV2Request);
}
@Override
- public UploadPartResult uploadPart(UploadPartRequest request) throws AmazonClientException, AmazonServiceException {
- return delegate.uploadPart(request);
+ public ListObjectsV2Response listObjectsV2(Consumer<ListObjectsV2Request.Builder> listObjectsV2Request) throws NoSuchBucketException,
+ AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listObjectsV2(listObjectsV2Request);
}
@Override
- public PartListing listParts(ListPartsRequest request) throws AmazonClientException, AmazonServiceException {
- return delegate.listParts(request);
+ public ListObjectsV2Iterable listObjectsV2Paginator(ListObjectsV2Request listObjectsV2Request) throws NoSuchBucketException,
+ AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listObjectsV2Paginator(listObjectsV2Request);
}
@Override
- public void abortMultipartUpload(AbortMultipartUploadRequest request) throws AmazonClientException, AmazonServiceException {
- delegate.abortMultipartUpload(request);
+ public ListObjectsV2Iterable listObjectsV2Paginator(Consumer<ListObjectsV2Request.Builder> listObjectsV2Request)
+ throws NoSuchBucketException, AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listObjectsV2Paginator(listObjectsV2Request);
}
@Override
- public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request) throws AmazonClientException,
- AmazonServiceException {
- return delegate.completeMultipartUpload(request);
+ public ListPartsResponse listParts(ListPartsRequest listPartsRequest) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.listParts(listPartsRequest);
}
@Override
- public MultipartUploadListing listMultipartUploads(ListMultipartUploadsRequest request) throws AmazonClientException,
- AmazonServiceException {
- return delegate.listMultipartUploads(request);
+ public ListPartsResponse listParts(Consumer<ListPartsRequest.Builder> listPartsRequest) throws AwsServiceException, SdkClientException,
+ S3Exception {
+ return delegate.listParts(listPartsRequest);
}
@Override
- public S3ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request) {
- return delegate.getCachedResponseMetadata(request);
+ public ListPartsIterable listPartsPaginator(ListPartsRequest listPartsRequest) throws AwsServiceException, SdkClientException,
+ S3Exception {
+ return delegate.listPartsPaginator(listPartsRequest);
}
@Override
- public void restoreObject(RestoreObjectRequest copyGlacierObjectRequest) throws AmazonServiceException {
- delegate.restoreObject(copyGlacierObjectRequest);
+ public ListPartsIterable listPartsPaginator(Consumer<ListPartsRequest.Builder> listPartsRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.listPartsPaginator(listPartsRequest);
}
@Override
- public void restoreObject(String bucketName, String key, int expirationInDays) throws AmazonServiceException {
- delegate.restoreObject(bucketName, key, expirationInDays);
+ public PutBucketAccelerateConfigurationResponse putBucketAccelerateConfiguration(
+ PutBucketAccelerateConfigurationRequest putBucketAccelerateConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketAccelerateConfiguration(putBucketAccelerateConfigurationRequest);
}
@Override
- public void enableRequesterPays(String bucketName) throws AmazonServiceException, AmazonClientException {
- delegate.enableRequesterPays(bucketName);
+ public PutBucketAccelerateConfigurationResponse putBucketAccelerateConfiguration(
+ Consumer<PutBucketAccelerateConfigurationRequest.Builder> putBucketAccelerateConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketAccelerateConfiguration(putBucketAccelerateConfigurationRequest);
}
@Override
- public void disableRequesterPays(String bucketName) throws AmazonServiceException, AmazonClientException {
- delegate.disableRequesterPays(bucketName);
+ public PutBucketAclResponse putBucketAcl(PutBucketAclRequest putBucketAclRequest) throws AwsServiceException, SdkClientException,
+ S3Exception {
+ return delegate.putBucketAcl(putBucketAclRequest);
}
@Override
- public boolean isRequesterPaysEnabled(String bucketName) throws AmazonServiceException, AmazonClientException {
- return delegate.isRequesterPaysEnabled(bucketName);
+ public PutBucketAclResponse putBucketAcl(Consumer<PutBucketAclRequest.Builder> putBucketAclRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.putBucketAcl(putBucketAclRequest);
}
@Override
- public ObjectListing listNextBatchOfObjects(ListNextBatchOfObjectsRequest listNextBatchOfObjectsRequest) throws AmazonClientException,
- AmazonServiceException {
- return delegate.listNextBatchOfObjects(listNextBatchOfObjectsRequest);
+ public PutBucketAnalyticsConfigurationResponse putBucketAnalyticsConfiguration(
+ PutBucketAnalyticsConfigurationRequest putBucketAnalyticsConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketAnalyticsConfiguration(putBucketAnalyticsConfigurationRequest);
}
@Override
- public VersionListing listNextBatchOfVersions(ListNextBatchOfVersionsRequest listNextBatchOfVersionsRequest)
- throws AmazonClientException, AmazonServiceException {
- return delegate.listNextBatchOfVersions(listNextBatchOfVersionsRequest);
+ public PutBucketAnalyticsConfigurationResponse putBucketAnalyticsConfiguration(
+ Consumer<PutBucketAnalyticsConfigurationRequest.Builder> putBucketAnalyticsConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketAnalyticsConfiguration(putBucketAnalyticsConfigurationRequest);
}
@Override
- public Owner getS3AccountOwner(GetS3AccountOwnerRequest getS3AccountOwnerRequest) throws AmazonClientException, AmazonServiceException {
- return delegate.getS3AccountOwner(getS3AccountOwnerRequest);
+ public PutBucketCorsResponse putBucketCors(PutBucketCorsRequest putBucketCorsRequest) throws AwsServiceException, SdkClientException,
+ S3Exception {
+ return delegate.putBucketCors(putBucketCorsRequest);
}
@Override
- public BucketLoggingConfiguration getBucketLoggingConfiguration(
- GetBucketLoggingConfigurationRequest getBucketLoggingConfigurationRequest
- ) throws AmazonClientException, AmazonServiceException {
- return delegate.getBucketLoggingConfiguration(getBucketLoggingConfigurationRequest);
+ public PutBucketCorsResponse putBucketCors(Consumer<PutBucketCorsRequest.Builder> putBucketCorsRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.putBucketCors(putBucketCorsRequest);
}
@Override
- public BucketVersioningConfiguration getBucketVersioningConfiguration(
- GetBucketVersioningConfigurationRequest getBucketVersioningConfigurationRequest
- ) throws AmazonClientException, AmazonServiceException {
- return delegate.getBucketVersioningConfiguration(getBucketVersioningConfigurationRequest);
+ public PutBucketEncryptionResponse putBucketEncryption(PutBucketEncryptionRequest putBucketEncryptionRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketEncryption(putBucketEncryptionRequest);
}
@Override
- public BucketLifecycleConfiguration getBucketLifecycleConfiguration(
- GetBucketLifecycleConfigurationRequest getBucketLifecycleConfigurationRequest
- ) {
- return delegate.getBucketLifecycleConfiguration(getBucketLifecycleConfigurationRequest);
+ public PutBucketEncryptionResponse putBucketEncryption(Consumer<PutBucketEncryptionRequest.Builder> putBucketEncryptionRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketEncryption(putBucketEncryptionRequest);
}
@Override
- public BucketCrossOriginConfiguration getBucketCrossOriginConfiguration(
- GetBucketCrossOriginConfigurationRequest getBucketCrossOriginConfigurationRequest
- ) {
- return delegate.getBucketCrossOriginConfiguration(getBucketCrossOriginConfigurationRequest);
+ public PutBucketIntelligentTieringConfigurationResponse putBucketIntelligentTieringConfiguration(
+ PutBucketIntelligentTieringConfigurationRequest putBucketIntelligentTieringConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketIntelligentTieringConfiguration(putBucketIntelligentTieringConfigurationRequest);
}
@Override
- public BucketTaggingConfiguration getBucketTaggingConfiguration(
- GetBucketTaggingConfigurationRequest getBucketTaggingConfigurationRequest
- ) {
- return delegate.getBucketTaggingConfiguration(getBucketTaggingConfigurationRequest);
+ public PutBucketIntelligentTieringConfigurationResponse putBucketIntelligentTieringConfiguration(
+ Consumer<PutBucketIntelligentTieringConfigurationRequest.Builder> putBucketIntelligentTieringConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketIntelligentTieringConfiguration(putBucketIntelligentTieringConfigurationRequest);
}
@Override
- public BucketNotificationConfiguration getBucketNotificationConfiguration(
- GetBucketNotificationConfigurationRequest getBucketNotificationConfigurationRequest
- ) throws AmazonClientException, AmazonServiceException {
- return delegate.getBucketNotificationConfiguration(getBucketNotificationConfigurationRequest);
+ public PutBucketInventoryConfigurationResponse putBucketInventoryConfiguration(
+ PutBucketInventoryConfigurationRequest putBucketInventoryConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketInventoryConfiguration(putBucketInventoryConfigurationRequest);
}
@Override
- public BucketReplicationConfiguration getBucketReplicationConfiguration(
- GetBucketReplicationConfigurationRequest getBucketReplicationConfigurationRequest
- ) throws AmazonServiceException, AmazonClientException {
- return delegate.getBucketReplicationConfiguration(getBucketReplicationConfigurationRequest);
+ public PutBucketInventoryConfigurationResponse putBucketInventoryConfiguration(
+ Consumer<PutBucketInventoryConfigurationRequest.Builder> putBucketInventoryConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketInventoryConfiguration(putBucketInventoryConfigurationRequest);
}
@Override
- public HeadBucketResult headBucket(HeadBucketRequest headBucketRequest) throws AmazonClientException, AmazonServiceException {
- return delegate.headBucket(headBucketRequest);
+ public PutBucketLifecycleConfigurationResponse putBucketLifecycleConfiguration(
+ PutBucketLifecycleConfigurationRequest putBucketLifecycleConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketLifecycleConfiguration(putBucketLifecycleConfigurationRequest);
+ }
+
+ @Override
+ public PutBucketLifecycleConfigurationResponse putBucketLifecycleConfiguration(
+ Consumer<PutBucketLifecycleConfigurationRequest.Builder> putBucketLifecycleConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketLifecycleConfiguration(putBucketLifecycleConfigurationRequest);
+ }
+
+ @Override
+ public PutBucketLoggingResponse putBucketLogging(PutBucketLoggingRequest putBucketLoggingRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.putBucketLogging(putBucketLoggingRequest);
+ }
+
+ @Override
+ public PutBucketLoggingResponse putBucketLogging(Consumer<PutBucketLoggingRequest.Builder> putBucketLoggingRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketLogging(putBucketLoggingRequest);
+ }
+
+ @Override
+ public PutBucketMetricsConfigurationResponse putBucketMetricsConfiguration(
+ PutBucketMetricsConfigurationRequest putBucketMetricsConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketMetricsConfiguration(putBucketMetricsConfigurationRequest);
+ }
+
+ @Override
+ public PutBucketMetricsConfigurationResponse putBucketMetricsConfiguration(
+ Consumer<PutBucketMetricsConfigurationRequest.Builder> putBucketMetricsConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketMetricsConfiguration(putBucketMetricsConfigurationRequest);
+ }
+
+ @Override
+ public PutBucketNotificationConfigurationResponse putBucketNotificationConfiguration(
+ PutBucketNotificationConfigurationRequest putBucketNotificationConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketNotificationConfiguration(putBucketNotificationConfigurationRequest);
+ }
+
+ @Override
+ public PutBucketNotificationConfigurationResponse putBucketNotificationConfiguration(
+ Consumer<PutBucketNotificationConfigurationRequest.Builder> putBucketNotificationConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketNotificationConfiguration(putBucketNotificationConfigurationRequest);
+ }
+
+ @Override
+ public PutBucketOwnershipControlsResponse putBucketOwnershipControls(
+ PutBucketOwnershipControlsRequest putBucketOwnershipControlsRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketOwnershipControls(putBucketOwnershipControlsRequest);
+ }
+
+ @Override
+ public PutBucketOwnershipControlsResponse putBucketOwnershipControls(
+ Consumer<PutBucketOwnershipControlsRequest.Builder> putBucketOwnershipControlsRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketOwnershipControls(putBucketOwnershipControlsRequest);
+ }
+
+ @Override
+ public PutBucketPolicyResponse putBucketPolicy(PutBucketPolicyRequest putBucketPolicyRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.putBucketPolicy(putBucketPolicyRequest);
+ }
+
+ @Override
+ public PutBucketPolicyResponse putBucketPolicy(Consumer<PutBucketPolicyRequest.Builder> putBucketPolicyRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketPolicy(putBucketPolicyRequest);
+ }
+
+ @Override
+ public PutBucketReplicationResponse putBucketReplication(PutBucketReplicationRequest putBucketReplicationRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketReplication(putBucketReplicationRequest);
+ }
+
+ @Override
+ public PutBucketReplicationResponse putBucketReplication(Consumer<PutBucketReplicationRequest.Builder> putBucketReplicationRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketReplication(putBucketReplicationRequest);
+ }
+
+ @Override
+ public PutBucketRequestPaymentResponse putBucketRequestPayment(PutBucketRequestPaymentRequest putBucketRequestPaymentRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketRequestPayment(putBucketRequestPaymentRequest);
+ }
+
+ @Override
+ public PutBucketRequestPaymentResponse putBucketRequestPayment(
+ Consumer<PutBucketRequestPaymentRequest.Builder> putBucketRequestPaymentRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketRequestPayment(putBucketRequestPaymentRequest);
+ }
+
+ @Override
+ public PutBucketTaggingResponse putBucketTagging(PutBucketTaggingRequest putBucketTaggingRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.putBucketTagging(putBucketTaggingRequest);
+ }
+
+ @Override
+ public PutBucketTaggingResponse putBucketTagging(Consumer<PutBucketTaggingRequest.Builder> putBucketTaggingRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketTagging(putBucketTaggingRequest);
+ }
+
+ @Override
+ public PutBucketVersioningResponse putBucketVersioning(PutBucketVersioningRequest putBucketVersioningRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketVersioning(putBucketVersioningRequest);
+ }
+
+ @Override
+ public PutBucketVersioningResponse putBucketVersioning(Consumer<PutBucketVersioningRequest.Builder> putBucketVersioningRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketVersioning(putBucketVersioningRequest);
+ }
+
+ @Override
+ public PutBucketWebsiteResponse putBucketWebsite(PutBucketWebsiteRequest putBucketWebsiteRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.putBucketWebsite(putBucketWebsiteRequest);
+ }
+
+ @Override
+ public PutBucketWebsiteResponse putBucketWebsite(Consumer<PutBucketWebsiteRequest.Builder> putBucketWebsiteRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putBucketWebsite(putBucketWebsiteRequest);
+ }
+
+ @Override
+ public PutObjectResponse putObject(PutObjectRequest putObjectRequest, RequestBody requestBody) throws InvalidRequestException,
+ InvalidWriteOffsetException, TooManyPartsException, EncryptionTypeMismatchException, AwsServiceException, SdkClientException,
+ S3Exception {
+ return delegate.putObject(putObjectRequest, requestBody);
+ }
+
+ @Override
+ public PutObjectResponse putObject(Consumer<PutObjectRequest.Builder> putObjectRequest, RequestBody requestBody)
+ throws InvalidRequestException, InvalidWriteOffsetException, TooManyPartsException, EncryptionTypeMismatchException,
+ AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putObject(putObjectRequest, requestBody);
+ }
+
+ @Override
+ public PutObjectResponse putObject(PutObjectRequest putObjectRequest, Path sourcePath) throws InvalidRequestException,
+ InvalidWriteOffsetException, TooManyPartsException, EncryptionTypeMismatchException, AwsServiceException, SdkClientException,
+ S3Exception {
+ return delegate.putObject(putObjectRequest, sourcePath);
+ }
+
+ @Override
+ public PutObjectResponse putObject(Consumer<PutObjectRequest.Builder> putObjectRequest, Path sourcePath) throws InvalidRequestException,
+ InvalidWriteOffsetException, TooManyPartsException, EncryptionTypeMismatchException, AwsServiceException, SdkClientException,
+ S3Exception {
+ return delegate.putObject(putObjectRequest, sourcePath);
+ }
+
+ @Override
+ public PutObjectAclResponse putObjectAcl(PutObjectAclRequest putObjectAclRequest) throws NoSuchKeyException, AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.putObjectAcl(putObjectAclRequest);
+ }
+
+ @Override
+ public PutObjectAclResponse putObjectAcl(Consumer<PutObjectAclRequest.Builder> putObjectAclRequest) throws NoSuchKeyException,
+ AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putObjectAcl(putObjectAclRequest);
+ }
+
+ @Override
+ public PutObjectLegalHoldResponse putObjectLegalHold(PutObjectLegalHoldRequest putObjectLegalHoldRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.putObjectLegalHold(putObjectLegalHoldRequest);
+ }
+
+ @Override
+ public PutObjectLegalHoldResponse putObjectLegalHold(Consumer<PutObjectLegalHoldRequest.Builder> putObjectLegalHoldRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putObjectLegalHold(putObjectLegalHoldRequest);
+ }
+
+ @Override
+ public PutObjectLockConfigurationResponse putObjectLockConfiguration(
+ PutObjectLockConfigurationRequest putObjectLockConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putObjectLockConfiguration(putObjectLockConfigurationRequest);
+ }
+
+ @Override
+ public PutObjectLockConfigurationResponse putObjectLockConfiguration(
+ Consumer<PutObjectLockConfigurationRequest.Builder> putObjectLockConfigurationRequest
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putObjectLockConfiguration(putObjectLockConfigurationRequest);
+ }
+
+ @Override
+ public PutObjectRetentionResponse putObjectRetention(PutObjectRetentionRequest putObjectRetentionRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.putObjectRetention(putObjectRetentionRequest);
+ }
+
+ @Override
+ public PutObjectRetentionResponse putObjectRetention(Consumer<PutObjectRetentionRequest.Builder> putObjectRetentionRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putObjectRetention(putObjectRetentionRequest);
+ }
+
+ @Override
+ public PutObjectTaggingResponse putObjectTagging(PutObjectTaggingRequest putObjectTaggingRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.putObjectTagging(putObjectTaggingRequest);
+ }
+
+ @Override
+ public PutObjectTaggingResponse putObjectTagging(Consumer<PutObjectTaggingRequest.Builder> putObjectTaggingRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putObjectTagging(putObjectTaggingRequest);
+ }
+
+ @Override
+ public PutPublicAccessBlockResponse putPublicAccessBlock(PutPublicAccessBlockRequest putPublicAccessBlockRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putPublicAccessBlock(putPublicAccessBlockRequest);
+ }
+
+ @Override
+ public PutPublicAccessBlockResponse putPublicAccessBlock(Consumer<PutPublicAccessBlockRequest.Builder> putPublicAccessBlockRequest)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.putPublicAccessBlock(putPublicAccessBlockRequest);
+ }
+
+ @Override
+ public RestoreObjectResponse restoreObject(RestoreObjectRequest restoreObjectRequest) throws ObjectAlreadyInActiveTierErrorException,
+ AwsServiceException, SdkClientException, S3Exception {
+ return delegate.restoreObject(restoreObjectRequest);
+ }
+
+ @Override
+ public RestoreObjectResponse restoreObject(Consumer<RestoreObjectRequest.Builder> restoreObjectRequest)
+ throws ObjectAlreadyInActiveTierErrorException, AwsServiceException, SdkClientException, S3Exception {
+ return delegate.restoreObject(restoreObjectRequest);
+ }
+
+ @Override
+ public UploadPartResponse uploadPart(UploadPartRequest uploadPartRequest, RequestBody requestBody) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.uploadPart(uploadPartRequest, requestBody);
+ }
+
+ @Override
+ public UploadPartResponse uploadPart(Consumer<UploadPartRequest.Builder> uploadPartRequest, RequestBody requestBody)
+ throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.uploadPart(uploadPartRequest, requestBody);
+ }
+
+ @Override
+ public UploadPartResponse uploadPart(UploadPartRequest uploadPartRequest, Path sourcePath) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.uploadPart(uploadPartRequest, sourcePath);
+ }
+
+ @Override
+ public UploadPartResponse uploadPart(Consumer<UploadPartRequest.Builder> uploadPartRequest, Path sourcePath) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.uploadPart(uploadPartRequest, sourcePath);
+ }
+
+ @Override
+ public UploadPartCopyResponse uploadPartCopy(UploadPartCopyRequest uploadPartCopyRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.uploadPartCopy(uploadPartCopyRequest);
+ }
+
+ @Override
+ public UploadPartCopyResponse uploadPartCopy(Consumer<UploadPartCopyRequest.Builder> uploadPartCopyRequest) throws AwsServiceException,
+ SdkClientException, S3Exception {
+ return delegate.uploadPartCopy(uploadPartCopyRequest);
+ }
+
+ @Override
+ public WriteGetObjectResponseResponse writeGetObjectResponse(
+ WriteGetObjectResponseRequest writeGetObjectResponseRequest,
+ RequestBody requestBody
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.writeGetObjectResponse(writeGetObjectResponseRequest, requestBody);
+ }
+
+ @Override
+ public WriteGetObjectResponseResponse writeGetObjectResponse(
+ Consumer<WriteGetObjectResponseRequest.Builder> writeGetObjectResponseRequest,
+ RequestBody requestBody
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.writeGetObjectResponse(writeGetObjectResponseRequest, requestBody);
+ }
+
+ @Override
+ public WriteGetObjectResponseResponse writeGetObjectResponse(
+ WriteGetObjectResponseRequest writeGetObjectResponseRequest,
+ Path sourcePath
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.writeGetObjectResponse(writeGetObjectResponseRequest, sourcePath);
+ }
+
+ @Override
+ public WriteGetObjectResponseResponse writeGetObjectResponse(
+ Consumer<WriteGetObjectResponseRequest.Builder> writeGetObjectResponseRequest,
+ Path sourcePath
+ ) throws AwsServiceException, SdkClientException, S3Exception {
+ return delegate.writeGetObjectResponse(writeGetObjectResponseRequest, sourcePath);
+ }
+
+ @Override
+ public S3Utilities utilities() {
+ return delegate.utilities();
+ }
+
+ @Override
+ public S3Waiter waiter() {
+ return delegate.waiter();
}
@Override
- public void shutdown() {
- delegate.shutdown();
+ public S3ServiceClientConfiguration serviceClientConfiguration() {
+ return delegate.serviceClientConfiguration();
}
}
diff --git a/modules/repository-s3/qa/insecure-credentials/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/modules/repository-s3/qa/insecure-credentials/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java
index eb4cd955c81b1..6074219823da1 100644
--- a/modules/repository-s3/qa/insecure-credentials/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java
+++ b/modules/repository-s3/qa/insecure-credentials/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java
@@ -9,9 +9,10 @@
package org.elasticsearch.repositories.s3;
-import com.amazonaws.auth.AWSCredentials;
-import com.amazonaws.auth.AWSCredentialsProvider;
-import com.amazonaws.services.s3.AmazonS3;
+import software.amazon.awssdk.auth.credentials.AwsCredentials;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+import software.amazon.awssdk.http.SdkHttpClient;
+import software.amazon.awssdk.services.s3.S3Client;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -89,12 +90,15 @@ public void testRepositoryCredentialsOverrideSecureCredentials() {
assertThat(repositories.repository(repositoryName), instanceOf(S3Repository.class));
final S3Repository repository = (S3Repository) repositories.repository(repositoryName);
- final AmazonS3 client = repository.createBlobStore().clientReference().client();
- assertThat(client, instanceOf(ProxyS3RepositoryPlugin.ClientAndCredentials.class));
-
- final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) client).credentials.getCredentials();
- assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key"));
- assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret"));
+ try (var clientReference = repository.createBlobStore().clientReference()) {
+ final ProxyS3RepositoryPlugin.ClientAndCredentials client = asInstanceOf(
+ ProxyS3RepositoryPlugin.ClientAndCredentials.class,
+ clientReference.client()
+ );
+ final AwsCredentials credentials = client.credentials.resolveCredentials();
+ assertThat(credentials.accessKeyId(), is("insecure_aws_key"));
+ assertThat(credentials.secretAccessKey(), is("insecure_aws_secret"));
+ }
assertCriticalWarnings(
"[access_key] setting was deprecated in Elasticsearch and will be removed in a future release. "
@@ -127,19 +131,19 @@ public void testReinitSecureCredentials() {
final S3Repository repository = (S3Repository) repositories.repository(repositoryName);
try (AmazonS3Reference clientReference = ((S3BlobStore) repository.blobStore()).clientReference()) {
- final AmazonS3 client = clientReference.client();
+ final S3Client client = clientReference.client();
assertThat(client, instanceOf(ProxyS3RepositoryPlugin.ClientAndCredentials.class));
- final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) client).credentials.getCredentials();
+ final AwsCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) client).credentials.resolveCredentials();
if (hasInsecureSettings) {
- assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key"));
- assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret"));
+ assertThat(credentials.accessKeyId(), is("insecure_aws_key"));
+ assertThat(credentials.secretAccessKey(), is("insecure_aws_secret"));
} else if ("other".equals(clientName)) {
- assertThat(credentials.getAWSAccessKeyId(), is("secure_other_key"));
- assertThat(credentials.getAWSSecretKey(), is("secure_other_secret"));
+ assertThat(credentials.accessKeyId(), is("secure_other_key"));
+ assertThat(credentials.secretAccessKey(), is("secure_other_secret"));
} else {
- assertThat(credentials.getAWSAccessKeyId(), is("secure_default_key"));
- assertThat(credentials.getAWSSecretKey(), is("secure_default_secret"));
+ assertThat(credentials.accessKeyId(), is("secure_default_key"));
+ assertThat(credentials.secretAccessKey(), is("secure_default_secret"));
}
// new settings
@@ -157,29 +161,29 @@ public void testReinitSecureCredentials() {
// check the not-yet-closed client reference still has the same credentials
if (hasInsecureSettings) {
- assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key"));
- assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret"));
+ assertThat(credentials.accessKeyId(), is("insecure_aws_key"));
+ assertThat(credentials.secretAccessKey(), is("insecure_aws_secret"));
} else if ("other".equals(clientName)) {
- assertThat(credentials.getAWSAccessKeyId(), is("secure_other_key"));
- assertThat(credentials.getAWSSecretKey(), is("secure_other_secret"));
+ assertThat(credentials.accessKeyId(), is("secure_other_key"));
+ assertThat(credentials.secretAccessKey(), is("secure_other_secret"));
} else {
- assertThat(credentials.getAWSAccessKeyId(), is("secure_default_key"));
- assertThat(credentials.getAWSSecretKey(), is("secure_default_secret"));
+ assertThat(credentials.accessKeyId(), is("secure_default_key"));
+ assertThat(credentials.secretAccessKey(), is("secure_default_secret"));
}
}
// check credentials have been updated
try (AmazonS3Reference clientReference = ((S3BlobStore) repository.blobStore()).clientReference()) {
- final AmazonS3 client = clientReference.client();
+ final S3Client client = clientReference.client();
assertThat(client, instanceOf(ProxyS3RepositoryPlugin.ClientAndCredentials.class));
- final AWSCredentials newCredentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) client).credentials.getCredentials();
+ final AwsCredentials newCredentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) client).credentials.resolveCredentials();
if (hasInsecureSettings) {
- assertThat(newCredentials.getAWSAccessKeyId(), is("insecure_aws_key"));
- assertThat(newCredentials.getAWSSecretKey(), is("insecure_aws_secret"));
+ assertThat(newCredentials.accessKeyId(), is("insecure_aws_key"));
+ assertThat(newCredentials.secretAccessKey(), is("insecure_aws_secret"));
} else {
- assertThat(newCredentials.getAWSAccessKeyId(), is("new_secret_aws_key"));
- assertThat(newCredentials.getAWSSecretKey(), is("new_secret_aws_secret"));
+ assertThat(newCredentials.accessKeyId(), is("new_secret_aws_key"));
+ assertThat(newCredentials.secretAccessKey(), is("new_secret_aws_secret"));
}
}
@@ -256,27 +260,51 @@ S3Service s3Service(Environment environment, Settings nodeSettings, ResourceWatc
return new ProxyS3Service(environment, nodeSettings, resourceWatcherService);
}
+ /**
+ * This wrapper exposes a copy of the AWS credentials that the S3Client uses.
+ */
public static final class ClientAndCredentials extends AmazonS3Wrapper {
- final AWSCredentialsProvider credentials;
+ final AwsCredentialsProvider credentials;
+ // The httpClient must be explicitly closed. Closure of the S3Client, which uses the httpClient, will not do so.
+ private final SdkHttpClient httpClient;
- ClientAndCredentials(AmazonS3 delegate, AWSCredentialsProvider credentials) {
+ ClientAndCredentials(S3Client delegate, SdkHttpClient httpClient, AwsCredentialsProvider credentials) {
super(delegate);
+ this.httpClient = httpClient;
this.credentials = credentials;
}
+
+ @Override
+ public String serviceName() {
+ return "ClientAndCredentials";
+ }
+
+ @Override
+ public void close() {
+ super.close();
+ httpClient.close();
+ }
}
+ /**
+ * A {@link S3Service} wrapper that supports access to a copy of the credentials given to the S3Client.
+ */
public static final class ProxyS3Service extends S3Service {
private static final Logger logger = LogManager.getLogger(ProxyS3Service.class);
ProxyS3Service(Environment environment, Settings nodeSettings, ResourceWatcherService resourceWatcherService) {
- super(environment, nodeSettings, resourceWatcherService);
+ super(environment, nodeSettings, resourceWatcherService, () -> null);
}
@Override
- AmazonS3 buildClient(final S3ClientSettings clientSettings) {
- final AmazonS3 client = super.buildClient(clientSettings);
- return new ClientAndCredentials(client, buildCredentials(logger, clientSettings, webIdentityTokenCredentialsProvider));
+ S3Client buildClient(final S3ClientSettings clientSettings, SdkHttpClient httpClient) {
+ final S3Client client = super.buildClient(clientSettings, httpClient);
+ return new ClientAndCredentials(
+ client,
+ httpClient,
+ buildCredentials(logger, clientSettings, webIdentityTokenCredentialsProvider)
+ );
}
}
diff --git a/modules/repository-s3/qa/third-party/build.gradle b/modules/repository-s3/qa/third-party/build.gradle
index 49cdd2665667f..acf912e6c0136 100644
--- a/modules/repository-s3/qa/third-party/build.gradle
+++ b/modules/repository-s3/qa/third-party/build.gradle
@@ -12,6 +12,12 @@ dependencies {
testImplementation project(':test:fixtures:minio-fixture')
testImplementation project(':test:framework')
testImplementation project(':server')
+
+ testImplementation "software.amazon.awssdk:aws-core:${versions.awsv2sdk}"
+ testImplementation "software.amazon.awssdk:http-client-spi:${versions.awsv2sdk}"
+ testImplementation "software.amazon.awssdk:s3:${versions.awsv2sdk}"
+ testImplementation "software.amazon.awssdk:sdk-core:${versions.awsv2sdk}"
+ testImplementation "software.amazon.awssdk:utils:${versions.awsv2sdk}"
}
boolean useFixture = false
diff --git a/modules/repository-s3/qa/third-party/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/modules/repository-s3/qa/third-party/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java
index 04a743b90efa6..2963f0dbd8575 100644
--- a/modules/repository-s3/qa/third-party/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java
+++ b/modules/repository-s3/qa/third-party/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java
@@ -8,11 +8,12 @@
*/
package org.elasticsearch.repositories.s3;
-import com.amazonaws.services.s3.model.AmazonS3Exception;
-import com.amazonaws.services.s3.model.GetObjectRequest;
-import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
-import com.amazonaws.services.s3.model.ListMultipartUploadsRequest;
-import com.amazonaws.services.s3.model.MultipartUpload;
+import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectRequest;
+import software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest;
+import software.amazon.awssdk.services.s3.model.MultipartUpload;
+import software.amazon.awssdk.services.s3.model.S3Exception;
+
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
@@ -176,8 +177,9 @@ BytesReference readRegister() {
}
List<MultipartUpload> listMultipartUploads() {
- return client.listMultipartUploads(new ListMultipartUploadsRequest(bucketName).withPrefix(registerBlobPath))
- .getMultipartUploads();
+ return client.listMultipartUploads(
+ ListMultipartUploadsRequest.builder().bucket(bucketName).prefix(registerBlobPath).build()
+ ).uploads();
}
}
@@ -191,11 +193,11 @@ List<MultipartUpload> listMultipartUploads() {
assertEquals(bytes1, testHarness.readRegister());
assertArrayEquals(
bytes1.array(),
- client.getObject(new GetObjectRequest(bucketName, registerBlobPath)).getObjectContent().readAllBytes()
+ client.getObject(GetObjectRequest.builder().bucket(bucketName).key(registerBlobPath).build()).readAllBytes()
);
// a fresh ongoing upload blocks other CAS attempts
- client.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucketName, registerBlobPath));
+ client.createMultipartUpload(CreateMultipartUploadRequest.builder().bucket(bucketName).key(registerBlobPath).build());
assertThat(testHarness.listMultipartUploads(), hasSize(1));
assertFalse(testHarness.tryCompareAndSet(bytes1, bytes2));
@@ -203,10 +205,7 @@ List<MultipartUpload> listMultipartUploads() {
assertThat(multipartUploads, hasSize(1));
// repo clock may not be exactly aligned with ours, but it should be close
- final var age = blobStore.getThreadPool().absoluteTimeInMillis() - multipartUploads.get(0)
- .getInitiated()
- .toInstant()
- .toEpochMilli();
+ final var age = blobStore.getThreadPool().absoluteTimeInMillis() - multipartUploads.get(0).initiated().toEpochMilli();
final var ageRangeMillis = TimeValue.timeValueMinutes(1).millis();
assertThat(age, allOf(greaterThanOrEqualTo(-ageRangeMillis), lessThanOrEqualTo(ageRangeMillis)));
@@ -224,9 +223,11 @@ List listMultipartUploads() {
}
public void testReadFromPositionLargerThanBlobLength() {
- testReadFromPositionLargerThanBlobLength(
- e -> asInstanceOf(AmazonS3Exception.class, e.getCause()).getStatusCode() == RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus()
- );
+ testReadFromPositionLargerThanBlobLength(e -> {
+ final var s3Exception = asInstanceOf(S3Exception.class, e.getCause());
+ return s3Exception.statusCode() == RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus()
+ && "InvalidRange".equals(s3Exception.awsErrorDetails().errorCode());
+ });
}
public void testCopy() {
diff --git a/modules/repository-s3/qa/web-identity-token/build.gradle b/modules/repository-s3/qa/web-identity-token/build.gradle
index 4346e1f4547e1..b87c52663d241 100644
--- a/modules/repository-s3/qa/web-identity-token/build.gradle
+++ b/modules/repository-s3/qa/web-identity-token/build.gradle
@@ -11,8 +11,10 @@ dependencies {
testImplementation project(':modules:repository-s3')
testImplementation project(':test:framework')
testImplementation project(':server')
+ testImplementation "software.amazon.awssdk:auth:${versions.awsv2sdk}"
+ implementation "software.amazon.awssdk:identity-spi:${versions.awsv2sdk}"
}
tasks.named("test").configure {
- systemProperty 'es.allow_insecure_settings', 'true'
+ environment 'AWS_REGION', 'es-test-region'
}
diff --git a/modules/repository-s3/qa/web-identity-token/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java b/modules/repository-s3/qa/web-identity-token/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java
index 2698eb718ded0..03ac986d038f7 100644
--- a/modules/repository-s3/qa/web-identity-token/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java
+++ b/modules/repository-s3/qa/web-identity-token/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java
@@ -9,8 +9,9 @@
package org.elasticsearch.repositories.s3;
-import com.amazonaws.auth.AWSCredentials;
-import com.amazonaws.auth.AWSCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.AwsCredentials;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+
import com.sun.net.httpserver.HttpServer;
import org.apache.logging.log4j.LogManager;
@@ -43,6 +44,7 @@
import java.util.Arrays;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.stream.Collectors;
@@ -108,7 +110,9 @@ private static HttpServer getHttpServer(Consumer<String> webIdentityTokenCheck)
""",
ROLE_ARN,
ROLE_NAME,
- ZonedDateTime.now().plusDays(1L).format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"))
+ ZonedDateTime.now(Clock.systemUTC())
+ .plusSeconds(1L) // short expiry to force a reload
+ .format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ssZ"))
).getBytes(StandardCharsets.UTF_8);
exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length);
exchange.getResponseBody().write(response);
@@ -121,7 +125,7 @@ private static HttpServer getHttpServer(Consumer webIdentityTokenCheck)
@SuppressForbidden(reason = "HTTP server is used for testing")
private static Map<String, String> getSystemProperties(HttpServer httpServer) {
return Map.of(
- "com.amazonaws.sdk.stsMetadataServiceEndpointOverride",
+ "org.elasticsearch.repositories.s3.stsEndpointOverride",
"http://" + httpServer.getAddress().getHostName() + ":" + httpServer.getAddress().getPort()
);
}
@@ -130,9 +134,9 @@ private static Map<String, String> environmentVariables() {
return Map.of("AWS_WEB_IDENTITY_TOKEN_FILE", "/var/run/secrets/eks.amazonaws.com/serviceaccount/token", "AWS_ROLE_ARN", ROLE_ARN);
}
- private static void assertCredentials(AWSCredentials credentials) {
- Assert.assertFalse(credentials.getAWSAccessKeyId().isEmpty());
- Assert.assertFalse(credentials.getAWSSecretKey().isEmpty());
+ private static void assertCredentials(AwsCredentials credentials) {
+ Assert.assertFalse(credentials.accessKeyId().isEmpty());
+ Assert.assertFalse(credentials.secretAccessKey().isEmpty());
}
@SuppressForbidden(reason = "HTTP server is used for testing")
@@ -152,15 +156,15 @@ public void testCreateWebIdentityTokenCredentialsProvider() throws Exception {
resourceWatcherService
);
try {
- AWSCredentials credentials = S3Service.buildCredentials(
+ AwsCredentials credentials = S3Service.buildCredentials(
LogManager.getLogger(S3Service.class),
S3ClientSettings.getClientSettings(Settings.EMPTY, randomAlphaOfLength(8)),
webIdentityTokenCredentialsProvider
- ).getCredentials();
+ ).resolveCredentials();
assertCredentials(credentials);
} finally {
- webIdentityTokenCredentialsProvider.shutdown();
+ webIdentityTokenCredentialsProvider.close();
httpServer.stop(0);
}
}
@@ -198,12 +202,12 @@ public void testPickUpNewWebIdentityTokenWhenItsChanged() throws Exception {
resourceWatcherService
);
try {
- AWSCredentialsProvider awsCredentialsProvider = S3Service.buildCredentials(
+ AwsCredentialsProvider awsCredentialsProvider = S3Service.buildCredentials(
LogManager.getLogger(S3Service.class),
S3ClientSettings.getClientSettings(Settings.EMPTY, randomAlphaOfLength(8)),
webIdentityTokenCredentialsProvider
);
- assertCredentials(awsCredentialsProvider.getCredentials());
+ assertCredentials(awsCredentialsProvider.resolveCredentials());
var latch = new CountDownLatch(1);
String newWebIdentityToken = "88f84342080d4671a511e10ae905b2b0";
@@ -213,41 +217,14 @@ public void testPickUpNewWebIdentityTokenWhenItsChanged() throws Exception {
}
});
Files.writeString(environment.configDir().resolve("repository-s3/aws-web-identity-token-file"), newWebIdentityToken);
-
- safeAwait(latch);
- assertCredentials(awsCredentialsProvider.getCredentials());
+ do {
+ // re-resolve credentials in order to trigger a refresh
+ assertCredentials(awsCredentialsProvider.resolveCredentials());
+ } while (latch.await(500, TimeUnit.MILLISECONDS) == false);
+ assertCredentials(awsCredentialsProvider.resolveCredentials());
} finally {
- webIdentityTokenCredentialsProvider.shutdown();
+ webIdentityTokenCredentialsProvider.close();
httpServer.stop(0);
}
}
-
- public void testSupportRegionalizedEndpoints() throws Exception {
- Map<String, String> environmentVariables = Map.of(
- "AWS_WEB_IDENTITY_TOKEN_FILE",
- "/var/run/secrets/eks.amazonaws.com/serviceaccount/token",
- "AWS_ROLE_ARN",
- ROLE_ARN,
- "AWS_STS_REGIONAL_ENDPOINTS",
- "regional",
- "AWS_REGION",
- "us-west-2"
- );
- Map<String, String> systemProperties = Map.of();
-
- var webIdentityTokenCredentialsProvider = new S3Service.CustomWebIdentityTokenCredentialsProvider(
- getEnvironment(),
- environmentVariables::get,
- systemProperties::getOrDefault,
- Clock.systemUTC(),
- resourceWatcherService
- );
- // We can't verify that webIdentityTokenCredentialsProvider's STS client uses the "https://sts.us-west-2.amazonaws.com"
- // endpoint in a unit test. The client depends on hardcoded RegionalEndpointsOptionResolver that in turn depends
- // on the system environment that we can't change in the test. So we just verify we that we called `withRegion`
- // on stsClientBuilder which should internally correctly configure the endpoint when the STS client is built.
- assertEquals("us-west-2", webIdentityTokenCredentialsProvider.getStsRegion());
-
- webIdentityTokenCredentialsProvider.shutdown();
- }
}
diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java
index b1c5d707220af..a7d8669b9db7a 100644
--- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java
+++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java
@@ -166,9 +166,9 @@ public void testMetricsWithErrors() throws IOException {
assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_TOTAL, Operation.PUT_OBJECT), equalTo(4L * batch));
assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_TOTAL, Operation.PUT_OBJECT), equalTo(batch));
assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL, Operation.PUT_OBJECT), equalTo(0L));
- assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.PUT_OBJECT), equalTo(batch));
+ assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.PUT_OBJECT), equalTo(3L * batch));
assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.PUT_OBJECT), equalTo(2L * batch));
- assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.PUT_OBJECT), equalTo(batch));
+ assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.PUT_OBJECT), equalTo(3L * batch));
assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.PUT_OBJECT), equalTo(2L * batch));
assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.PUT_OBJECT), equalTo(batch));
}
@@ -186,9 +186,9 @@ public void testMetricsWithErrors() throws IOException {
assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_TOTAL, Operation.GET_OBJECT), equalTo(2L * batch));
assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_TOTAL, Operation.GET_OBJECT), equalTo(batch));
assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL, Operation.GET_OBJECT), equalTo(batch));
- assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.GET_OBJECT), equalTo(batch));
+ assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.GET_OBJECT), equalTo(2L * batch));
assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.GET_OBJECT), equalTo(batch));
- assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch));
+ assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.GET_OBJECT), equalTo(2L * batch));
assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch));
assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch));
@@ -209,9 +209,9 @@ public void testMetricsWithErrors() throws IOException {
assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_TOTAL, Operation.LIST_OBJECTS), equalTo(5L * batch));
assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_TOTAL, Operation.LIST_OBJECTS), equalTo(batch));
assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL, Operation.LIST_OBJECTS), equalTo(batch));
- assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.LIST_OBJECTS), equalTo(batch));
+ assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.LIST_OBJECTS), equalTo(5L * batch));
assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.LIST_OBJECTS), equalTo(5L * batch));
- assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(batch));
+ assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(5L * batch));
assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(5L * batch));
assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(batch));
}
@@ -249,8 +249,8 @@ public void testMetricsForRequestRangeNotSatisfied() {
assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_TOTAL, Operation.GET_OBJECT), equalTo(3 * batch));
assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_TOTAL, Operation.GET_OBJECT), equalTo(batch));
assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL, Operation.GET_OBJECT), equalTo(batch));
- assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.GET_OBJECT), equalTo(batch));
- assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch));
+ assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.GET_OBJECT), equalTo(3 * batch));
+ assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.GET_OBJECT), equalTo(3 * batch));
assertThat(
getLongCounterValue(plugin, METRIC_EXCEPTIONS_REQUEST_RANGE_NOT_SATISFIED_TOTAL, Operation.GET_OBJECT),
equalTo(batch)
@@ -362,7 +362,7 @@ public void testPutDoesNotRetryOn403InStateful() {
);
}
});
- assertThat(exception.getCause().getMessage(), containsString("InvalidAccessKeyId"));
+ assertThat(exception.getCause().getMessage(), containsString("Status Code: 403"));
assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_TOTAL, Operation.PUT_OBJECT), equalTo(1L));
assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.PUT_OBJECT), equalTo(1L));
diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java
index a30cf9086b96c..4f4eb24b5a9ca 100644
--- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java
+++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java
@@ -9,11 +9,12 @@
package org.elasticsearch.repositories.s3;
import fixture.s3.S3HttpHandler;
+import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration;
+import software.amazon.awssdk.core.internal.http.pipeline.stages.ApplyTransactionIdStage;
+import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
+import software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest;
+import software.amazon.awssdk.services.s3.model.MultipartUpload;
-import com.amazonaws.http.AmazonHttpClient;
-import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
-import com.amazonaws.services.s3.model.ListMultipartUploadsRequest;
-import com.amazonaws.services.s3.model.MultipartUpload;
import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
@@ -81,18 +82,20 @@
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
+import static fixture.aws.AwsCredentialsUtils.isValidAwsV4SignedAuthorizationHeader;
import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_REQUESTS_TOTAL;
import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.getRepositoryDataBlobName;
import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomNonDataPurpose;
+import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.allOf;
-import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
@@ -101,7 +104,6 @@
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.not;
-import static org.hamcrest.Matchers.startsWith;
@SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint")
// Need to set up a new cluster for each test because cluster settings use randomized authentication settings
@@ -111,7 +113,6 @@ public class S3BlobStoreRepositoryTests extends ESMockAPIBasedRepositoryIntegTes
private static final TimeValue TEST_COOLDOWN_PERIOD = TimeValue.timeValueSeconds(10L);
private String region;
- private String signerOverride;
private final AtomicBoolean shouldFailCompleteMultipartUploadRequest = new AtomicBoolean();
@Override
@@ -119,11 +120,6 @@ public void setUp() throws Exception {
if (randomBoolean()) {
region = "test-region";
}
- if (region != null && randomBoolean()) {
- signerOverride = randomFrom("AWS3SignerType", "AWS4SignerType");
- } else if (randomBoolean()) {
- signerOverride = "AWS3SignerType";
- }
shouldFailCompleteMultipartUploadRequest.set(false);
super.setUp();
}
@@ -171,17 +167,12 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
final Settings.Builder builder = Settings.builder()
.put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), 0) // We have tests that verify an exact wait time
.put(S3ClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace("test").getKey(), httpServerUrl())
- // Disable request throttling because some random values in tests might generate too many failures for the S3 client
- .put(S3ClientSettings.USE_THROTTLE_RETRIES_SETTING.getConcreteSettingForNamespace("test").getKey(), false)
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.setSecureSettings(secureSettings);
if (randomBoolean()) {
builder.put(S3ClientSettings.DISABLE_CHUNKED_ENCODING.getConcreteSettingForNamespace("test").getKey(), randomBoolean());
}
- if (signerOverride != null) {
- builder.put(S3ClientSettings.SIGNER_OVERRIDE.getConcreteSettingForNamespace("test").getKey(), signerOverride);
- }
if (region != null) {
builder.put(S3ClientSettings.REGION.getConcreteSettingForNamespace("test").getKey(), region);
}
@@ -512,30 +503,34 @@ public void testMultipartUploadCleanup() {
try (var clientRef = blobStore.clientReference()) {
final var danglingBlobName = randomIdentifier();
- final var initiateMultipartUploadRequest = new InitiateMultipartUploadRequest(
- blobStore.bucket(),
- blobStore.blobContainer(repository.basePath().add("test-multipart-upload")).path().buildAsString() + danglingBlobName
- );
- initiateMultipartUploadRequest.putCustomQueryParameter(
- S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE,
- OperationPurpose.SNAPSHOT_DATA.getKey()
- );
- final var multipartUploadResult = clientRef.client().initiateMultipartUpload(initiateMultipartUploadRequest);
+ final var initiateMultipartUploadRequest = CreateMultipartUploadRequest.builder()
+ .bucket(blobStore.bucket())
+ .key(blobStore.blobContainer(repository.basePath().add("test-multipart-upload")).path().buildAsString() + danglingBlobName)
+ .overrideConfiguration(
+ AwsRequestOverrideConfiguration.builder()
+ .putRawQueryParameter(S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, randomPurpose().getKey())
+ .build()
+ )
+ .build();
- final var listMultipartUploadsRequest = new ListMultipartUploadsRequest(blobStore.bucket()).withPrefix(
- repository.basePath().buildAsString()
- );
- listMultipartUploadsRequest.putCustomQueryParameter(
- S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE,
- OperationPurpose.SNAPSHOT_DATA.getKey()
- );
+ final var multipartUploadResult = clientRef.client().createMultipartUpload(initiateMultipartUploadRequest);
+
+ final var listMultipartUploadsRequest = ListMultipartUploadsRequest.builder()
+ .bucket(blobStore.bucket())
+ .prefix(repository.basePath().buildAsString())
+ .overrideConfiguration(
+ AwsRequestOverrideConfiguration.builder()
+ .putRawQueryParameter(S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, randomPurpose().getKey())
+ .build()
+ )
+ .build();
assertEquals(
- List.of(multipartUploadResult.getUploadId()),
+ List.of(multipartUploadResult.uploadId()),
clientRef.client()
.listMultipartUploads(listMultipartUploadsRequest)
- .getMultipartUploads()
+ .uploads()
.stream()
- .map(MultipartUpload::getUploadId)
+ .map(MultipartUpload::uploadId)
.toList()
);
@@ -557,7 +552,7 @@ public void testMultipartUploadCleanup() {
Level.INFO,
Strings.format(
"cleaned up dangling multipart upload [%s] of blob [%s]*test-multipart-upload/%s]",
- multipartUploadResult.getUploadId(),
+ multipartUploadResult.uploadId(),
repoName,
danglingBlobName
)
@@ -575,9 +570,9 @@ public void match(LogEvent event) {
assertThat(
clientRef.client()
.listMultipartUploads(listMultipartUploadsRequest)
- .getMultipartUploads()
+ .uploads()
.stream()
- .map(MultipartUpload::getUploadId)
+ .map(MultipartUpload::uploadId)
.toList(),
empty()
);
@@ -647,23 +642,16 @@ protected class S3BlobStoreHttpHandler extends S3HttpHandler implements BlobStor
@Override
public void handle(final HttpExchange exchange) throws IOException {
- validateAuthHeader(exchange);
+ assertTrue(
+ isValidAwsV4SignedAuthorizationHeader(
+ "test_access_key",
+ Objects.requireNonNullElse(region, "us-east-1"),
+ "s3",
+ exchange.getRequestHeaders().getFirst("Authorization")
+ )
+ );
super.handle(exchange);
}
-
- private void validateAuthHeader(HttpExchange exchange) {
- final String authorizationHeaderV4 = exchange.getRequestHeaders().getFirst("Authorization");
- final String authorizationHeaderV3 = exchange.getRequestHeaders().getFirst("X-amzn-authorization");
-
- if ("AWS3SignerType".equals(signerOverride)) {
- assertThat(authorizationHeaderV3, startsWith("AWS3"));
- } else if ("AWS4SignerType".equals(signerOverride)) {
- assertThat(authorizationHeaderV4, containsString("aws4_request"));
- }
- if (region != null && authorizationHeaderV4 != null) {
- assertThat(authorizationHeaderV4, containsString("/" + region + "/s3/"));
- }
- }
}
/**
@@ -675,14 +663,31 @@ private void validateAuthHeader(HttpExchange exchange) {
@SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint")
protected static class S3ErroneousHttpHandler extends ErroneousHttpHandler {
+ // S3 SDK stops retrying after TOKEN_BUCKET_SIZE/DEFAULT_EXCEPTION_TOKEN_COST == 500/5 == 100 failures in quick succession
+ // see software.amazon.awssdk.retries.DefaultRetryStrategy.Legacy.TOKEN_BUCKET_SIZE
+ // see software.amazon.awssdk.retries.DefaultRetryStrategy.Legacy.DEFAULT_EXCEPTION_TOKEN_COST
+ private final Semaphore failurePermits = new Semaphore(99);
+
S3ErroneousHttpHandler(final HttpHandler delegate, final int maxErrorsPerRequest) {
super(delegate, maxErrorsPerRequest);
}
+ /**
+ * Bypasses {@link ErroneousHttpHandler#handle} once we exhaust {@link #failurePermits} because S3 will start rate limiting.
+ */
+ @Override
+ public void handle(HttpExchange exchange) throws IOException {
+ if (failurePermits.tryAcquire()) {
+ super.handle(exchange);
+ } else {
+ delegate.handle(exchange);
+ }
+ }
+
@Override
protected String requestUniqueId(final HttpExchange exchange) {
// Amazon SDK client provides a unique ID per request
- return exchange.getRequestHeaders().getFirst(AmazonHttpClient.HEADER_SDK_TRANSACTION_ID);
+ return exchange.getRequestHeaders().getFirst(ApplyTransactionIdStage.HEADER_SDK_TRANSACTION_ID);
}
}
@@ -709,6 +714,7 @@ public void handle(HttpExchange exchange) throws IOException {
assertTrue(s3Request.hasQueryParamOnce(S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE));
}
if (shouldFailCompleteMultipartUploadRequest.get() && s3Request.isCompleteMultipartUploadRequest()) {
+ trackRequest("PutMultipartObject");
try (exchange) {
drainInputStream(exchange.getRequestBody());
exchange.sendResponseHeaders(
diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3BasicCredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3BasicCredentialsRestIT.java
index 5d82892cd3d71..dfe3bbbc2613e 100644
--- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3BasicCredentialsRestIT.java
+++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3BasicCredentialsRestIT.java
@@ -9,6 +9,7 @@
package org.elasticsearch.repositories.s3;
+import fixture.aws.DynamicRegionSupplier;
import fixture.s3.S3HttpFixture;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
@@ -20,7 +21,8 @@
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
-import static fixture.aws.AwsCredentialsUtils.ANY_REGION;
+import java.util.function.Supplier;
+
import static fixture.aws.AwsCredentialsUtils.fixedAccessKey;
@ThreadLeakFilters(filters = { TestContainersThreadFilter.class })
@@ -34,10 +36,17 @@ public class RepositoryS3BasicCredentialsRestIT extends AbstractRepositoryS3Rest
private static final String SECRET_KEY = PREFIX + "secret-key";
private static final String CLIENT = "basic_credentials_client";
- private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, fixedAccessKey(ACCESS_KEY, ANY_REGION, "s3"));
+ private static final Supplier<String> regionSupplier = new DynamicRegionSupplier();
+ private static final S3HttpFixture s3Fixture = new S3HttpFixture(
+ true,
+ BUCKET,
+ BASE_PATH,
+ fixedAccessKey(ACCESS_KEY, regionSupplier, "s3")
+ );
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.module("repository-s3")
+ .systemProperty("aws.region", regionSupplier)
.keystore("s3.client." + CLIENT + ".access_key", ACCESS_KEY)
.keystore("s3.client." + CLIENT + ".secret_key", SECRET_KEY)
.setting("s3.client." + CLIENT + ".endpoint", s3Fixture::getAddress)
diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java
index 66fcdc4ececf4..d365fde6eec8c 100644
--- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java
+++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java
@@ -10,6 +10,7 @@
package org.elasticsearch.repositories.s3;
import fixture.aws.DynamicAwsCredentials;
+import fixture.aws.DynamicRegionSupplier;
import fixture.aws.imds.Ec2ImdsHttpFixture;
import fixture.aws.imds.Ec2ImdsServiceBuilder;
import fixture.aws.imds.Ec2ImdsVersion;
@@ -25,6 +26,7 @@
import org.junit.rules.TestRule;
import java.util.Set;
+import java.util.function.Supplier;
@ThreadLeakFilters(filters = { TestContainersThreadFilter.class })
@ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482
@@ -35,7 +37,8 @@ public class RepositoryS3EcsCredentialsRestIT extends AbstractRepositoryS3RestTe
private static final String BASE_PATH = PREFIX + "base_path";
private static final String CLIENT = "ecs_credentials_client";
- private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials("*", "s3");
+ private static final Supplier<String> regionSupplier = new DynamicRegionSupplier();
+ private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials(regionSupplier, "s3");
private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture(
new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V1).newCredentialsConsumer(dynamicCredentials::addValidCredentials)
@@ -48,6 +51,7 @@ public class RepositoryS3EcsCredentialsRestIT extends AbstractRepositoryS3RestTe
.module("repository-s3")
.setting("s3.client." + CLIENT + ".endpoint", s3Fixture::getAddress)
.environment("AWS_CONTAINER_CREDENTIALS_FULL_URI", () -> ec2ImdsHttpFixture.getAddress() + "/ecs_credentials_endpoint")
+ .environment("AWS_REGION", regionSupplier) // Region is supplied by environment variable when running in ECS
.build();
@ClassRule
diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV1CredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV1CredentialsRestIT.java
deleted file mode 100644
index 29031da10665d..0000000000000
--- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV1CredentialsRestIT.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.repositories.s3;
-
-import fixture.aws.DynamicAwsCredentials;
-import fixture.aws.imds.Ec2ImdsHttpFixture;
-import fixture.aws.imds.Ec2ImdsServiceBuilder;
-import fixture.aws.imds.Ec2ImdsVersion;
-import fixture.s3.S3HttpFixture;
-
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
-
-import org.elasticsearch.test.cluster.ElasticsearchCluster;
-import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter;
-import org.junit.ClassRule;
-import org.junit.rules.RuleChain;
-import org.junit.rules.TestRule;
-
-@ThreadLeakFilters(filters = { TestContainersThreadFilter.class })
-@ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482
-public class RepositoryS3ImdsV1CredentialsRestIT extends AbstractRepositoryS3RestTestCase {
-
- private static final String PREFIX = getIdentifierPrefix("RepositoryS3ImdsV1CredentialsRestIT");
- private static final String BUCKET = PREFIX + "bucket";
- private static final String BASE_PATH = PREFIX + "base_path";
- private static final String CLIENT = "imdsv1_credentials_client";
-
- private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials("*", "s3");
-
- private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture(
- new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V1).newCredentialsConsumer(dynamicCredentials::addValidCredentials)
- );
-
- private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicCredentials::isAuthorized);
-
- public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
- .module("repository-s3")
- .setting("s3.client." + CLIENT + ".endpoint", s3Fixture::getAddress)
- .systemProperty(Ec2ImdsHttpFixture.ENDPOINT_OVERRIDE_SYSPROP_NAME, ec2ImdsHttpFixture::getAddress)
- .build();
-
- @ClassRule
- public static TestRule ruleChain = RuleChain.outerRule(ec2ImdsHttpFixture).around(s3Fixture).around(cluster);
-
- @Override
- protected String getTestRestCluster() {
- return cluster.getHttpAddresses();
- }
-
- @Override
- protected String getBucketName() {
- return BUCKET;
- }
-
- @Override
- protected String getBasePath() {
- return BASE_PATH;
- }
-
- @Override
- protected String getClientName() {
- return CLIENT;
- }
-}
diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java
index d6fc86a0afe34..20e53561cdbc8 100644
--- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java
+++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java
@@ -10,6 +10,7 @@
package org.elasticsearch.repositories.s3;
import fixture.aws.DynamicAwsCredentials;
+import fixture.aws.DynamicRegionSupplier;
import fixture.aws.imds.Ec2ImdsHttpFixture;
import fixture.aws.imds.Ec2ImdsServiceBuilder;
import fixture.aws.imds.Ec2ImdsVersion;
@@ -24,6 +25,8 @@
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
+import java.util.function.Supplier;
+
@ThreadLeakFilters(filters = { TestContainersThreadFilter.class })
@ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482
public class RepositoryS3ImdsV2CredentialsRestIT extends AbstractRepositoryS3RestTestCase {
@@ -33,10 +36,12 @@ public class RepositoryS3ImdsV2CredentialsRestIT extends AbstractRepositoryS3Res
private static final String BASE_PATH = PREFIX + "base_path";
private static final String CLIENT = "imdsv2_credentials_client";
- private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials("*", "s3");
+ private static final Supplier<String> regionSupplier = new DynamicRegionSupplier();
+ private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials(regionSupplier, "s3");
private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture(
new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V2).newCredentialsConsumer(dynamicCredentials::addValidCredentials)
+ .instanceIdentityDocument((b, p) -> b.field("region", regionSupplier.get()))
);
private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicCredentials::isAuthorized);
@@ -44,7 +49,7 @@ public class RepositoryS3ImdsV2CredentialsRestIT extends AbstractRepositoryS3Res
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.module("repository-s3")
.setting("s3.client." + CLIENT + ".endpoint", s3Fixture::getAddress)
- .systemProperty(Ec2ImdsHttpFixture.ENDPOINT_OVERRIDE_SYSPROP_NAME, ec2ImdsHttpFixture::getAddress)
+ .systemProperty(Ec2ImdsHttpFixture.ENDPOINT_OVERRIDE_SYSPROP_NAME_SDK2, ec2ImdsHttpFixture::getAddress)
.build();
@ClassRule
diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestReloadCredentialsIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestReloadCredentialsIT.java
index 0faea267a4221..28bd88da8ddd3 100644
--- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestReloadCredentialsIT.java
+++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestReloadCredentialsIT.java
@@ -92,10 +92,7 @@ public void testReloadCredentialsFromKeystore() throws IOException {
// Ensure that initial credentials now invalid
final var accessDeniedException2 = expectThrows(ResponseException.class, () -> client().performRequest(verifyRequest));
assertThat(accessDeniedException2.getResponse().getStatusLine().getStatusCode(), equalTo(500));
- assertThat(
- accessDeniedException2.getMessage(),
- allOf(containsString("Access denied"), containsString("Status Code: 403"), containsString("Error Code: AccessDenied"))
- );
+ assertThat(accessDeniedException2.getMessage(), allOf(containsString("Access denied"), containsString("Status Code: 403")));
// Set up refreshed credentials
keystoreSettings.put("s3.client.default.access_key", accessKey2);
diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsCredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsCredentialsRestIT.java
index a58645363b0e9..324a6d7e01f7a 100644
--- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsCredentialsRestIT.java
+++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsCredentialsRestIT.java
@@ -10,7 +10,9 @@
package org.elasticsearch.repositories.s3;
import fixture.aws.DynamicAwsCredentials;
+import fixture.aws.DynamicRegionSupplier;
import fixture.aws.sts.AwsStsHttpFixture;
+import fixture.aws.sts.AwsStsHttpHandler;
import fixture.s3.S3HttpFixture;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
@@ -23,6 +25,8 @@
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
+import java.util.function.Supplier;
+
@ThreadLeakFilters(filters = { TestContainersThreadFilter.class })
@ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482
public class RepositoryS3StsCredentialsRestIT extends AbstractRepositoryS3RestTestCase {
@@ -32,7 +36,8 @@ public class RepositoryS3StsCredentialsRestIT extends AbstractRepositoryS3RestTe
private static final String BASE_PATH = PREFIX + "base_path";
private static final String CLIENT = "sts_credentials_client";
- private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials("*", "s3");
+ private static final Supplier<String> regionSupplier = new DynamicRegionSupplier();
+ private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials(regionSupplier, "s3");
private static final S3HttpFixture s3HttpFixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicCredentials::isAuthorized);
@@ -49,19 +54,22 @@ public class RepositoryS3StsCredentialsRestIT extends AbstractRepositoryS3RestTe
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.module("repository-s3")
.setting("s3.client." + CLIENT + ".endpoint", s3HttpFixture::getAddress)
- .systemProperty(
- "com.amazonaws.sdk.stsMetadataServiceEndpointOverride",
- () -> stsHttpFixture.getAddress() + "/assume-role-with-web-identity"
- )
+ .systemProperty("org.elasticsearch.repositories.s3.stsEndpointOverride", stsHttpFixture::getAddress)
.configFile(
S3Service.CustomWebIdentityTokenCredentialsProvider.WEB_IDENTITY_TOKEN_FILE_LOCATION,
Resource.fromString(WEB_IDENTITY_TOKEN_FILE_CONTENTS)
)
- .environment("AWS_WEB_IDENTITY_TOKEN_FILE", S3Service.CustomWebIdentityTokenCredentialsProvider.WEB_IDENTITY_TOKEN_FILE_LOCATION)
- // The AWS STS SDK requires the role and session names to be set. We can verify that they are sent to S3S in the
- // S3HttpFixtureWithSTS fixture
- .environment("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/FederatedWebIdentityRole")
- .environment("AWS_ROLE_SESSION_NAME", "sts-fixture-test")
+ // When running in EKS with container identity the environment variable `AWS_WEB_IDENTITY_TOKEN_FILE` will point to a file which
+ // ES cannot access due to its security policy; we override it with `${ES_CONF_PATH}/repository-s3/aws-web-identity-token-file`
+ // and require the user to set up a symlink at this location. Thus we can set `AWS_WEB_IDENTITY_TOKEN_FILE` to any old path:
+ .environment("AWS_WEB_IDENTITY_TOKEN_FILE", () -> randomIdentifier() + "/" + randomIdentifier())
+ // The AWS STS SDK requires the role ARN, it also accepts a session name but will make one up if it's not set.
+ // These are checked in AwsStsHttpHandler:
+ .environment("AWS_ROLE_ARN", AwsStsHttpHandler.ROLE_ARN)
+ .environment("AWS_ROLE_SESSION_NAME", AwsStsHttpHandler.ROLE_NAME)
+ // SDKv2 always uses regional endpoints
+ .environment("AWS_STS_REGIONAL_ENDPOINTS", () -> randomBoolean() ? "regional" : null)
+ .environment("AWS_REGION", regionSupplier)
.build();
@ClassRule
diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java
index e098422ab8b98..90666284a25a3 100644
--- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java
+++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java
@@ -9,22 +9,24 @@
package org.elasticsearch.repositories.s3;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3Client;
+import software.amazon.awssdk.http.SdkHttpClient;
+import software.amazon.awssdk.services.s3.S3Client;
import org.elasticsearch.core.AbstractRefCounted;
import org.elasticsearch.core.Releasable;
/**
- * Handles the shutdown of the wrapped {@link AmazonS3Client} using reference
- * counting.
+ * Handles the shutdown of the wrapped {@link S3Client} using reference counting.
*/
public class AmazonS3Reference extends AbstractRefCounted implements Releasable {
- private final AmazonS3 client;
+ private final S3Client client;
+ /** The S3Client shutdown logic does not handle shutdown of the HttpClient passed into it. So we must manually handle that. */
+ private final SdkHttpClient httpClient;
- AmazonS3Reference(AmazonS3 client) {
+ AmazonS3Reference(S3Client client, SdkHttpClient httpClient) {
this.client = client;
+ this.httpClient = httpClient;
}
/**
@@ -39,13 +41,14 @@ public void close() {
* Returns the underlying `AmazonS3` client. All method calls are permitted BUT
* NOT shutdown. Shutdown is called when reference count reaches 0.
*/
- public AmazonS3 client() {
+ public S3Client client() {
return client;
}
@Override
protected void closeInternal() {
- client.shutdown();
+ client.close();
+ httpClient.close();
}
}
diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/HttpScheme.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/HttpScheme.java
new file mode 100644
index 0000000000000..cbfe3780f2c8c
--- /dev/null
+++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/HttpScheme.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.repositories.s3;
+
+public enum HttpScheme {
+ HTTP("http"),
+ HTTPS("https");
+
+ private final String schemeString;
+
+ HttpScheme(String schemeString) {
+ this.schemeString = schemeString;
+ }
+
+ public String getSchemeString() {
+ return schemeString;
+ }
+}
diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/RegionFromEndpointGuesser.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/RegionFromEndpointGuesser.java
new file mode 100644
index 0000000000000..6de784c7a7dc2
--- /dev/null
+++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/RegionFromEndpointGuesser.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.repositories.s3;
+
+import org.elasticsearch.core.Nullable;
+import org.elasticsearch.core.SuppressForbidden;
+
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Simple mapping from S3 endpoint hostnames to AWS region names, in case the user does not specify a region. This allows Elasticsearch to
+ * guess an appropriate AWS region name, and keep working, if a user does not specify one. This is a best-effort attempt for backwards
+ * compatibility: AWS SDK V1 would extrapolate the correct region but V2 considers it a fatal error not to be told the region name
+ * explicitly.
+ *
+ * The mapping is loaded from the JAR resource named {@code regions_by_endpoint.txt}.
+ *
+ */
+class RegionFromEndpointGuesser {
+ private static final Map<String, String> regionsByEndpoint;
+
+ static {
+ try (
+ var resourceStream = readFromJarResourceUrl(RegionFromEndpointGuesser.class.getResource("regions_by_endpoint.txt"));
+ var reader = new BufferedReader(new InputStreamReader(resourceStream, StandardCharsets.UTF_8))
+ ) {
+ final var builder = new HashMap<String, String>();
+ while (true) {
+ final var line = reader.readLine();
+ if (line == null) {
+ break;
+ }
+ final var parts = line.split(" +");
+ if (parts.length != 2) {
+ throw new IllegalStateException("invalid regions_by_endpoint.txt line: " + line);
+ }
+ builder.put(parts[1], parts[0]);
+ }
+ regionsByEndpoint = Map.copyOf(builder);
+ } catch (Exception e) {
+ assert false : e;
+ throw new IllegalStateException("could not read regions_by_endpoint.txt", e);
+ }
+ }
+
+ @SuppressForbidden(reason = "reads resource from jar")
+ private static InputStream readFromJarResourceUrl(URL source) throws IOException {
+ if (source == null) {
+ throw new FileNotFoundException("links resource not found at [" + source + "]");
+ }
+ return source.openStream();
+ }
+
+ /**
+ * @return a guess at the region name for the given S3 endpoint, or {@code null} if the endpoint is not recognised.
+ */
+ @Nullable
+ static String guessRegion(@Nullable String endpoint) {
+ if (endpoint == null) {
+ return null;
+ }
+
+ if (endpoint.startsWith("https://")) {
+ endpoint = endpoint.substring("https://".length());
+ }
+
+ return regionsByEndpoint.get(endpoint);
+ }
+
+}
diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BasicCredentials.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BasicCredentials.java
deleted file mode 100644
index ab9ba64d1fa69..0000000000000
--- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BasicCredentials.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-package org.elasticsearch.repositories.s3;
-
-import com.amazonaws.auth.AWSCredentials;
-
-import java.util.Objects;
-
-class S3BasicCredentials implements AWSCredentials {
-
- private final String accessKey;
-
- private final String secretKey;
-
- S3BasicCredentials(String accessKey, String secretKey) {
- this.accessKey = accessKey;
- this.secretKey = secretKey;
- }
-
- @Override
- public final String getAWSAccessKeyId() {
- return accessKey;
- }
-
- @Override
- public final String getAWSSecretKey() {
- return secretKey;
- }
-
- @Override
- public boolean equals(final Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
- final S3BasicCredentials that = (S3BasicCredentials) o;
- return accessKey.equals(that.accessKey) && secretKey.equals(that.secretKey);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(accessKey, secretKey);
- }
-}
diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BasicSessionCredentials.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BasicSessionCredentials.java
deleted file mode 100644
index 0dee56938c408..0000000000000
--- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BasicSessionCredentials.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-package org.elasticsearch.repositories.s3;
-
-import com.amazonaws.auth.AWSSessionCredentials;
-
-import java.util.Objects;
-
-final class S3BasicSessionCredentials extends S3BasicCredentials implements AWSSessionCredentials {
-
- private final String sessionToken;
-
- S3BasicSessionCredentials(String accessKey, String secretKey, String sessionToken) {
- super(accessKey, secretKey);
- this.sessionToken = sessionToken;
- }
-
- @Override
- public String getSessionToken() {
- return sessionToken;
- }
-
- @Override
- public boolean equals(final Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
- final S3BasicSessionCredentials that = (S3BasicSessionCredentials) o;
- return sessionToken.equals(that.sessionToken)
- && getAWSAccessKeyId().equals(that.getAWSAccessKeyId())
- && getAWSSecretKey().equals(that.getAWSSecretKey());
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(sessionToken, getAWSAccessKeyId(), getAWSSecretKey());
- }
-}
diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java
index 6f9ac01929eca..8d0878e811883 100644
--- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java
+++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java
@@ -9,28 +9,29 @@
package org.elasticsearch.repositories.s3;
-import com.amazonaws.AmazonClientException;
-import com.amazonaws.AmazonServiceException;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
-import com.amazonaws.services.s3.model.AmazonS3Exception;
-import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
-import com.amazonaws.services.s3.model.CopyObjectRequest;
-import com.amazonaws.services.s3.model.CopyPartRequest;
-import com.amazonaws.services.s3.model.GetObjectMetadataRequest;
-import com.amazonaws.services.s3.model.GetObjectRequest;
-import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
-import com.amazonaws.services.s3.model.ListMultipartUploadsRequest;
-import com.amazonaws.services.s3.model.ListNextBatchOfObjectsRequest;
-import com.amazonaws.services.s3.model.ListObjectsRequest;
-import com.amazonaws.services.s3.model.MultipartUpload;
-import com.amazonaws.services.s3.model.ObjectListing;
-import com.amazonaws.services.s3.model.ObjectMetadata;
-import com.amazonaws.services.s3.model.PartETag;
-import com.amazonaws.services.s3.model.PutObjectRequest;
-import com.amazonaws.services.s3.model.UploadPartRequest;
-import com.amazonaws.services.s3.model.UploadPartResult;
-import com.amazonaws.util.ValidationUtils;
+import software.amazon.awssdk.awscore.exception.AwsServiceException;
+import software.amazon.awssdk.core.exception.SdkException;
+import software.amazon.awssdk.core.exception.SdkServiceException;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest;
+import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest;
+import software.amazon.awssdk.services.s3.model.CompletedPart;
+import software.amazon.awssdk.services.s3.model.CopyObjectRequest;
+import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectRequest;
+import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
+import software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest;
+import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
+import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;
+import software.amazon.awssdk.services.s3.model.MultipartUpload;
+import software.amazon.awssdk.services.s3.model.PutObjectRequest;
+import software.amazon.awssdk.services.s3.model.S3Exception;
+import software.amazon.awssdk.services.s3.model.SdkPartType;
+import software.amazon.awssdk.services.s3.model.ServerSideEncryption;
+import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest;
+import software.amazon.awssdk.services.s3.model.UploadPartRequest;
+import software.amazon.awssdk.services.s3.model.UploadPartResponse;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
@@ -76,12 +77,11 @@
import java.nio.file.NoSuchFileException;
import java.time.Instant;
import java.util.ArrayList;
-import java.util.Date;
+import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
-import java.util.function.Function;
import java.util.stream.Collectors;
import static org.elasticsearch.common.blobstore.support.BlobContainerUtils.getRegisterUsingConsistentRead;
@@ -165,77 +165,94 @@ public void writeMetadataBlob(
) throws IOException {
assert purpose != OperationPurpose.SNAPSHOT_DATA && BlobContainer.assertPurposeConsistency(purpose, blobName) : purpose;
final String absoluteBlobKey = buildKey(blobName);
- try (ChunkedBlobOutputStream out = new ChunkedBlobOutputStream<>(blobStore.bigArrays(), blobStore.bufferSizeInBytes()) {
+ try (
+ ChunkedBlobOutputStream out = new ChunkedBlobOutputStream<>(
+ blobStore.bigArrays(),
+ blobStore.bufferSizeInBytes()
+ ) {
- private final SetOnce uploadId = new SetOnce<>();
+ private final SetOnce uploadId = new SetOnce<>();
- @Override
- protected void flushBuffer() throws IOException {
- flushBuffer(false);
- }
-
- private void flushBuffer(boolean lastPart) throws IOException {
- if (buffer.size() == 0) {
- return;
+ @Override
+ protected void flushBuffer() throws IOException {
+ flushBuffer(false);
}
- if (flushedBytes == 0L) {
- assert lastPart == false : "use single part upload if there's only a single part";
- try (AmazonS3Reference clientReference = blobStore.clientReference()) {
- uploadId.set(
- SocketAccess.doPrivileged(
- () -> clientReference.client()
- .initiateMultipartUpload(initiateMultiPartUpload(purpose, absoluteBlobKey))
- .getUploadId()
- )
- );
+
+ private void flushBuffer(boolean lastPart) throws IOException {
+ if (buffer.size() == 0) {
+ return;
}
- if (Strings.isEmpty(uploadId.get())) {
- throw new IOException("Failed to initialize multipart upload " + absoluteBlobKey);
+ if (flushedBytes == 0L) {
+ assert lastPart == false : "use single part upload if there's only a single part";
+ try (var clientReference = blobStore.clientReference()) {
+ uploadId.set(
+ SocketAccess.doPrivileged(
+ () -> clientReference.client()
+ .createMultipartUpload(
+ createMultipartUpload(purpose, Operation.PUT_MULTIPART_OBJECT, absoluteBlobKey)
+ )
+ .uploadId()
+ )
+ );
+ }
+ if (Strings.isEmpty(uploadId.get())) {
+ throw new IOException("Failed to initialize multipart upload " + absoluteBlobKey);
+ }
}
- }
- assert lastPart == false || successful : "must only write last part if successful";
- final UploadPartRequest uploadRequest = createPartUploadRequest(
- purpose,
- buffer.bytes().streamInput(),
- uploadId.get(),
- parts.size() + 1,
- absoluteBlobKey,
- buffer.size(),
- lastPart
- );
- final UploadPartResult uploadResponse;
- try (AmazonS3Reference clientReference = blobStore.clientReference()) {
- uploadResponse = SocketAccess.doPrivileged(() -> clientReference.client().uploadPart(uploadRequest));
- }
- finishPart(uploadResponse.getPartETag());
- }
-
- @Override
- protected void onCompletion() throws IOException {
- if (flushedBytes == 0L) {
- writeBlob(purpose, blobName, buffer.bytes(), failIfAlreadyExists);
- } else {
- flushBuffer(true);
- final CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest(
- blobStore.bucket(),
- absoluteBlobKey,
+ assert lastPart == false || successful : "must only write last part if successful";
+ final UploadPartRequest uploadRequest = createPartUploadRequest(
+ purpose,
uploadId.get(),
- parts
+ parts.size() + 1,
+ absoluteBlobKey,
+ buffer.size(),
+ lastPart
);
- S3BlobStore.configureRequestForMetrics(complRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose);
- try (AmazonS3Reference clientReference = blobStore.clientReference()) {
- SocketAccess.doPrivilegedVoid(() -> clientReference.client().completeMultipartUpload(complRequest));
+ final InputStream partContentStream = buffer.bytes().streamInput();
+ final UploadPartResponse uploadResponse;
+ try (var clientReference = blobStore.clientReference()) {
+ uploadResponse = SocketAccess.doPrivileged(
+ () -> clientReference.client()
+ .uploadPart(uploadRequest, RequestBody.fromInputStream(partContentStream, buffer.size()))
+ );
}
+ finishPart(CompletedPart.builder().partNumber(parts.size() + 1).eTag(uploadResponse.eTag()).build());
}
- }
- @Override
- protected void onFailure() {
- if (Strings.hasText(uploadId.get())) {
- abortMultiPartUpload(purpose, uploadId.get(), absoluteBlobKey);
+ @Override
+ protected void onCompletion() throws IOException {
+ if (flushedBytes == 0L) {
+ writeBlob(purpose, blobName, buffer.bytes(), failIfAlreadyExists);
+ } else {
+ flushBuffer(true);
+ final var completeMultipartUploadRequestBuilder = CompleteMultipartUploadRequest.builder()
+ .bucket(blobStore.bucket())
+ .key(absoluteBlobKey)
+ .uploadId(uploadId.get())
+ .multipartUpload(b -> b.parts(parts));
+ S3BlobStore.configureRequestForMetrics(
+ completeMultipartUploadRequestBuilder,
+ blobStore,
+ Operation.PUT_MULTIPART_OBJECT,
+ purpose
+ );
+ final var completeMultipartUploadRequest = completeMultipartUploadRequestBuilder.build();
+ try (var clientReference = blobStore.clientReference()) {
+ SocketAccess.doPrivilegedVoid(
+ () -> clientReference.client().completeMultipartUpload(completeMultipartUploadRequest)
+ );
+ }
+ }
+ }
+
+ @Override
+ protected void onFailure() {
+ if (Strings.hasText(uploadId.get())) {
+ abortMultiPartUpload(purpose, uploadId.get(), absoluteBlobKey);
+ }
}
}
- }) {
+ ) {
writer.accept(out);
out.markSuccess();
}
@@ -244,14 +261,12 @@ protected void onFailure() {
// This method is largely copied from AmazonS3Client#doesObjectExist with the ability to instrument the getObjectMetadataRequest
private boolean doesObjectExist(OperationPurpose purpose, AmazonS3Reference clientReference, String bucketName, String objectName) {
try {
- ValidationUtils.assertStringNotEmpty(bucketName, "bucketName");
- ValidationUtils.assertStringNotEmpty(objectName, "objectName");
- final var getObjectMetadataRequest = new GetObjectMetadataRequest(bucketName, objectName);
- S3BlobStore.configureRequestForMetrics(getObjectMetadataRequest, blobStore, Operation.HEAD_OBJECT, purpose);
- clientReference.client().getObjectMetadata(getObjectMetadataRequest);
+ final var headObjectRequestBuilder = HeadObjectRequest.builder().bucket(bucketName).key(objectName);
+ S3BlobStore.configureRequestForMetrics(headObjectRequestBuilder, blobStore, Operation.HEAD_OBJECT, purpose);
+ clientReference.client().headObject(headObjectRequestBuilder.build());
return true;
- } catch (AmazonS3Exception e) {
- if (e.getStatusCode() == 404) {
+ } catch (S3Exception e) {
+ if (e.statusCode() == 404) {
return false;
}
throw e;
@@ -260,44 +275,46 @@ private boolean doesObjectExist(OperationPurpose purpose, AmazonS3Reference clie
private UploadPartRequest createPartUploadRequest(
OperationPurpose purpose,
- InputStream stream,
String uploadId,
int number,
String blobName,
long size,
boolean lastPart
) {
- final UploadPartRequest uploadRequest = new UploadPartRequest();
- uploadRequest.setBucketName(blobStore.bucket());
- uploadRequest.setKey(blobName);
- uploadRequest.setUploadId(uploadId);
- uploadRequest.setPartNumber(number);
- uploadRequest.setInputStream(stream);
- S3BlobStore.configureRequestForMetrics(uploadRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose);
- uploadRequest.setPartSize(size);
- uploadRequest.setLastPart(lastPart);
- return uploadRequest;
+ final var uploadPartRequestBuilder = UploadPartRequest.builder();
+ uploadPartRequestBuilder.bucket(blobStore.bucket());
+ uploadPartRequestBuilder.key(blobName);
+ uploadPartRequestBuilder.uploadId(uploadId);
+ uploadPartRequestBuilder.partNumber(number);
+ uploadPartRequestBuilder.contentLength(size);
+ uploadPartRequestBuilder.sdkPartType(lastPart ? SdkPartType.LAST : SdkPartType.DEFAULT);
+ S3BlobStore.configureRequestForMetrics(uploadPartRequestBuilder, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose);
+ return uploadPartRequestBuilder.build();
}
private void abortMultiPartUpload(OperationPurpose purpose, String uploadId, String blobName) {
- final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(blobStore.bucket(), blobName, uploadId);
- S3BlobStore.configureRequestForMetrics(abortRequest, blobStore, Operation.ABORT_MULTIPART_OBJECT, purpose);
- try (AmazonS3Reference clientReference = blobStore.clientReference()) {
- SocketAccess.doPrivilegedVoid(() -> clientReference.client().abortMultipartUpload(abortRequest));
+ final var abortMultipartUploadRequestBuilder = AbortMultipartUploadRequest.builder()
+ .bucket(blobStore.bucket())
+ .key(blobName)
+ .uploadId(uploadId);
+ S3BlobStore.configureRequestForMetrics(abortMultipartUploadRequestBuilder, blobStore, Operation.ABORT_MULTIPART_OBJECT, purpose);
+ final var abortMultipartUploadRequest = abortMultipartUploadRequestBuilder.build();
+ try (var clientReference = blobStore.clientReference()) {
+ SocketAccess.doPrivilegedVoid(() -> clientReference.client().abortMultipartUpload(abortMultipartUploadRequest));
}
}
- private InitiateMultipartUploadRequest initiateMultiPartUpload(OperationPurpose purpose, String blobName) {
- final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(blobStore.bucket(), blobName);
- initRequest.setStorageClass(blobStore.getStorageClass());
- initRequest.setCannedACL(blobStore.getCannedACL());
- S3BlobStore.configureRequestForMetrics(initRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose);
+ private CreateMultipartUploadRequest createMultipartUpload(OperationPurpose purpose, Operation operation, String blobName) {
+ final var createMultipartUploadRequestBuilder = CreateMultipartUploadRequest.builder()
+ .bucket(blobStore.bucket())
+ .key(blobName)
+ .storageClass(blobStore.getStorageClass())
+ .acl(blobStore.getCannedACL());
if (blobStore.serverSideEncryption()) {
- final ObjectMetadata md = new ObjectMetadata();
- md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
- initRequest.setObjectMetadata(md);
+ createMultipartUploadRequestBuilder.serverSideEncryption(ServerSideEncryption.AES256);
}
- return initRequest;
+ S3BlobStore.configureRequestForMetrics(createMultipartUploadRequestBuilder, blobStore, operation, purpose);
+ return createMultipartUploadRequestBuilder.build();
}
// package private for testing
@@ -364,23 +381,23 @@ public void copyBlob(
} else {
// metadata is inherited from source, but not canned ACL or storage class
final var blobKey = buildKey(blobName);
- final CopyObjectRequest copyRequest = new CopyObjectRequest(
- s3SourceBlobContainer.blobStore.bucket(),
- s3SourceBlobContainer.buildKey(sourceBlobName),
- blobStore.bucket(),
- blobKey
- ).withCannedAccessControlList(blobStore.getCannedACL()).withStorageClass(blobStore.getStorageClass());
-
- S3BlobStore.configureRequestForMetrics(copyRequest, blobStore, Operation.COPY_OBJECT, purpose);
-
+ final CopyObjectRequest.Builder copyObjectRequestBuilder = CopyObjectRequest.builder()
+ .sourceBucket(s3SourceBlobContainer.blobStore.bucket())
+ .sourceKey(s3SourceBlobContainer.buildKey(sourceBlobName))
+ .destinationBucket(blobStore.bucket())
+ .destinationKey(blobKey)
+ .acl(blobStore.getCannedACL())
+ .storageClass(blobStore.getStorageClass());
+ S3BlobStore.configureRequestForMetrics(copyObjectRequestBuilder, blobStore, Operation.COPY_OBJECT, purpose);
+ final var copyObjectRequest = copyObjectRequestBuilder.build();
try (AmazonS3Reference clientReference = blobStore.clientReference()) {
- SocketAccess.doPrivilegedVoid(() -> { clientReference.client().copyObject(copyRequest); });
+ SocketAccess.doPrivilegedVoid(() -> clientReference.client().copyObject(copyObjectRequest));
}
}
- } catch (final AmazonClientException e) {
- if (e instanceof AmazonServiceException ase && ase.getStatusCode() == RestStatus.NOT_FOUND.getStatus()) {
+ } catch (final Exception e) {
+ if (e instanceof SdkServiceException sse && sse.statusCode() == RestStatus.NOT_FOUND.getStatus()) {
throw new NoSuchFileException(
- "Copy source [" + s3SourceBlobContainer.buildKey(sourceBlobName) + "] not found: " + ase.getMessage()
+ "Copy source [" + s3SourceBlobContainer.buildKey(sourceBlobName) + "] not found: " + sse.getMessage()
);
}
throw new IOException("Unable to copy object [" + blobName + "] from [" + sourceBlobContainer + "][" + sourceBlobName + "]", e);
@@ -391,35 +408,30 @@ public void copyBlob(
public DeleteResult delete(OperationPurpose purpose) throws IOException {
final AtomicLong deletedBlobs = new AtomicLong();
final AtomicLong deletedBytes = new AtomicLong();
- try (AmazonS3Reference clientReference = blobStore.clientReference()) {
- ObjectListing prevListing = null;
+ try (var clientReference = blobStore.clientReference()) {
+ ListObjectsV2Response prevListing = null;
while (true) {
- final ObjectListing list;
+ final var listObjectsRequestBuilder = ListObjectsV2Request.builder().bucket(blobStore.bucket()).prefix(keyPath);
+ S3BlobStore.configureRequestForMetrics(listObjectsRequestBuilder, blobStore, Operation.LIST_OBJECTS, purpose);
if (prevListing != null) {
- final var listNextBatchOfObjectsRequest = new ListNextBatchOfObjectsRequest(prevListing);
- S3BlobStore.configureRequestForMetrics(listNextBatchOfObjectsRequest, blobStore, Operation.LIST_OBJECTS, purpose);
- list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(listNextBatchOfObjectsRequest));
- } else {
- final ListObjectsRequest listObjectsRequest = new ListObjectsRequest();
- listObjectsRequest.setBucketName(blobStore.bucket());
- listObjectsRequest.setPrefix(keyPath);
- S3BlobStore.configureRequestForMetrics(listObjectsRequest, blobStore, Operation.LIST_OBJECTS, purpose);
- list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest));
+ listObjectsRequestBuilder.continuationToken(prevListing.nextContinuationToken());
}
- final Iterator blobNameIterator = Iterators.map(list.getObjectSummaries().iterator(), summary -> {
+ final var listObjectsRequest = listObjectsRequestBuilder.build();
+ final var listObjectsResponse = SocketAccess.doPrivileged(() -> clientReference.client().listObjectsV2(listObjectsRequest));
+ final Iterator blobNameIterator = Iterators.map(listObjectsResponse.contents().iterator(), s3Object -> {
deletedBlobs.incrementAndGet();
- deletedBytes.addAndGet(summary.getSize());
- return summary.getKey();
+ deletedBytes.addAndGet(s3Object.size());
+ return s3Object.key();
});
- if (list.isTruncated()) {
+ if (listObjectsResponse.isTruncated()) {
blobStore.deleteBlobs(purpose, blobNameIterator);
- prevListing = list;
+ prevListing = listObjectsResponse;
} else {
blobStore.deleteBlobs(purpose, Iterators.concat(blobNameIterator, Iterators.single(keyPath)));
break;
}
}
- } catch (final AmazonClientException e) {
+ } catch (final SdkException e) {
throw new IOException("Exception when deleting blob container [" + keyPath + "]", e);
}
return new DeleteResult(deletedBlobs.get(), deletedBytes.get());
@@ -433,12 +445,21 @@ public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator listBlobsByPrefix(OperationPurpose purpose, @Nullable String blobNamePrefix) throws IOException {
try {
- return executeListing(purpose, listObjectsRequest(purpose, blobNamePrefix == null ? keyPath : buildKey(blobNamePrefix)))
- .stream()
- .flatMap(listing -> listing.getObjectSummaries().stream())
- .map(summary -> new BlobMetadata(summary.getKey().substring(keyPath.length()), summary.getSize()))
- .collect(Collectors.toMap(BlobMetadata::name, Function.identity()));
- } catch (final AmazonClientException e) {
+ final var results = new HashMap();
+ final var iterator = executeListing(purpose, blobNamePrefix == null ? keyPath : buildKey(blobNamePrefix));
+ while (iterator.hasNext()) {
+ final var currentPage = iterator.next();
+ for (final var s3Object : currentPage.contents()) {
+ final var blobName = s3Object.key().substring(keyPath.length());
+ if (results.put(blobName, new BlobMetadata(blobName, s3Object.size())) != null) {
+ throw new IllegalStateException(
+ "listing objects by prefix [" + blobNamePrefix + "] yielded multiple blobs with key [" + s3Object.key() + "]"
+ );
+ }
+ }
+ }
+ return results;
+ } catch (final SdkException e) {
throw new IOException("Exception when listing blobs by prefix [" + blobNamePrefix + "]", e);
}
}
@@ -451,57 +472,75 @@ public Map listBlobs(OperationPurpose purpose) throws IOEx
@Override
public Map children(OperationPurpose purpose) throws IOException {
try {
- return executeListing(purpose, listObjectsRequest(purpose, keyPath)).stream().flatMap(listing -> {
- assert listing.getObjectSummaries().stream().noneMatch(s -> {
- for (String commonPrefix : listing.getCommonPrefixes()) {
- if (s.getKey().substring(keyPath.length()).startsWith(commonPrefix)) {
- return true;
- }
+ final var results = new HashMap();
+ final var relativePrefixStart = keyPath.length();
+ final var iterator = executeListing(purpose, keyPath);
+ while (iterator.hasNext()) {
+ final var currentPage = iterator.next();
+ for (final var commonPrefix : currentPage.commonPrefixes()) {
+ final var absolutePrefix = commonPrefix.prefix();
+ if (absolutePrefix.length() <= relativePrefixStart + 1) {
+ continue;
}
- return false;
- }) : "Response contained children for listed common prefixes.";
- return listing.getCommonPrefixes().stream();
- })
- .map(prefix -> prefix.substring(keyPath.length()))
- .filter(name -> name.isEmpty() == false)
- // Stripping the trailing slash off of the common prefix
- .map(name -> name.substring(0, name.length() - 1))
- .collect(Collectors.toMap(Function.identity(), name -> blobStore.blobContainer(path().add(name))));
- } catch (final AmazonClientException e) {
+ final var relativePrefix = absolutePrefix.substring(relativePrefixStart, absolutePrefix.length() - 1);
+ assert relativePrefix.isEmpty() == false;
+ assert currentPage.contents().stream().noneMatch(s3Object -> s3Object.key().startsWith(absolutePrefix))
+ : "Response contained children for listed common prefix " + absolutePrefix;
+ if (results.put(relativePrefix, blobStore.blobContainer(path().add(relativePrefix))) != null) {
+ throw new IllegalStateException(
+ "listing child containers of [" + keyPath + "] yielded multiple children with key [" + relativePrefix + "]"
+ );
+ }
+ }
+ }
+ return results;
+ } catch (final SdkException e) {
throw new IOException("Exception when listing children of [" + path().buildAsString() + ']', e);
}
}
- private List executeListing(OperationPurpose purpose, ListObjectsRequest listObjectsRequest) {
- final List results = new ArrayList<>();
- ObjectListing prevListing = null;
- while (true) {
- ObjectListing list;
- try (AmazonS3Reference clientReference = blobStore.clientReference()) {
- if (prevListing != null) {
- final var listNextBatchOfObjectsRequest = new ListNextBatchOfObjectsRequest(prevListing);
- S3BlobStore.configureRequestForMetrics(listNextBatchOfObjectsRequest, blobStore, Operation.LIST_OBJECTS, purpose);
- list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(listNextBatchOfObjectsRequest));
- } else {
- list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest));
- }
+ private Iterator executeListing(OperationPurpose purpose, String pathPrefix) {
+ return new Iterator<>() {
+ @Nullable // if after last page
+ private ListObjectsV2Response nextResponse = listNextObjects(purpose, pathPrefix, null);
+
+ @Override
+ public boolean hasNext() {
+ return nextResponse != null;
}
- results.add(list);
- if (list.isTruncated()) {
- prevListing = list;
- } else {
- break;
+
+ @Override
+ public ListObjectsV2Response next() {
+ final var currentResponse = nextResponse;
+ nextResponse = currentResponse.nextContinuationToken() == null
+ ? null
+ : listNextObjects(purpose, pathPrefix, currentResponse);
+ return currentResponse;
}
- }
- return results;
+ };
}
- private ListObjectsRequest listObjectsRequest(OperationPurpose purpose, String pathPrefix) {
- final ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(blobStore.bucket())
- .withPrefix(pathPrefix)
- .withDelimiter("/");
- S3BlobStore.configureRequestForMetrics(listObjectsRequest, blobStore, Operation.LIST_OBJECTS, purpose);
- return listObjectsRequest;
+ private ListObjectsV2Response listNextObjects(
+ OperationPurpose operationPurpose,
+ String pathPrefix,
+ @Nullable /* if requesting the first page of objects */
+ ListObjectsV2Response previousResponse
+ ) {
+ try (var clientReference = blobStore.clientReference()) {
+ final var listObjectsRequestBuilder = ListObjectsV2Request.builder()
+ .bucket(blobStore.bucket())
+ .prefix(pathPrefix)
+ .delimiter("/");
+ if (previousResponse != null) {
+ if (previousResponse.nextContinuationToken() == null) {
+ throw new IllegalStateException("cannot request next page of object listing without a continuation token");
+ }
+ listObjectsRequestBuilder.continuationToken(previousResponse.nextContinuationToken());
+ }
+ S3BlobStore.configureRequestForMetrics(listObjectsRequestBuilder, blobStore, Operation.LIST_OBJECTS, operationPurpose);
+ final var listObjectsRequest = listObjectsRequestBuilder.build();
+ return SocketAccess.doPrivileged(() -> clientReference.client().listObjectsV2(listObjectsRequest));
+ }
}
// exposed for tests
@@ -519,39 +558,43 @@ void executeSingleUpload(
final InputStream input,
final long blobSize
) throws IOException {
+ try (var clientReference = s3BlobStore.clientReference()) {
+ // Extra safety checks
+ if (blobSize > MAX_FILE_SIZE.getBytes()) {
+ throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE);
+ }
+ if (blobSize > s3BlobStore.bufferSizeInBytes()) {
+ throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than buffer size");
+ }
- // Extra safety checks
- if (blobSize > MAX_FILE_SIZE.getBytes()) {
- throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE);
- }
- if (blobSize > s3BlobStore.bufferSizeInBytes()) {
- throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than buffer size");
- }
+ final var putRequestBuilder = PutObjectRequest.builder()
+ .bucket(s3BlobStore.bucket())
+ .key(blobName)
+ .contentLength(blobSize)
+ .storageClass(s3BlobStore.getStorageClass())
+ .acl(s3BlobStore.getCannedACL());
+ if (s3BlobStore.serverSideEncryption()) {
+ putRequestBuilder.serverSideEncryption(ServerSideEncryption.AES256);
+ }
+ S3BlobStore.configureRequestForMetrics(putRequestBuilder, blobStore, Operation.PUT_OBJECT, purpose);
- final ObjectMetadata md = new ObjectMetadata();
- md.setContentLength(blobSize);
- if (s3BlobStore.serverSideEncryption()) {
- md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
- }
- final PutObjectRequest putRequest = new PutObjectRequest(s3BlobStore.bucket(), blobName, input, md);
- putRequest.setStorageClass(s3BlobStore.getStorageClass());
- putRequest.setCannedAcl(s3BlobStore.getCannedACL());
- S3BlobStore.configureRequestForMetrics(putRequest, blobStore, Operation.PUT_OBJECT, purpose);
-
- try (AmazonS3Reference clientReference = s3BlobStore.clientReference()) {
- SocketAccess.doPrivilegedVoid(() -> clientReference.client().putObject(putRequest));
- } catch (final AmazonClientException e) {
+ final var putRequest = putRequestBuilder.build();
+ SocketAccess.doPrivilegedVoid(
+ () -> clientReference.client().putObject(putRequest, RequestBody.fromInputStream(input, blobSize))
+ );
+ } catch (final SdkException e) {
throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e);
}
}
private interface PartOperation {
- PartETag doPart(String uploadId, int partNum, long partSize, boolean lastPart);
+ CompletedPart doPart(String uploadId, int partNum, long partSize, boolean lastPart);
}
// for copy, blobName and s3BlobStore are the destination
private void executeMultipart(
final OperationPurpose purpose,
+ final Operation operation,
final S3BlobStore s3BlobStore,
final String blobName,
final long partSize,
@@ -570,28 +613,27 @@ private void executeMultipart(
final long lastPartSize = multiparts.v2();
assert blobSize == (((nbParts - 1) * partSize) + lastPartSize) : "blobSize does not match multipart sizes";
- final SetOnce uploadId = new SetOnce<>();
+ final List cleanupOnFailureActions = new ArrayList<>(1);
final String bucketName = s3BlobStore.bucket();
- boolean success = false;
try {
+ final String uploadId;
try (AmazonS3Reference clientReference = s3BlobStore.clientReference()) {
- uploadId.set(
- SocketAccess.doPrivileged(
- () -> clientReference.client().initiateMultipartUpload(initiateMultiPartUpload(purpose, blobName)).getUploadId()
- )
+ uploadId = SocketAccess.doPrivileged(
+ () -> clientReference.client().createMultipartUpload(createMultipartUpload(purpose, operation, blobName)).uploadId()
);
+ cleanupOnFailureActions.add(() -> abortMultiPartUpload(purpose, uploadId, blobName));
}
- if (Strings.isEmpty(uploadId.get())) {
+ if (Strings.isEmpty(uploadId)) {
throw new IOException("Failed to initialize multipart operation for " + blobName);
}
- final List parts = new ArrayList<>();
+ final List parts = new ArrayList<>();
long bytesCount = 0;
for (int i = 1; i <= nbParts; i++) {
final boolean lastPart = i == nbParts;
final var curPartSize = lastPart ? lastPartSize : partSize;
- final var partEtag = partOperation.doPart(uploadId.get(), i, curPartSize, lastPart);
+ final var partEtag = partOperation.doPart(uploadId, i, curPartSize, lastPart);
bytesCount += curPartSize;
parts.add(partEtag);
}
@@ -607,26 +649,24 @@ private void executeMultipart(
);
}
- final CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest(
- bucketName,
- blobName,
- uploadId.get(),
- parts
- );
- S3BlobStore.configureRequestForMetrics(complRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose);
- try (AmazonS3Reference clientReference = s3BlobStore.clientReference()) {
- SocketAccess.doPrivilegedVoid(() -> clientReference.client().completeMultipartUpload(complRequest));
+ final var completeMultipartUploadRequestBuilder = CompleteMultipartUploadRequest.builder()
+ .bucket(bucketName)
+ .key(blobName)
+ .uploadId(uploadId)
+ .multipartUpload(b -> b.parts(parts));
+ S3BlobStore.configureRequestForMetrics(completeMultipartUploadRequestBuilder, blobStore, operation, purpose);
+ final var completeMultipartUploadRequest = completeMultipartUploadRequestBuilder.build();
+ try (var clientReference = s3BlobStore.clientReference()) {
+ SocketAccess.doPrivilegedVoid(() -> clientReference.client().completeMultipartUpload(completeMultipartUploadRequest));
}
- success = true;
- } catch (final AmazonClientException e) {
- if (e instanceof AmazonServiceException ase && ase.getStatusCode() == RestStatus.NOT_FOUND.getStatus()) {
+ cleanupOnFailureActions.clear();
+ } catch (final Exception e) {
+ if (e instanceof SdkServiceException sse && sse.statusCode() == RestStatus.NOT_FOUND.getStatus()) {
throw new NoSuchFileException(blobName, null, e.getMessage());
}
throw new IOException("Unable to upload or copy object [" + blobName + "] using multipart upload", e);
} finally {
- if ((success == false) && Strings.hasLength(uploadId.get())) {
- abortMultiPartUpload(purpose, uploadId.get(), blobName);
- }
+ cleanupOnFailureActions.forEach(Runnable::run);
}
}
@@ -642,26 +682,20 @@ void executeMultipartUpload(
) throws IOException {
executeMultipart(
purpose,
+ Operation.PUT_MULTIPART_OBJECT,
s3BlobStore,
blobName,
s3BlobStore.bufferSizeInBytes(),
blobSize,
(uploadId, partNum, partSize, lastPart) -> {
- final UploadPartRequest uploadRequest = createPartUploadRequest(
- purpose,
- input,
- uploadId,
- partNum,
- blobName,
- partSize,
- lastPart
- );
+ final UploadPartRequest uploadRequest = createPartUploadRequest(purpose, uploadId, partNum, blobName, partSize, lastPart);
- try (AmazonS3Reference clientReference = s3BlobStore.clientReference()) {
- final UploadPartResult uploadResponse = SocketAccess.doPrivileged(
- () -> clientReference.client().uploadPart(uploadRequest)
+ try (var clientReference = s3BlobStore.clientReference()) {
+ final UploadPartResponse uploadResponse = SocketAccess.doPrivileged(
+ () -> clientReference.client().uploadPart(uploadRequest, RequestBody.fromInputStream(input, partSize))
);
- return uploadResponse.getPartETag();
+
+ return CompletedPart.builder().partNumber(partNum).eTag(uploadResponse.eTag()).build();
}
}
);
@@ -686,23 +720,34 @@ void executeMultipartCopy(
) throws IOException {
final long copyPartSize = MAX_FILE_SIZE.getBytes();
final var destinationKey = buildKey(destinationBlobName);
- executeMultipart(purpose, blobStore, destinationKey, copyPartSize, blobSize, ((uploadId, partNum, partSize, lastPart) -> {
- final long startOffset = (partNum - 1) * copyPartSize;
- final var request = new CopyPartRequest().withSourceBucketName(sourceContainer.blobStore.bucket())
- .withSourceKey(sourceContainer.buildKey(sourceBlobName))
- .withDestinationBucketName(blobStore.bucket())
- .withDestinationKey(destinationKey)
- .withUploadId(uploadId)
- .withPartNumber(partNum)
- .withFirstByte(startOffset)
- .withLastByte(startOffset + partSize - 1);
- S3BlobStore.configureRequestForMetrics(request, blobStore, Operation.COPY_MULTIPART_OBJECT, purpose);
-
- try (AmazonS3Reference clientReference = blobStore.clientReference()) {
- final var result = SocketAccess.doPrivileged(() -> clientReference.client().copyPart(request));
- return result.getPartETag();
- }
- }));
+ executeMultipart(
+ purpose,
+ Operation.COPY_MULTIPART_OBJECT,
+ blobStore,
+ destinationKey,
+ copyPartSize,
+ blobSize,
+ ((uploadId, partNum, partSize, lastPart) -> {
+ final long startOffset = (partNum - 1) * copyPartSize;
+ final var uploadPartCopyRequestBuilder = UploadPartCopyRequest.builder()
+ .sourceBucket(sourceContainer.blobStore.bucket())
+ .sourceKey(sourceContainer.buildKey(sourceBlobName))
+ .destinationBucket(blobStore.bucket())
+ .destinationKey(destinationKey)
+ .uploadId(uploadId)
+ .partNumber(partNum)
+ .copySourceRange("bytes=" + startOffset + "-" + (startOffset + partSize - 1));
+ S3BlobStore.configureRequestForMetrics(uploadPartCopyRequestBuilder, blobStore, Operation.COPY_MULTIPART_OBJECT, purpose);
+ final var uploadPartCopyRequest = uploadPartCopyRequestBuilder.build();
+
+ try (AmazonS3Reference clientReference = blobStore.clientReference()) {
+ final var uploadPartCopyResponse = SocketAccess.doPrivileged(
+ () -> clientReference.client().uploadPartCopy(uploadPartCopyRequest)
+ );
+ return CompletedPart.builder().partNumber(partNum).eTag(uploadPartCopyResponse.copyPartResult().eTag()).build();
+ }
+ })
+ );
}
// non-static, package private for testing
@@ -750,13 +795,13 @@ static Tuple numberOfMultiparts(final long totalSize, final long par
private class CompareAndExchangeOperation {
private final OperationPurpose purpose;
- private final AmazonS3 client;
+ private final S3Client client;
private final String bucket;
private final String rawKey;
private final String blobKey;
private final ThreadPool threadPool;
- CompareAndExchangeOperation(OperationPurpose purpose, AmazonS3 client, String bucket, String key, ThreadPool threadPool) {
+ CompareAndExchangeOperation(OperationPurpose purpose, S3Client client, String bucket, String key, ThreadPool threadPool) {
this.purpose = purpose;
this.client = client;
this.bucket = bucket;
@@ -779,9 +824,9 @@ void run(BytesReference expected, BytesReference updated, ActionListener upload.getInitiated().after(expiryDate))) {
+ final var expiryDate = Instant.ofEpochMilli(blobStore.getThreadPool().absoluteTimeInMillis() - timeToLiveMillis);
+ if (uploads.stream().anyMatch(upload -> upload.initiated().compareTo(expiryDate) > 0)) {
logger.trace("[{}] fresh preexisting uploads vs {}", blobKey, expiryDate);
return true;
}
// there are uploads, but they are all older than the TTL, so clean them up before carrying on (should be rare)
for (final var upload : uploads) {
- logger.warn(
- "cleaning up stale compare-and-swap upload [{}] initiated at [{}]",
- upload.getUploadId(),
- upload.getInitiated()
- );
- safeAbortMultipartUpload(upload.getUploadId());
+ logger.warn("cleaning up stale compare-and-swap upload [{}] initiated at [{}]", upload.uploadId(), upload.initiated());
+ safeAbortMultipartUpload(upload.uploadId());
}
logger.trace("[{}] stale preexisting uploads vs {}", blobKey, expiryDate);
@@ -882,53 +923,57 @@ private void logUploads(String description, List uploads) {
blobKey,
description,
uploads.stream()
- .map(multipartUpload -> multipartUpload.getUploadId() + ": " + multipartUpload.getInitiated())
+ .map(multipartUpload -> multipartUpload.uploadId() + ": " + multipartUpload.initiated())
.collect(Collectors.joining(","))
);
}
}
private List listMultipartUploads() {
- final var listRequest = new ListMultipartUploadsRequest(bucket);
- listRequest.setPrefix(blobKey);
- S3BlobStore.configureRequestForMetrics(listRequest, blobStore, Operation.LIST_OBJECTS, purpose);
+ final var listRequestBuilder = ListMultipartUploadsRequest.builder().bucket(bucket).prefix(blobKey);
+ S3BlobStore.configureRequestForMetrics(listRequestBuilder, blobStore, Operation.LIST_OBJECTS, purpose);
+ final var listRequest = listRequestBuilder.build();
try {
- return SocketAccess.doPrivileged(() -> client.listMultipartUploads(listRequest)).getMultipartUploads();
- } catch (AmazonS3Exception e) {
- if (e.getStatusCode() == 404) {
+ return SocketAccess.doPrivileged(() -> client.listMultipartUploads(listRequest)).uploads();
+ } catch (SdkServiceException e) {
+ if (e.statusCode() == 404) {
return List.of();
}
throw e;
}
}
- private String initiateMultipartUpload() {
- final var initiateRequest = new InitiateMultipartUploadRequest(bucket, blobKey);
- S3BlobStore.configureRequestForMetrics(initiateRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose);
- return SocketAccess.doPrivileged(() -> client.initiateMultipartUpload(initiateRequest)).getUploadId();
+ private String createMultipartUpload() {
+ final var createMultipartUploadRequestBuilder = CreateMultipartUploadRequest.builder().bucket(bucket).key(blobKey);
+ S3BlobStore.configureRequestForMetrics(createMultipartUploadRequestBuilder, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose);
+ final var createMultipartUploadRequest = createMultipartUploadRequestBuilder.build();
+ return SocketAccess.doPrivileged(() -> client.createMultipartUpload(createMultipartUploadRequest)).uploadId();
}
- private PartETag uploadPart(BytesReference updated, String uploadId) throws IOException {
- final var uploadPartRequest = new UploadPartRequest();
- uploadPartRequest.setBucketName(bucket);
- uploadPartRequest.setKey(blobKey);
- uploadPartRequest.setUploadId(uploadId);
- uploadPartRequest.setPartNumber(1);
- uploadPartRequest.setLastPart(true);
- uploadPartRequest.setInputStream(updated.streamInput());
- uploadPartRequest.setPartSize(updated.length());
- S3BlobStore.configureRequestForMetrics(uploadPartRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose);
- return SocketAccess.doPrivileged(() -> client.uploadPart(uploadPartRequest)).getPartETag();
+ private String uploadPartAndGetEtag(BytesReference updated, String uploadId) throws IOException {
+ final var uploadPartRequestBuilder = UploadPartRequest.builder();
+ uploadPartRequestBuilder.bucket(bucket);
+ uploadPartRequestBuilder.key(blobKey);
+ uploadPartRequestBuilder.uploadId(uploadId);
+ uploadPartRequestBuilder.partNumber(1);
+ uploadPartRequestBuilder.sdkPartType(SdkPartType.LAST);
+ S3BlobStore.configureRequestForMetrics(uploadPartRequestBuilder, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose);
+ return SocketAccess.doPrivilegedIOException(
+ () -> client.uploadPart(
+ uploadPartRequestBuilder.build(),
+ RequestBody.fromInputStream(updated.streamInput(), updated.length())
+ )
+ ).eTag();
}
private int getUploadIndex(String targetUploadId, List multipartUploads) {
var uploadIndex = 0;
var found = false;
- for (MultipartUpload multipartUpload : multipartUploads) {
- final var observedUploadId = multipartUpload.getUploadId();
+ for (final var multipartUpload : multipartUploads) {
+ final var observedUploadId = multipartUpload.uploadId();
if (observedUploadId.equals(targetUploadId)) {
final var currentTimeMillis = blobStore.getThreadPool().absoluteTimeInMillis();
- final var ageMillis = currentTimeMillis - multipartUpload.getInitiated().toInstant().toEpochMilli();
+ final var ageMillis = currentTimeMillis - multipartUpload.initiated().toEpochMilli();
final var expectedAgeRangeMillis = blobStore.getCompareAndExchangeTimeToLive().millis();
if (0 <= expectedAgeRangeMillis && (ageMillis < -expectedAgeRangeMillis || ageMillis > expectedAgeRangeMillis)) {
logger.warn(
@@ -937,8 +982,8 @@ private int getUploadIndex(String targetUploadId, List multipar
which deviates from local node epoch time [{}] by more than the warn threshold of [{}ms]""",
bucket,
blobKey,
- multipartUpload.getInitiated(),
- multipartUpload.getInitiated().toInstant().toEpochMilli(),
+ multipartUpload.initiated(),
+ multipartUpload.initiated().toEpochMilli(),
currentTimeMillis,
expectedAgeRangeMillis
);
@@ -991,7 +1036,7 @@ private void cancelOtherUploads(String uploadId, List currentUp
final var executor = blobStore.getSnapshotExecutor();
try (var listeners = new RefCountingListener(listener)) {
for (final var currentUpload : currentUploads) {
- final var currentUploadId = currentUpload.getUploadId();
+ final var currentUploadId = currentUpload.uploadId();
if (uploadId.equals(currentUploadId) == false) {
executor.execute(ActionRunnable.run(listeners.acquire(), () -> abortMultipartUploadIfExists(currentUploadId)));
}
@@ -1010,20 +1055,39 @@ private void safeAbortMultipartUpload(String uploadId) {
private void abortMultipartUploadIfExists(String uploadId) {
try {
- final var request = new AbortMultipartUploadRequest(bucket, blobKey, uploadId);
- S3BlobStore.configureRequestForMetrics(request, blobStore, Operation.ABORT_MULTIPART_OBJECT, purpose);
- SocketAccess.doPrivilegedVoid(() -> client.abortMultipartUpload(request));
- } catch (AmazonS3Exception e) {
- if (e.getStatusCode() != 404) {
+ final var abortMultipartUploadRequestBuilder = AbortMultipartUploadRequest.builder()
+ .bucket(bucket)
+ .key(blobKey)
+ .uploadId(uploadId);
+ S3BlobStore.configureRequestForMetrics(
+ abortMultipartUploadRequestBuilder,
+ blobStore,
+ Operation.ABORT_MULTIPART_OBJECT,
+ purpose
+ );
+ final var abortMultipartUploadRequest = abortMultipartUploadRequestBuilder.build();
+ SocketAccess.doPrivilegedVoid(() -> client.abortMultipartUpload(abortMultipartUploadRequest));
+ } catch (SdkServiceException e) {
+ if (e.statusCode() != 404) {
throw e;
}
// else already aborted
}
}
- private void completeMultipartUpload(String uploadId, PartETag partETag) {
- final var completeMultipartUploadRequest = new CompleteMultipartUploadRequest(bucket, blobKey, uploadId, List.of(partETag));
- S3BlobStore.configureRequestForMetrics(completeMultipartUploadRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose);
+ private void completeMultipartUpload(String uploadId, String partETag) {
+ final var completeMultipartUploadRequestBuilder = CompleteMultipartUploadRequest.builder()
+ .bucket(bucket)
+ .key(blobKey)
+ .uploadId(uploadId)
+ .multipartUpload(b -> b.parts(CompletedPart.builder().partNumber(1).eTag(partETag).build()));
+ S3BlobStore.configureRequestForMetrics(
+ completeMultipartUploadRequestBuilder,
+ blobStore,
+ Operation.PUT_MULTIPART_OBJECT,
+ purpose
+ );
+ final var completeMultipartUploadRequest = completeMultipartUploadRequestBuilder.build();
SocketAccess.doPrivilegedVoid(() -> client.completeMultipartUpload(completeMultipartUploadRequest));
}
}
@@ -1039,9 +1103,10 @@ public void compareAndExchangeRegister(
final var clientReference = blobStore.clientReference();
ActionListener.run(ActionListener.releaseAfter(listener.delegateResponse((delegate, e) -> {
logger.trace(() -> Strings.format("[%s]: compareAndExchangeRegister failed", key), e);
- if (e instanceof AmazonS3Exception amazonS3Exception
- && (amazonS3Exception.getStatusCode() == 404
- || amazonS3Exception.getStatusCode() == 200 && "NoSuchUpload".equals(amazonS3Exception.getErrorCode()))) {
+ if (e instanceof AwsServiceException awsServiceException
+ && (awsServiceException.statusCode() == 404
+ || awsServiceException.statusCode() == 200
+ && "NoSuchUpload".equals(awsServiceException.awsErrorDetails().errorCode()))) {
// An uncaught 404 means that our multipart upload was aborted by a concurrent operation before we could complete it.
// Also (rarely) S3 can start processing the request during a concurrent abort and this can result in a 200 OK with an
// NoSuchUpload... in the response. Either way, this means that our write encountered contention:
@@ -1068,17 +1133,17 @@ public void getRegister(OperationPurpose purpose, String key, ActionListener clientReference.client().getObject(getObjectRequest));
- var stream = s3Object.getObjectContent()
) {
- return OptionalBytesReference.of(getRegisterUsingConsistentRead(stream, keyPath, key));
+ return OptionalBytesReference.of(getRegisterUsingConsistentRead(s3Object, keyPath, key));
} catch (Exception attemptException) {
logger.trace(() -> Strings.format("[%s]: getRegister failed", key), attemptException);
- if (attemptException instanceof AmazonS3Exception amazonS3Exception && amazonS3Exception.getStatusCode() == 404) {
+ if (attemptException instanceof SdkServiceException sdkException && sdkException.statusCode() == 404) {
return OptionalBytesReference.EMPTY;
} else if (finalException == null) {
finalException = attemptException;
@@ -1106,10 +1171,19 @@ public void getRegister(OperationPurpose purpose, String key, ActionListener getMultipartUploadCleanupListener(int maxUploads, RefCountingRunnable refs) {
try (var clientReference = blobStore.clientReference()) {
final var bucket = blobStore.bucket();
- final var request = new ListMultipartUploadsRequest(bucket).withPrefix(keyPath).withMaxUploads(maxUploads);
- request.putCustomQueryParameter(S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, OperationPurpose.SNAPSHOT_DATA.getKey());
- final var multipartUploadListing = SocketAccess.doPrivileged(() -> clientReference.client().listMultipartUploads(request));
- final var multipartUploads = multipartUploadListing.getMultipartUploads();
+ final var listMultipartUploadsRequest = ListMultipartUploadsRequest.builder()
+ .bucket(bucket)
+ .prefix(keyPath)
+ .maxUploads(maxUploads)
+ // TODO adjust to use S3BlobStore.configureRequestForMetrics, adding metrics collection
+ .overrideConfiguration(
+ b -> b.putRawQueryParameter(S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, OperationPurpose.SNAPSHOT_DATA.getKey())
+ )
+ .build();
+ final var multipartUploadListing = SocketAccess.doPrivileged(
+ () -> clientReference.client().listMultipartUploads(listMultipartUploadsRequest)
+ );
+ final var multipartUploads = multipartUploadListing.uploads();
if (multipartUploads.isEmpty()) {
logger.debug("found no multipart uploads to clean up");
return ActionListener.noop();
@@ -1128,7 +1202,21 @@ ActionListener getMultipartUploadCleanupListener(int maxUploads, RefCounti
}
return newMultipartUploadCleanupListener(
refs,
- multipartUploads.stream().map(u -> new AbortMultipartUploadRequest(bucket, u.getKey(), u.getUploadId())).toList()
+ Iterators.map(
+ multipartUploads.iterator(),
+ u -> AbortMultipartUploadRequest.builder()
+ .bucket(bucket)
+ .key(u.key())
+ .uploadId(u.uploadId())
+ // TODO adjust to use S3BlobStore.configureRequestForMetrics, adding metrics collection
+ .overrideConfiguration(
+ b -> b.putRawQueryParameter(
+ S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE,
+ OperationPurpose.SNAPSHOT_DATA.getKey()
+ )
+ )
+ .build()
+ )
);
}
} catch (Exception e) {
@@ -1140,25 +1228,22 @@ ActionListener getMultipartUploadCleanupListener(int maxUploads, RefCounti
private ActionListener newMultipartUploadCleanupListener(
RefCountingRunnable refs,
- List abortMultipartUploadRequests
+ Iterator abortMultipartUploadRequestIterator
) {
return new ThreadedActionListener<>(blobStore.getSnapshotExecutor(), ActionListener.releaseAfter(new ActionListener<>() {
@Override
public void onResponse(Void unused) {
try (var clientReference = blobStore.clientReference()) {
- for (final var abortMultipartUploadRequest : abortMultipartUploadRequests) {
- abortMultipartUploadRequest.putCustomQueryParameter(
- S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE,
- OperationPurpose.SNAPSHOT_DATA.getKey()
- );
+ while (abortMultipartUploadRequestIterator.hasNext()) {
+ final var abortMultipartUploadRequest = abortMultipartUploadRequestIterator.next();
try {
SocketAccess.doPrivilegedVoid(() -> clientReference.client().abortMultipartUpload(abortMultipartUploadRequest));
logger.info(
"cleaned up dangling multipart upload [{}] of blob [{}][{}][{}]",
- abortMultipartUploadRequest.getUploadId(),
+ abortMultipartUploadRequest.uploadId(),
blobStore.getRepositoryMetadata().name(),
- abortMultipartUploadRequest.getBucketName(),
- abortMultipartUploadRequest.getKey()
+ abortMultipartUploadRequest.bucket(),
+ abortMultipartUploadRequest.key()
);
} catch (Exception e) {
// Cleanup is a best-effort thing, we can't do anything better than log and carry on here. Note that any failure
@@ -1167,10 +1252,10 @@ public void onResponse(Void unused) {
logger.warn(
Strings.format(
"failed to clean up multipart upload [%s] of blob [%s][%s][%s]",
- abortMultipartUploadRequest.getUploadId(),
+ abortMultipartUploadRequest.uploadId(),
blobStore.getRepositoryMetadata().name(),
- abortMultipartUploadRequest.getBucketName(),
- abortMultipartUploadRequest.getKey()
+ abortMultipartUploadRequest.bucket(),
+ abortMultipartUploadRequest.key()
),
e
);
diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java
index 129de029daf7a..42e675efad202 100644
--- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java
+++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java
@@ -9,21 +9,22 @@
package org.elasticsearch.repositories.s3;
-import com.amazonaws.AmazonClientException;
-import com.amazonaws.AmazonWebServiceRequest;
-import com.amazonaws.Request;
-import com.amazonaws.Response;
-import com.amazonaws.metrics.RequestMetricCollector;
-import com.amazonaws.retry.RetryUtils;
-import com.amazonaws.services.s3.model.CannedAccessControlList;
-import com.amazonaws.services.s3.model.DeleteObjectsRequest;
-import com.amazonaws.services.s3.model.MultiObjectDeleteException;
-import com.amazonaws.services.s3.model.StorageClass;
-import com.amazonaws.util.AWSRequestMetrics;
-import com.amazonaws.util.TimingInfo;
+import software.amazon.awssdk.awscore.AwsRequest;
+import software.amazon.awssdk.core.exception.SdkException;
+import software.amazon.awssdk.core.metrics.CoreMetric;
+import software.amazon.awssdk.core.retry.RetryUtils;
+import software.amazon.awssdk.http.HttpMetric;
+import software.amazon.awssdk.metrics.MetricCollection;
+import software.amazon.awssdk.metrics.MetricPublisher;
+import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest;
+import software.amazon.awssdk.services.s3.model.ObjectCannedACL;
+import software.amazon.awssdk.services.s3.model.ObjectIdentifier;
+import software.amazon.awssdk.services.s3.model.S3Error;
+import software.amazon.awssdk.services.s3.model.StorageClass;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.metadata.RepositoryMetadata;
import org.elasticsearch.common.BackoffPolicy;
@@ -38,6 +39,7 @@
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.repositories.RepositoriesMetrics;
+import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
@@ -48,15 +50,14 @@
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
-import java.util.Optional;
+import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.LongAdder;
+import java.util.function.Predicate;
import java.util.stream.Collectors;
-import static org.elasticsearch.rest.RestStatus.REQUESTED_RANGE_NOT_SATISFIED;
-
class S3BlobStore implements BlobStore {
public static final String CUSTOM_QUERY_PARAMETER_COPY_SOURCE = "x-amz-copy-source";
@@ -84,7 +85,7 @@ class S3BlobStore implements BlobStore {
private final boolean serverSideEncryption;
- private final CannedAccessControlList cannedACL;
+ private final ObjectCannedACL cannedACL;
private final StorageClass storageClass;
@@ -132,8 +133,8 @@ class S3BlobStore implements BlobStore {
this.getRegisterRetryDelay = S3Repository.GET_REGISTER_RETRY_DELAY.get(repositoryMetadata.settings());
}
- RequestMetricCollector getMetricCollector(Operation operation, OperationPurpose purpose) {
- return statsCollectors.getMetricCollector(operation, purpose);
+ MetricPublisher getMetricPublisher(Operation operation, OperationPurpose purpose) {
+ return statsCollectors.getMetricPublisher(operation, purpose);
}
public Executor getSnapshotExecutor() {
@@ -149,9 +150,9 @@ public TimeValue getCompareAndExchangeAntiContentionDelay() {
}
/**
- * A {@link RequestMetricCollector} that processes the metrics related to each API invocation attempt according to Elasticsearch's needs
+ * A {@link MetricPublisher} that processes the metrics related to each API invocation attempt according to Elasticsearch's needs
*/
- class ElasticsearchS3MetricsCollector extends RequestMetricCollector {
+ class ElasticsearchS3MetricsCollector implements MetricPublisher {
final LongAdder requests = new LongAdder();
final LongAdder operations = new LongAdder();
@@ -168,140 +169,82 @@ BlobStoreActionStats getEndpointStats() {
}
@Override
- public final void collectMetrics(Request> request, Response> response) {
- assert assertConsistencyBetweenHttpRequestAndOperation(request, operation);
- final AWSRequestMetrics awsRequestMetrics = request.getAWSRequestMetrics();
- final TimingInfo timingInfo = awsRequestMetrics.getTimingInfo();
- final long requestCount = getCountForMetric(timingInfo, AWSRequestMetrics.Field.RequestCount);
- final long exceptionCount = getCountForMetric(timingInfo, AWSRequestMetrics.Field.Exception);
- final long throttleCount = getCountForMetric(timingInfo, AWSRequestMetrics.Field.ThrottleException);
-
- // For stats reported by API, do not collect stats for null response for BWC.
- // See https://github.com/elastic/elasticsearch/pull/71406
- // TODO Is this BWC really necessary?
- // This behaviour needs to be updated, see https://elasticco.atlassian.net/browse/ES-10223
- if (response != null) {
- requests.add(requestCount);
+ public void publish(MetricCollection metricCollection) {
+ assert operation.assertConsistentOperationName(metricCollection);
+
+ boolean overallSuccess = false;
+ for (final var successMetricValue : metricCollection.metricValues(CoreMetric.API_CALL_SUCCESSFUL)) {
+ // The API allows for multiple success flags but in practice there should be only one; check they're all true for safety:
+ if (Boolean.TRUE.equals(successMetricValue)) {
+ overallSuccess = true; // but keep checking just in case
+ } else {
+ overallSuccess = false;
+ break;
+ }
}
- // We collect all metrics regardless whether response is null
- // There are many situations other than network where a null response can be returned.
- // In addition, we are interested in the stats when there is a network outage.
- final int numberOfAwsErrors = Optional.ofNullable(awsRequestMetrics.getProperty(AWSRequestMetrics.Field.AWSErrorCode))
- .map(List::size)
- .orElse(0);
-
- if (exceptionCount > 0) {
- final List