diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 70c1a0c66b39d..46687b3062143 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -17,7 +17,6 @@ jna = 5.12.1 netty = 4.1.118.Final commons_lang3 = 3.9 google_oauth_client = 1.34.1 -awsv1sdk = 1.12.746 awsv2sdk = 2.30.38 reactive_streams = 1.0.4 diff --git a/docs/changelog/126843.yaml b/docs/changelog/126843.yaml new file mode 100644 index 0000000000000..77d3916c31955 --- /dev/null +++ b/docs/changelog/126843.yaml @@ -0,0 +1,90 @@ +pr: 126843 +summary: Upgrade `repository-s3` to AWS SDK v2 +area: Snapshot/Restore +type: breaking +issues: + - 120993 +highlight: + title: Upgrade `repository-s3` to AWS SDK v2 + body: >- + In earlier versions of {es} the `repository-s3` plugin was based on the AWS + SDK v1. AWS will withdraw support for this SDK before the end of the life + of {es} {minor-version}, so we have migrated this plugin to the newer AWS SDK v2. + + The two SDKs are not quite compatible, so please check the breaking changes + documentation and test the new version thoroughly before upgrading any + production workloads. + notable: true +breaking: + title: Upgrade `repository-s3` to AWS SDK v2 + area: Cluster and node setting + details: >- + In earlier versions of {es} the `repository-s3` plugin was based on the AWS + SDK v1. AWS will withdraw support for this SDK before the end of the life + of {es} {minor-version}, so we must migrate to the newer AWS SDK v2. + + Unfortunately there are several differences between the two AWS SDK + versions which may require you to adjust your system configuration when + upgrading to {es} {minor-version} or later. These differences include, but + may not be limited to, the following items. + + * AWS SDK v2 requires users to specify the region to use for signing + requests, or else to run in an environment in which it can determine the + correct region automatically. The older SDK would try to determine the + region based on the endpoint URL as specified with the + `s3.client.${CLIENT_NAME}.endpoint` setting, together with other data + drawn from the operating environment, and would ultimately fall back to + `us-east-1` if no better value could be found. + + * AWS SDK v2 does not support the EC2 IMDSv1 protocol. + + * AWS SDK v2 does not support the + `com.amazonaws.sdk.ec2MetadataServiceEndpointOverride` system property. + + * AWS SDK v2 does not permit specifying a choice between HTTP and HTTPS, so + the `s3.client.${CLIENT_NAME}.protocol` setting is deprecated and no longer + has any effect. + + * AWS SDK v2 does not permit control over throttling for retries, so the + `s3.client.${CLIENT_NAME}.use_throttle_retries` setting is deprecated + and no longer has any effect. + + * AWS SDK v2 requires the use of the V4 signature algorithm, so the + `s3.client.${CLIENT_NAME}.signer_override` setting is deprecated and no + longer has any effect. + + * AWS SDK v2 does not support the `log-delivery-write` canned ACL. + + * AWS SDK v2 counts 4xx responses differently in its metrics reporting. + + * AWS SDK v2 always uses the regional STS endpoint, whereas AWS SDK v1 + could use either a regional endpoint or the global + `https://sts.amazonaws.com` one. + + impact: >- + If you use the `repository-s3` module, test your upgrade thoroughly before + upgrading any production workloads. + + Adapt your configuration to the new SDK functionality. This includes, but + may not be limited to, the following items.
+ + * Specify the correct signing region using the + `s3.client.${CLIENT_NAME}.region` setting on each node. {es} will try to + determine the correct region based on the endpoint URL and other data + drawn from the operating environment but cannot guarantee to do so + correctly in all cases. + + * If you use IMDS to determine the availability zone of a node or to obtain + credentials for accessing the EC2 API, ensure that it supports the IMDSv2 + protocol. + + * If applicable, discontinue use of the + `com.amazonaws.sdk.ec2MetadataServiceEndpointOverride` system property. + + * If applicable, specify that you wish to use the insecure HTTP protocol to + access the S3 API by setting `s3.client.${CLIENT_NAME}.endpoint` to a URL + which starts with `http://`. + + * If applicable, discontinue use of the `log-delivery-write` canned ACL. + + notable: true diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index b602b44c52adf..38d06dddabb3a 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -86,36 +86,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -4682,6 +4652,11 @@ + + + + + @@ -4702,6 +4677,11 @@ + + + + + @@ -4802,11 +4782,21 @@ + + + + + + + + + + diff --git a/modules/repository-s3/build.gradle b/modules/repository-s3/build.gradle index e03ca78de5e0b..413d667192d81 100644 --- a/modules/repository-s3/build.gradle +++ b/modules/repository-s3/build.gradle @@ -20,27 +20,49 @@ esplugin { } dependencies { - api "com.amazonaws:aws-java-sdk-s3:${versions.awsv1sdk}" - api "com.amazonaws:aws-java-sdk-core:${versions.awsv1sdk}" - api "com.amazonaws:aws-java-sdk-sts:${versions.awsv1sdk}" - api "com.amazonaws:jmespath-java:${versions.awsv1sdk}" - api "org.apache.httpcomponents:httpclient:${versions.httpclient}" - api "org.apache.httpcomponents:httpcore:${versions.httpcore}" - api "commons-logging:commons-logging:${versions.commonslogging}" - api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" - api "commons-codec:commons-codec:${versions.commonscodec}" - api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" - api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" - api "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" - api "joda-time:joda-time:2.10.14" - - // HACK: javax.xml.bind was removed from default modules in java 9, so we pull the api in here, - // and whitelist this hack in JarHell - api 'javax.xml.bind:jaxb-api:2.2.2' + implementation "software.amazon.awssdk:annotations:${versions.awsv2sdk}" + implementation "software.amazon.awssdk:apache-client:${versions.awsv2sdk}" + implementation "software.amazon.awssdk:auth:${versions.awsv2sdk}" + implementation "software.amazon.awssdk:aws-core:${versions.awsv2sdk}" + implementation "software.amazon.awssdk:aws-xml-protocol:${versions.awsv2sdk}" + implementation "software.amazon.awssdk:http-client-spi:${versions.awsv2sdk}" + implementation "software.amazon.awssdk:identity-spi:${versions.awsv2sdk}" + implementation "software.amazon.awssdk:metrics-spi:${versions.awsv2sdk}" + implementation "software.amazon.awssdk:regions:${versions.awsv2sdk}" + implementation "software.amazon.awssdk:retries-spi:${versions.awsv2sdk}" + implementation "software.amazon.awssdk:retries:${versions.awsv2sdk}" + implementation "software.amazon.awssdk:s3:${versions.awsv2sdk}" + implementation "software.amazon.awssdk:sdk-core:${versions.awsv2sdk}" +
implementation "software.amazon.awssdk:services:${versions.awsv2sdk}" + implementation "software.amazon.awssdk:sts:${versions.awsv2sdk}" + implementation "software.amazon.awssdk:utils:${versions.awsv2sdk}" + + implementation "org.apache.httpcomponents:httpclient:${versions.httpclient}" + + runtimeOnly "commons-codec:commons-codec:${versions.commonscodec}" + runtimeOnly "commons-logging:commons-logging:${versions.commonslogging}" + runtimeOnly "joda-time:joda-time:2.10.14" + runtimeOnly "org.apache.httpcomponents:httpcore:${versions.httpcore}" + runtimeOnly "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" + runtimeOnly "org.reactivestreams:reactive-streams:${versions.reactive_streams}" + runtimeOnly "org.slf4j:slf4j-api:${versions.slf4j}" + runtimeOnly "software.amazon.awssdk:arns:${versions.awsv2sdk}" + runtimeOnly "software.amazon.awssdk:aws-query-protocol:${versions.awsv2sdk}" + runtimeOnly "software.amazon.awssdk:checksums-spi:${versions.awsv2sdk}" + runtimeOnly "software.amazon.awssdk:checksums:${versions.awsv2sdk}" + runtimeOnly "software.amazon.awssdk:endpoints-spi:${versions.awsv2sdk}" + runtimeOnly "software.amazon.awssdk:http-auth:${versions.awsv2sdk}" + runtimeOnly "software.amazon.awssdk:http-auth-aws:${versions.awsv2sdk}" + runtimeOnly "software.amazon.awssdk:http-auth-spi:${versions.awsv2sdk}" + runtimeOnly "software.amazon.awssdk:json-utils:${versions.awsv2sdk}" + runtimeOnly "software.amazon.awssdk:profiles:${versions.awsv2sdk}" + runtimeOnly "software.amazon.awssdk:protocol-core:${versions.awsv2sdk}" + runtimeOnly "software.amazon.awssdk:third-party-jackson-core:${versions.awsv2sdk}" testImplementation project(':test:fixtures:s3-fixture') + testImplementation "software.amazon.awssdk:endpoints-spi:${versions.awsv2sdk}" + internalClusterTestImplementation project(':test:fixtures:aws-fixture-utils') internalClusterTestImplementation project(':test:fixtures:minio-fixture') internalClusterTestRuntimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" @@ -69,10 +91,34 @@ restResources { } tasks.named("dependencyLicenses").configure { - mapping from: /aws-java-sdk-.*/, to: 'aws-java-sdk' - mapping from: /jmespath-java.*/, to: 'aws-java-sdk' - mapping from: /jackson-.*/, to: 'jackson' - mapping from: /jaxb-.*/, to: 'jaxb' + mapping from: 'annotations', to: 'aws-sdk-2' + mapping from: 'apache-client', to: 'aws-sdk-2' + mapping from: 'arns', to: 'aws-sdk-2' + mapping from: 'auth', to: 'aws-sdk-2' + mapping from: 'aws-core', to: 'aws-sdk-2' + mapping from: 'aws-query-protocol', to: 'aws-sdk-2' + mapping from: 'aws-xml-protocol', to: 'aws-sdk-2' + mapping from: 'checksums', to: 'aws-sdk-2' + mapping from: 'checksums-spi', to: 'aws-sdk-2' + mapping from: 'endpoints-spi', to: 'aws-sdk-2' + mapping from: 'http-auth', to: 'aws-sdk-2' + mapping from: 'http-auth-aws', to: 'aws-sdk-2' + mapping from: 'http-auth-spi', to: 'aws-sdk-2' + mapping from: 'http-client-spi', to: 'aws-sdk-2' + mapping from: 'identity-spi', to: 'aws-sdk-2' + mapping from: 'json-utils', to: 'aws-sdk-2' + mapping from: 'metrics-spi', to: 'aws-sdk-2' + mapping from: 'profiles', to: 'aws-sdk-2' + mapping from: 'protocol-core', to: 'aws-sdk-2' + mapping from: 'regions', to: 'aws-sdk-2' + mapping from: 'retries', to: 'aws-sdk-2' + mapping from: 'retries-spi', to: 'aws-sdk-2' + mapping from: 's3', to: 'aws-sdk-2' + mapping from: 'sdk-core', to: 'aws-sdk-2' + mapping from: 'services', to: 'aws-sdk-2' + mapping from: 'sts', to: 'aws-sdk-2' + mapping from: 'third-party-jackson-core', to: 'aws-sdk-2' + mapping from: 
'utils', to: 'aws-sdk-2' } esplugin.bundleSpec.from('config/repository-s3') { @@ -86,23 +132,61 @@ tasks.named("internalClusterTest").configure { tasks.named("thirdPartyAudit").configure { ignoreMissingClasses( - // classes are missing - 'javax.servlet.ServletContextEvent', - 'javax.servlet.ServletContextListener', - 'org.apache.avalon.framework.logger.Logger', - 'org.apache.log.Hierarchy', - 'org.apache.log.Logger', - 'javax.jms.Message', - // We don't use the kms dependency - 'com.amazonaws.services.kms.AWSKMS', - 'com.amazonaws.services.kms.AWSKMSClient', - 'com.amazonaws.services.kms.AWSKMSClientBuilder', - 'com.amazonaws.services.kms.model.DecryptRequest', - 'com.amazonaws.services.kms.model.DecryptResult', - 'com.amazonaws.services.kms.model.EncryptRequest', - 'com.amazonaws.services.kms.model.EncryptResult', - 'com.amazonaws.services.kms.model.GenerateDataKeyRequest', - 'com.amazonaws.services.kms.model.GenerateDataKeyResult', - 'javax.activation.DataHandler' + // missing/unused classes + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener', + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger', + 'javax.jms.Message', + + // We use the Apache HTTP client rather than an AWS common runtime (CRT) one, so we don't need any of these classes: + 'software.amazon.awssdk.crt.CRT', + 'software.amazon.awssdk.crt.auth.credentials.Credentials', + 'software.amazon.awssdk.crt.auth.credentials.CredentialsProvider', + 'software.amazon.awssdk.crt.auth.credentials.DelegateCredentialsProvider$DelegateCredentialsProviderBuilder', + 'software.amazon.awssdk.crt.auth.signing.AwsSigner', + 'software.amazon.awssdk.crt.auth.signing.AwsSigningConfig$AwsSignatureType', + 'software.amazon.awssdk.crt.auth.signing.AwsSigningConfig$AwsSignedBodyHeaderType', + 'software.amazon.awssdk.crt.auth.signing.AwsSigningConfig$AwsSigningAlgorithm', + 'software.amazon.awssdk.crt.auth.signing.AwsSigningConfig', + 'software.amazon.awssdk.crt.auth.signing.AwsSigningResult', + 'software.amazon.awssdk.crt.http.HttpHeader', + 'software.amazon.awssdk.crt.http.HttpMonitoringOptions', + 'software.amazon.awssdk.crt.http.HttpProxyEnvironmentVariableSetting$HttpProxyEnvironmentVariableType', + 'software.amazon.awssdk.crt.http.HttpProxyEnvironmentVariableSetting', + 'software.amazon.awssdk.crt.http.HttpProxyOptions', + 'software.amazon.awssdk.crt.http.HttpRequest', + 'software.amazon.awssdk.crt.http.HttpRequestBodyStream', + 'software.amazon.awssdk.crt.io.ClientBootstrap', + 'software.amazon.awssdk.crt.io.ExponentialBackoffRetryOptions', + 'software.amazon.awssdk.crt.io.StandardRetryOptions', + 'software.amazon.awssdk.crt.io.TlsCipherPreference', + 'software.amazon.awssdk.crt.io.TlsContext', + 'software.amazon.awssdk.crt.io.TlsContextOptions', + 'software.amazon.awssdk.crt.s3.ChecksumAlgorithm', + 'software.amazon.awssdk.crt.s3.ChecksumConfig$ChecksumLocation', + 'software.amazon.awssdk.crt.s3.ChecksumConfig', + 'software.amazon.awssdk.crt.s3.ResumeToken', + 'software.amazon.awssdk.crt.s3.S3Client', + 'software.amazon.awssdk.crt.s3.S3ClientOptions', + 'software.amazon.awssdk.crt.s3.S3FinishedResponseContext', + 'software.amazon.awssdk.crt.s3.S3MetaRequest', + 'software.amazon.awssdk.crt.s3.S3MetaRequestOptions$MetaRequestType', + 'software.amazon.awssdk.crt.s3.S3MetaRequestOptions', + 'software.amazon.awssdk.crt.s3.S3MetaRequestProgress', + 'software.amazon.awssdk.crt.s3.S3MetaRequestResponseHandler', + 'software.amazon.awssdk.crtcore.CrtConfigurationUtils', + 
'software.amazon.awssdk.crtcore.CrtConnectionHealthConfiguration$Builder', + 'software.amazon.awssdk.crtcore.CrtConnectionHealthConfiguration$DefaultBuilder', + 'software.amazon.awssdk.crtcore.CrtConnectionHealthConfiguration', + 'software.amazon.awssdk.crtcore.CrtProxyConfiguration$Builder', + 'software.amazon.awssdk.crtcore.CrtProxyConfiguration$DefaultBuilder', + 'software.amazon.awssdk.crtcore.CrtProxyConfiguration', + + // We don't use anything eventstream-based so these classes are not needed: + 'software.amazon.eventstream.HeaderValue', + 'software.amazon.eventstream.Message', + 'software.amazon.eventstream.MessageDecoder' ) } diff --git a/modules/repository-s3/licenses/aws-java-sdk-LICENSE.txt b/modules/repository-s3/licenses/aws-java-sdk-LICENSE.txt deleted file mode 100644 index 98d1f9319f374..0000000000000 --- a/modules/repository-s3/licenses/aws-java-sdk-LICENSE.txt +++ /dev/null @@ -1,63 +0,0 @@ -Apache License -Version 2.0, January 2004 - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. - -"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: - - 1. You must give any other recipients of the Work or Derivative Works a copy of this License; and - 2. You must cause any modified files to carry prominent notices stating that You changed the files; and - 3. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and - 4. If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. - -You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -Note: Other license terms may apply to certain, identified software files contained within or distributed with the accompanying software if such terms are included in the directory containing the accompanying software. 
Such other license terms will then apply in lieu of the terms of the software license above. - -JSON processing code subject to the JSON License from JSON.org: - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -The Software shall be used for Good, not Evil. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/modules/repository-s3/licenses/aws-java-sdk-NOTICE.txt b/modules/repository-s3/licenses/aws-java-sdk-NOTICE.txt deleted file mode 100644 index 565bd6085c71a..0000000000000 --- a/modules/repository-s3/licenses/aws-java-sdk-NOTICE.txt +++ /dev/null @@ -1,15 +0,0 @@ -AWS SDK for Java -Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. - -This product includes software developed by -Amazon Technologies, Inc (http://www.amazon.com/). - -********************** -THIRD PARTY COMPONENTS -********************** -This software includes third party software subject to the following copyrights: -- XML parsing and utility functions from JetS3t - Copyright 2006-2009 James Murty. -- JSON parsing and utility functions from JSON.org - Copyright 2002 JSON.org. -- PKCS#1 PEM encoded private key parsing and utility functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc. - -The licenses for these third party components are included in LICENSE.txt diff --git a/modules/repository-s3/licenses/aws-sdk-2-LICENSE.txt b/modules/repository-s3/licenses/aws-sdk-2-LICENSE.txt new file mode 100644 index 0000000000000..1eef70a9b9f42 --- /dev/null +++ b/modules/repository-s3/licenses/aws-sdk-2-LICENSE.txt @@ -0,0 +1,206 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + Note: Other license terms may apply to certain, identified software files contained within or distributed + with the accompanying software if such terms are included in the directory containing the accompanying software. + Such other license terms will then apply in lieu of the terms of the software license above. 
diff --git a/modules/repository-s3/licenses/aws-sdk-2-NOTICE.txt b/modules/repository-s3/licenses/aws-sdk-2-NOTICE.txt new file mode 100644 index 0000000000000..f3c4db7d1724e --- /dev/null +++ b/modules/repository-s3/licenses/aws-sdk-2-NOTICE.txt @@ -0,0 +1,26 @@ +AWS SDK for Java 2.0 +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + +This product includes software developed by +Amazon Technologies, Inc (http://www.amazon.com/). + +********************** +THIRD PARTY COMPONENTS +********************** +This software includes third party software subject to the following copyrights: +- XML parsing and utility functions from JetS3t - Copyright 2006-2009 James Murty. +- PKCS#1 PEM encoded private key parsing and utility functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc. +- Apache Commons Lang - https://github.com/apache/commons-lang +- Netty Reactive Streams - https://github.com/playframework/netty-reactive-streams +- Jackson-core - https://github.com/FasterXML/jackson-core +- Jackson-dataformat-cbor - https://github.com/FasterXML/jackson-dataformats-binary + +The licenses for these third party components are included in LICENSE.txt + +- For Apache Commons Lang see also this required NOTICE: + Apache Commons Lang + Copyright 2001-2020 The Apache Software Foundation + + This product includes software developed at + The Apache Software Foundation (https://www.apache.org/). + diff --git a/modules/repository-s3/licenses/jackson-LICENSE b/modules/repository-s3/licenses/jackson-LICENSE deleted file mode 100644 index f5f45d26a49d6..0000000000000 --- a/modules/repository-s3/licenses/jackson-LICENSE +++ /dev/null @@ -1,8 +0,0 @@ -This copy of Jackson JSON processor streaming parser/generator is licensed under the -Apache (Software) License, version 2.0 ("the License"). -See the License for details about distribution rights, and the -specific rights regarding derivate works. - -You may obtain a copy of the License at: - -http://www.apache.org/licenses/LICENSE-2.0 diff --git a/modules/repository-s3/licenses/jackson-NOTICE b/modules/repository-s3/licenses/jackson-NOTICE deleted file mode 100644 index 4c976b7b4cc58..0000000000000 --- a/modules/repository-s3/licenses/jackson-NOTICE +++ /dev/null @@ -1,20 +0,0 @@ -# Jackson JSON processor - -Jackson is a high-performance, Free/Open Source JSON processing library. -It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has -been in development since 2007. -It is currently developed by a community of developers, as well as supported -commercially by FasterXML.com. - -## Licensing - -Jackson core and extension components may licensed under different licenses. -To find the details that apply to this artifact see the accompanying LICENSE file. -For more information, including possible other licensing options, contact -FasterXML.com (http://fasterxml.com). - -## Credits - -A list of contributors may be found from CREDITS file, which is included -in some artifacts (usually source distributions); but is always available -from the source code management (SCM) system project uses. diff --git a/modules/repository-s3/licenses/jaxb-LICENSE.txt b/modules/repository-s3/licenses/jaxb-LICENSE.txt deleted file mode 100644 index 833a843cfeee1..0000000000000 --- a/modules/repository-s3/licenses/jaxb-LICENSE.txt +++ /dev/null @@ -1,274 +0,0 @@ -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)Version 1.1 - -1. Definitions. - - 1.1. 
"Contributor" means each individual or entity that creates or contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof. - - 1.4. "Executable" means the Covered Software in any form other than Source Code. - - 1.5. "Initial Developer" means the individual or entity that first makes Original Software available under this License. - - 1.6. "Larger Work" means a work which combines Covered Software or portions thereof with code not governed by the terms of this License. - - 1.7. "License" means this document. - - 1.8. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein. - - 1.9. "Modifications" means the Source Code and Executable form of any of the following: - - A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications; - - B. Any new file that contains any part of the Original Software or previous Modification; or - - C. Any new file that is contributed or otherwise made available under the terms of this License. - - 1.10. "Original Software" means the Source Code and Executable form of computer software code that is originally released under this License. - - 1.11. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor. - - 1.12. "Source Code" means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. - -2. License Grants. - - 2.1. The Initial Developer Grant. - - Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof). 
- - (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices. - - 2.2. Contributor Grant. - - Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination). - - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party. - - (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor. - -3. Distribution Obligations. - - 3.1. Availability of Source Code. - - Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange. - - 3.2. Modifications. - - The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License. - - 3.3. Required Notices. - - You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. 
You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer. - - 3.4. Application of Additional Terms. - - You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - - You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer. - - 3.6. Larger Works. - - You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software. - -4. Versions of the License. - - 4.1. New Versions. - - Oracle is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License. - - 4.2. Effect of New Versions. - - You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward. - - 4.3. Modified Versions. 
- - When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License. - -5. DISCLAIMER OF WARRANTY. - - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -6. TERMINATION. - - 6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant. - - 6.3. If You assert a patent infringement claim against Participant alleging that the Participant Software directly or indirectly infringes any patent where such claim is resolved (such as by license or settlement) prior to the initiation of patent infringement litigation, then the reasonable value of the licenses granted by such Participant under Sections 2.1 or 2.2 shall be taken into account in determining the amount or value of any payment or license. - - 6.4. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. 
- - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. ? 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License. - -9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability. 
- ----------- -NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) -The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California. - - - - -The GNU General Public License (GPL) Version 2, June 1991 - - -Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. - -To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. - -Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. - -Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. - -The precise terms and conditions for copying, distribution and modification follow. - - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - -0. 
This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. - -1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. - -You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. - -2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. - - c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. 
- -Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. - -3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. - -If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. - -4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. - -5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. - -6. 
Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. - -7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. - -It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. - -This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. - -8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. - -9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. - -10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. 
For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. - -NO WARRANTY - -11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -END OF TERMS AND CONDITIONS - - -How to Apply These Terms to Your New Programs - -If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. - -To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. - - One line to give the program's name and a brief idea of what it does. - - Copyright (C) <year> <name of author> - - This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. - - This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. 
Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. - - signature of Ty Coon, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. - - -"CLASSPATH" EXCEPTION TO THE GPL VERSION 2 - -Certain source files distributed by Oracle are subject to the following clarification and special exception to the GPL Version 2, but only where Oracle has expressly included in the particular source file's header the words "Oracle designates this particular file as subject to the "Classpath" exception as provided by Oracle in the License file that accompanied this code." - -Linking this library statically or dynamically with other modules is making a combined work based on this library. Thus, the terms and conditions of the GNU General Public License Version 2 cover the whole combination. - -As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version. diff --git a/modules/repository-s3/licenses/jaxb-NOTICE.txt b/modules/repository-s3/licenses/jaxb-NOTICE.txt deleted file mode 100644 index 8d1c8b69c3fce..0000000000000 --- a/modules/repository-s3/licenses/jaxb-NOTICE.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/modules/repository-s3/licenses/reactive-streams-LICENSE.txt b/modules/repository-s3/licenses/reactive-streams-LICENSE.txt new file mode 100644 index 0000000000000..1e141c13ddba2 --- /dev/null +++ b/modules/repository-s3/licenses/reactive-streams-LICENSE.txt @@ -0,0 +1,7 @@ +MIT No Attribution + +Copyright 2014 Reactive Streams + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/modules/repository-s3/licenses/reactive-streams-NOTICE.txt b/modules/repository-s3/licenses/reactive-streams-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/modules/repository-s3/licenses/slf4j-api-LICENSE.txt b/modules/repository-s3/licenses/slf4j-api-LICENSE.txt new file mode 100644 index 0000000000000..8fda22f4d72f6 --- /dev/null +++ b/modules/repository-s3/licenses/slf4j-api-LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) 2004-2014 QOS.ch +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/modules/repository-s3/licenses/slf4j-api-NOTICE.txt b/modules/repository-s3/licenses/slf4j-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/modules/repository-s3/qa/insecure-credentials/build.gradle b/modules/repository-s3/qa/insecure-credentials/build.gradle index 4346e1f4547e1..bac3e00f5dadb 100644 --- a/modules/repository-s3/qa/insecure-credentials/build.gradle +++ b/modules/repository-s3/qa/insecure-credentials/build.gradle @@ -11,6 +11,15 @@ dependencies { testImplementation project(':modules:repository-s3') testImplementation project(':test:framework') testImplementation project(':server') + + testImplementation "software.amazon.awssdk:auth:${versions.awsv2sdk}" + testImplementation "software.amazon.awssdk:aws-core:${versions.awsv2sdk}" + testImplementation "software.amazon.awssdk:http-client-spi:${versions.awsv2sdk}" + testImplementation "software.amazon.awssdk:identity-spi:${versions.awsv2sdk}" + testImplementation "software.amazon.awssdk:regions:${versions.awsv2sdk}" + testImplementation "software.amazon.awssdk:s3:${versions.awsv2sdk}" + testImplementation "software.amazon.awssdk:sdk-core:${versions.awsv2sdk}" + testImplementation "software.amazon.awssdk:utils:${versions.awsv2sdk}" } tasks.named("test").configure { diff --git a/modules/repository-s3/qa/insecure-credentials/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java b/modules/repository-s3/qa/insecure-credentials/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java index 17b56131938d8..022fe15c03b05 100644 --- a/modules/repository-s3/qa/insecure-credentials/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java +++ b/modules/repository-s3/qa/insecure-credentials/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java @@ -9,778 +9,1710 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.AmazonClientException; -import com.amazonaws.AmazonServiceException; -import com.amazonaws.AmazonWebServiceRequest; -import com.amazonaws.HttpMethod; -import com.amazonaws.regions.Region; -import com.amazonaws.services.s3.AbstractAmazonS3; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.S3ClientOptions; -import com.amazonaws.services.s3.S3ResponseMetadata; -import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; -import com.amazonaws.services.s3.model.AccessControlList; -import com.amazonaws.services.s3.model.Bucket; -import com.amazonaws.services.s3.model.BucketCrossOriginConfiguration; -import com.amazonaws.services.s3.model.BucketLifecycleConfiguration; -import com.amazonaws.services.s3.model.BucketLoggingConfiguration; -import com.amazonaws.services.s3.model.BucketNotificationConfiguration; -import com.amazonaws.services.s3.model.BucketPolicy; -import com.amazonaws.services.s3.model.BucketReplicationConfiguration; -import com.amazonaws.services.s3.model.BucketTaggingConfiguration; -import com.amazonaws.services.s3.model.BucketVersioningConfiguration; -import com.amazonaws.services.s3.model.BucketWebsiteConfiguration; -import com.amazonaws.services.s3.model.CannedAccessControlList; -import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; -import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; -import com.amazonaws.services.s3.model.CopyObjectRequest; -import com.amazonaws.services.s3.model.CopyObjectResult; -import com.amazonaws.services.s3.model.CopyPartRequest; -import com.amazonaws.services.s3.model.CopyPartResult; -import 
com.amazonaws.services.s3.model.CreateBucketRequest; -import com.amazonaws.services.s3.model.DeleteBucketCrossOriginConfigurationRequest; -import com.amazonaws.services.s3.model.DeleteBucketLifecycleConfigurationRequest; -import com.amazonaws.services.s3.model.DeleteBucketPolicyRequest; -import com.amazonaws.services.s3.model.DeleteBucketReplicationConfigurationRequest; -import com.amazonaws.services.s3.model.DeleteBucketRequest; -import com.amazonaws.services.s3.model.DeleteBucketTaggingConfigurationRequest; -import com.amazonaws.services.s3.model.DeleteBucketWebsiteConfigurationRequest; -import com.amazonaws.services.s3.model.DeleteObjectRequest; -import com.amazonaws.services.s3.model.DeleteObjectsRequest; -import com.amazonaws.services.s3.model.DeleteObjectsResult; -import com.amazonaws.services.s3.model.DeleteVersionRequest; -import com.amazonaws.services.s3.model.GeneratePresignedUrlRequest; -import com.amazonaws.services.s3.model.GetBucketAclRequest; -import com.amazonaws.services.s3.model.GetBucketCrossOriginConfigurationRequest; -import com.amazonaws.services.s3.model.GetBucketLifecycleConfigurationRequest; -import com.amazonaws.services.s3.model.GetBucketLocationRequest; -import com.amazonaws.services.s3.model.GetBucketLoggingConfigurationRequest; -import com.amazonaws.services.s3.model.GetBucketNotificationConfigurationRequest; -import com.amazonaws.services.s3.model.GetBucketPolicyRequest; -import com.amazonaws.services.s3.model.GetBucketReplicationConfigurationRequest; -import com.amazonaws.services.s3.model.GetBucketTaggingConfigurationRequest; -import com.amazonaws.services.s3.model.GetBucketVersioningConfigurationRequest; -import com.amazonaws.services.s3.model.GetBucketWebsiteConfigurationRequest; -import com.amazonaws.services.s3.model.GetObjectAclRequest; -import com.amazonaws.services.s3.model.GetObjectMetadataRequest; -import com.amazonaws.services.s3.model.GetObjectRequest; -import com.amazonaws.services.s3.model.GetS3AccountOwnerRequest; -import com.amazonaws.services.s3.model.HeadBucketRequest; -import com.amazonaws.services.s3.model.HeadBucketResult; -import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; -import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; -import com.amazonaws.services.s3.model.ListBucketsRequest; -import com.amazonaws.services.s3.model.ListMultipartUploadsRequest; -import com.amazonaws.services.s3.model.ListNextBatchOfObjectsRequest; -import com.amazonaws.services.s3.model.ListNextBatchOfVersionsRequest; -import com.amazonaws.services.s3.model.ListObjectsRequest; -import com.amazonaws.services.s3.model.ListPartsRequest; -import com.amazonaws.services.s3.model.ListVersionsRequest; -import com.amazonaws.services.s3.model.MultipartUploadListing; -import com.amazonaws.services.s3.model.ObjectListing; -import com.amazonaws.services.s3.model.ObjectMetadata; -import com.amazonaws.services.s3.model.Owner; -import com.amazonaws.services.s3.model.PartListing; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.PutObjectResult; -import com.amazonaws.services.s3.model.RestoreObjectRequest; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.SetBucketAclRequest; -import com.amazonaws.services.s3.model.SetBucketCrossOriginConfigurationRequest; -import com.amazonaws.services.s3.model.SetBucketLifecycleConfigurationRequest; -import com.amazonaws.services.s3.model.SetBucketLoggingConfigurationRequest; -import 
com.amazonaws.services.s3.model.SetBucketNotificationConfigurationRequest; -import com.amazonaws.services.s3.model.SetBucketPolicyRequest; -import com.amazonaws.services.s3.model.SetBucketReplicationConfigurationRequest; -import com.amazonaws.services.s3.model.SetBucketTaggingConfigurationRequest; -import com.amazonaws.services.s3.model.SetBucketVersioningConfigurationRequest; -import com.amazonaws.services.s3.model.SetBucketWebsiteConfigurationRequest; -import com.amazonaws.services.s3.model.SetObjectAclRequest; -import com.amazonaws.services.s3.model.StorageClass; -import com.amazonaws.services.s3.model.UploadPartRequest; -import com.amazonaws.services.s3.model.UploadPartResult; -import com.amazonaws.services.s3.model.VersionListing; +import software.amazon.awssdk.awscore.exception.AwsServiceException; +import software.amazon.awssdk.core.ResponseBytes; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.core.sync.ResponseTransformer; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.S3ServiceClientConfiguration; +import software.amazon.awssdk.services.s3.S3Utilities; +import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.AbortMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.BucketAlreadyExistsException; +import software.amazon.awssdk.services.s3.model.BucketAlreadyOwnedByYouException; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.CopyObjectRequest; +import software.amazon.awssdk.services.s3.model.CopyObjectResponse; +import software.amazon.awssdk.services.s3.model.CreateBucketMetadataTableConfigurationRequest; +import software.amazon.awssdk.services.s3.model.CreateBucketMetadataTableConfigurationResponse; +import software.amazon.awssdk.services.s3.model.CreateBucketRequest; +import software.amazon.awssdk.services.s3.model.CreateBucketResponse; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.CreateSessionRequest; +import software.amazon.awssdk.services.s3.model.CreateSessionResponse; +import software.amazon.awssdk.services.s3.model.DeleteBucketAnalyticsConfigurationRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketAnalyticsConfigurationResponse; +import software.amazon.awssdk.services.s3.model.DeleteBucketCorsRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketCorsResponse; +import software.amazon.awssdk.services.s3.model.DeleteBucketEncryptionRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketEncryptionResponse; +import software.amazon.awssdk.services.s3.model.DeleteBucketIntelligentTieringConfigurationRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketIntelligentTieringConfigurationResponse; +import software.amazon.awssdk.services.s3.model.DeleteBucketInventoryConfigurationRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketInventoryConfigurationResponse; +import software.amazon.awssdk.services.s3.model.DeleteBucketLifecycleRequest; +import 
software.amazon.awssdk.services.s3.model.DeleteBucketLifecycleResponse; +import software.amazon.awssdk.services.s3.model.DeleteBucketMetadataTableConfigurationRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketMetadataTableConfigurationResponse; +import software.amazon.awssdk.services.s3.model.DeleteBucketMetricsConfigurationRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketMetricsConfigurationResponse; +import software.amazon.awssdk.services.s3.model.DeleteBucketOwnershipControlsRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketOwnershipControlsResponse; +import software.amazon.awssdk.services.s3.model.DeleteBucketPolicyRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketPolicyResponse; +import software.amazon.awssdk.services.s3.model.DeleteBucketReplicationRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketReplicationResponse; +import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketResponse; +import software.amazon.awssdk.services.s3.model.DeleteBucketTaggingRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketTaggingResponse; +import software.amazon.awssdk.services.s3.model.DeleteBucketWebsiteRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketWebsiteResponse; +import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectResponse; +import software.amazon.awssdk.services.s3.model.DeleteObjectTaggingRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectTaggingResponse; +import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse; +import software.amazon.awssdk.services.s3.model.DeletePublicAccessBlockRequest; +import software.amazon.awssdk.services.s3.model.DeletePublicAccessBlockResponse; +import software.amazon.awssdk.services.s3.model.EncryptionTypeMismatchException; +import software.amazon.awssdk.services.s3.model.GetBucketAccelerateConfigurationRequest; +import software.amazon.awssdk.services.s3.model.GetBucketAccelerateConfigurationResponse; +import software.amazon.awssdk.services.s3.model.GetBucketAclRequest; +import software.amazon.awssdk.services.s3.model.GetBucketAclResponse; +import software.amazon.awssdk.services.s3.model.GetBucketAnalyticsConfigurationRequest; +import software.amazon.awssdk.services.s3.model.GetBucketAnalyticsConfigurationResponse; +import software.amazon.awssdk.services.s3.model.GetBucketCorsRequest; +import software.amazon.awssdk.services.s3.model.GetBucketCorsResponse; +import software.amazon.awssdk.services.s3.model.GetBucketEncryptionRequest; +import software.amazon.awssdk.services.s3.model.GetBucketEncryptionResponse; +import software.amazon.awssdk.services.s3.model.GetBucketIntelligentTieringConfigurationRequest; +import software.amazon.awssdk.services.s3.model.GetBucketIntelligentTieringConfigurationResponse; +import software.amazon.awssdk.services.s3.model.GetBucketInventoryConfigurationRequest; +import software.amazon.awssdk.services.s3.model.GetBucketInventoryConfigurationResponse; +import software.amazon.awssdk.services.s3.model.GetBucketLifecycleConfigurationRequest; +import software.amazon.awssdk.services.s3.model.GetBucketLifecycleConfigurationResponse; +import software.amazon.awssdk.services.s3.model.GetBucketLocationRequest; +import 
software.amazon.awssdk.services.s3.model.GetBucketLocationResponse; +import software.amazon.awssdk.services.s3.model.GetBucketLoggingRequest; +import software.amazon.awssdk.services.s3.model.GetBucketLoggingResponse; +import software.amazon.awssdk.services.s3.model.GetBucketMetadataTableConfigurationRequest; +import software.amazon.awssdk.services.s3.model.GetBucketMetadataTableConfigurationResponse; +import software.amazon.awssdk.services.s3.model.GetBucketMetricsConfigurationRequest; +import software.amazon.awssdk.services.s3.model.GetBucketMetricsConfigurationResponse; +import software.amazon.awssdk.services.s3.model.GetBucketNotificationConfigurationRequest; +import software.amazon.awssdk.services.s3.model.GetBucketNotificationConfigurationResponse; +import software.amazon.awssdk.services.s3.model.GetBucketOwnershipControlsRequest; +import software.amazon.awssdk.services.s3.model.GetBucketOwnershipControlsResponse; +import software.amazon.awssdk.services.s3.model.GetBucketPolicyRequest; +import software.amazon.awssdk.services.s3.model.GetBucketPolicyResponse; +import software.amazon.awssdk.services.s3.model.GetBucketPolicyStatusRequest; +import software.amazon.awssdk.services.s3.model.GetBucketPolicyStatusResponse; +import software.amazon.awssdk.services.s3.model.GetBucketReplicationRequest; +import software.amazon.awssdk.services.s3.model.GetBucketReplicationResponse; +import software.amazon.awssdk.services.s3.model.GetBucketRequestPaymentRequest; +import software.amazon.awssdk.services.s3.model.GetBucketRequestPaymentResponse; +import software.amazon.awssdk.services.s3.model.GetBucketTaggingRequest; +import software.amazon.awssdk.services.s3.model.GetBucketTaggingResponse; +import software.amazon.awssdk.services.s3.model.GetBucketVersioningRequest; +import software.amazon.awssdk.services.s3.model.GetBucketVersioningResponse; +import software.amazon.awssdk.services.s3.model.GetBucketWebsiteRequest; +import software.amazon.awssdk.services.s3.model.GetBucketWebsiteResponse; +import software.amazon.awssdk.services.s3.model.GetObjectAclRequest; +import software.amazon.awssdk.services.s3.model.GetObjectAclResponse; +import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest; +import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse; +import software.amazon.awssdk.services.s3.model.GetObjectLegalHoldRequest; +import software.amazon.awssdk.services.s3.model.GetObjectLegalHoldResponse; +import software.amazon.awssdk.services.s3.model.GetObjectLockConfigurationRequest; +import software.amazon.awssdk.services.s3.model.GetObjectLockConfigurationResponse; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.GetObjectRetentionRequest; +import software.amazon.awssdk.services.s3.model.GetObjectRetentionResponse; +import software.amazon.awssdk.services.s3.model.GetObjectTaggingRequest; +import software.amazon.awssdk.services.s3.model.GetObjectTaggingResponse; +import software.amazon.awssdk.services.s3.model.GetObjectTorrentRequest; +import software.amazon.awssdk.services.s3.model.GetObjectTorrentResponse; +import software.amazon.awssdk.services.s3.model.GetPublicAccessBlockRequest; +import software.amazon.awssdk.services.s3.model.GetPublicAccessBlockResponse; +import software.amazon.awssdk.services.s3.model.HeadBucketRequest; +import software.amazon.awssdk.services.s3.model.HeadBucketResponse; +import 
software.amazon.awssdk.services.s3.model.HeadObjectRequest; +import software.amazon.awssdk.services.s3.model.HeadObjectResponse; +import software.amazon.awssdk.services.s3.model.InvalidObjectStateException; +import software.amazon.awssdk.services.s3.model.InvalidRequestException; +import software.amazon.awssdk.services.s3.model.InvalidWriteOffsetException; +import software.amazon.awssdk.services.s3.model.ListBucketAnalyticsConfigurationsRequest; +import software.amazon.awssdk.services.s3.model.ListBucketAnalyticsConfigurationsResponse; +import software.amazon.awssdk.services.s3.model.ListBucketIntelligentTieringConfigurationsRequest; +import software.amazon.awssdk.services.s3.model.ListBucketIntelligentTieringConfigurationsResponse; +import software.amazon.awssdk.services.s3.model.ListBucketInventoryConfigurationsRequest; +import software.amazon.awssdk.services.s3.model.ListBucketInventoryConfigurationsResponse; +import software.amazon.awssdk.services.s3.model.ListBucketMetricsConfigurationsRequest; +import software.amazon.awssdk.services.s3.model.ListBucketMetricsConfigurationsResponse; +import software.amazon.awssdk.services.s3.model.ListBucketsRequest; +import software.amazon.awssdk.services.s3.model.ListBucketsResponse; +import software.amazon.awssdk.services.s3.model.ListDirectoryBucketsRequest; +import software.amazon.awssdk.services.s3.model.ListDirectoryBucketsResponse; +import software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest; +import software.amazon.awssdk.services.s3.model.ListMultipartUploadsResponse; +import software.amazon.awssdk.services.s3.model.ListObjectVersionsRequest; +import software.amazon.awssdk.services.s3.model.ListObjectVersionsResponse; +import software.amazon.awssdk.services.s3.model.ListObjectsRequest; +import software.amazon.awssdk.services.s3.model.ListObjectsResponse; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; +import software.amazon.awssdk.services.s3.model.ListPartsRequest; +import software.amazon.awssdk.services.s3.model.ListPartsResponse; +import software.amazon.awssdk.services.s3.model.NoSuchBucketException; +import software.amazon.awssdk.services.s3.model.NoSuchKeyException; +import software.amazon.awssdk.services.s3.model.NoSuchUploadException; +import software.amazon.awssdk.services.s3.model.ObjectAlreadyInActiveTierErrorException; +import software.amazon.awssdk.services.s3.model.ObjectNotInActiveTierErrorException; +import software.amazon.awssdk.services.s3.model.PutBucketAccelerateConfigurationRequest; +import software.amazon.awssdk.services.s3.model.PutBucketAccelerateConfigurationResponse; +import software.amazon.awssdk.services.s3.model.PutBucketAclRequest; +import software.amazon.awssdk.services.s3.model.PutBucketAclResponse; +import software.amazon.awssdk.services.s3.model.PutBucketAnalyticsConfigurationRequest; +import software.amazon.awssdk.services.s3.model.PutBucketAnalyticsConfigurationResponse; +import software.amazon.awssdk.services.s3.model.PutBucketCorsRequest; +import software.amazon.awssdk.services.s3.model.PutBucketCorsResponse; +import software.amazon.awssdk.services.s3.model.PutBucketEncryptionRequest; +import software.amazon.awssdk.services.s3.model.PutBucketEncryptionResponse; +import software.amazon.awssdk.services.s3.model.PutBucketIntelligentTieringConfigurationRequest; +import software.amazon.awssdk.services.s3.model.PutBucketIntelligentTieringConfigurationResponse; +import 
software.amazon.awssdk.services.s3.model.PutBucketInventoryConfigurationRequest; +import software.amazon.awssdk.services.s3.model.PutBucketInventoryConfigurationResponse; +import software.amazon.awssdk.services.s3.model.PutBucketLifecycleConfigurationRequest; +import software.amazon.awssdk.services.s3.model.PutBucketLifecycleConfigurationResponse; +import software.amazon.awssdk.services.s3.model.PutBucketLoggingRequest; +import software.amazon.awssdk.services.s3.model.PutBucketLoggingResponse; +import software.amazon.awssdk.services.s3.model.PutBucketMetricsConfigurationRequest; +import software.amazon.awssdk.services.s3.model.PutBucketMetricsConfigurationResponse; +import software.amazon.awssdk.services.s3.model.PutBucketNotificationConfigurationRequest; +import software.amazon.awssdk.services.s3.model.PutBucketNotificationConfigurationResponse; +import software.amazon.awssdk.services.s3.model.PutBucketOwnershipControlsRequest; +import software.amazon.awssdk.services.s3.model.PutBucketOwnershipControlsResponse; +import software.amazon.awssdk.services.s3.model.PutBucketPolicyRequest; +import software.amazon.awssdk.services.s3.model.PutBucketPolicyResponse; +import software.amazon.awssdk.services.s3.model.PutBucketReplicationRequest; +import software.amazon.awssdk.services.s3.model.PutBucketReplicationResponse; +import software.amazon.awssdk.services.s3.model.PutBucketRequestPaymentRequest; +import software.amazon.awssdk.services.s3.model.PutBucketRequestPaymentResponse; +import software.amazon.awssdk.services.s3.model.PutBucketTaggingRequest; +import software.amazon.awssdk.services.s3.model.PutBucketTaggingResponse; +import software.amazon.awssdk.services.s3.model.PutBucketVersioningRequest; +import software.amazon.awssdk.services.s3.model.PutBucketVersioningResponse; +import software.amazon.awssdk.services.s3.model.PutBucketWebsiteRequest; +import software.amazon.awssdk.services.s3.model.PutBucketWebsiteResponse; +import software.amazon.awssdk.services.s3.model.PutObjectAclRequest; +import software.amazon.awssdk.services.s3.model.PutObjectAclResponse; +import software.amazon.awssdk.services.s3.model.PutObjectLegalHoldRequest; +import software.amazon.awssdk.services.s3.model.PutObjectLegalHoldResponse; +import software.amazon.awssdk.services.s3.model.PutObjectLockConfigurationRequest; +import software.amazon.awssdk.services.s3.model.PutObjectLockConfigurationResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRetentionRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRetentionResponse; +import software.amazon.awssdk.services.s3.model.PutObjectTaggingRequest; +import software.amazon.awssdk.services.s3.model.PutObjectTaggingResponse; +import software.amazon.awssdk.services.s3.model.PutPublicAccessBlockRequest; +import software.amazon.awssdk.services.s3.model.PutPublicAccessBlockResponse; +import software.amazon.awssdk.services.s3.model.RestoreObjectRequest; +import software.amazon.awssdk.services.s3.model.RestoreObjectResponse; +import software.amazon.awssdk.services.s3.model.S3Exception; +import software.amazon.awssdk.services.s3.model.TooManyPartsException; +import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest; +import software.amazon.awssdk.services.s3.model.UploadPartCopyResponse; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import 
software.amazon.awssdk.services.s3.model.UploadPartResponse; +import software.amazon.awssdk.services.s3.model.WriteGetObjectResponseRequest; +import software.amazon.awssdk.services.s3.model.WriteGetObjectResponseResponse; +import software.amazon.awssdk.services.s3.paginators.ListBucketsIterable; +import software.amazon.awssdk.services.s3.paginators.ListDirectoryBucketsIterable; +import software.amazon.awssdk.services.s3.paginators.ListMultipartUploadsIterable; +import software.amazon.awssdk.services.s3.paginators.ListObjectVersionsIterable; +import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable; +import software.amazon.awssdk.services.s3.paginators.ListPartsIterable; +import software.amazon.awssdk.services.s3.waiters.S3Waiter; import org.elasticsearch.core.SuppressForbidden; -import java.io.File; -import java.io.InputStream; -import java.net.URL; -import java.util.Date; -import java.util.List; +import java.nio.file.Path; +import java.util.function.Consumer; @SuppressForbidden(reason = "implements AWS api that uses java.io.File!") -public class AmazonS3Wrapper extends AbstractAmazonS3 { +public class AmazonS3Wrapper implements S3Client { - protected AmazonS3 delegate; + protected S3Client delegate; - public AmazonS3Wrapper(AmazonS3 delegate) { + public AmazonS3Wrapper(S3Client delegate) { this.delegate = delegate; } @Override - public void setEndpoint(String endpoint) { - delegate.setEndpoint(endpoint); + public void close() { + delegate.close(); } @Override - public void setRegion(Region region) throws IllegalArgumentException { - delegate.setRegion(region); + public String serviceName() { + return "AmazonS3Wrapper"; } @Override - public void setS3ClientOptions(S3ClientOptions clientOptions) { - delegate.setS3ClientOptions(clientOptions); + public AbortMultipartUploadResponse abortMultipartUpload(AbortMultipartUploadRequest abortMultipartUploadRequest) + throws NoSuchUploadException, AwsServiceException, SdkClientException, S3Exception { + return delegate.abortMultipartUpload(abortMultipartUploadRequest); } @Override - public void changeObjectStorageClass(String bucketName, String key, StorageClass newStorageClass) throws AmazonClientException, - AmazonServiceException { - delegate.changeObjectStorageClass(bucketName, key, newStorageClass); + public AbortMultipartUploadResponse abortMultipartUpload(Consumer abortMultipartUploadRequest) + throws NoSuchUploadException, AwsServiceException, SdkClientException, S3Exception { + return delegate.abortMultipartUpload(abortMultipartUploadRequest); } @Override - public void setObjectRedirectLocation(String bucketName, String key, String newRedirectLocation) throws AmazonClientException, - AmazonServiceException { - delegate.setObjectRedirectLocation(bucketName, key, newRedirectLocation); + public CompleteMultipartUploadResponse completeMultipartUpload(CompleteMultipartUploadRequest completeMultipartUploadRequest) + throws AwsServiceException, SdkClientException, S3Exception { + return delegate.completeMultipartUpload(completeMultipartUploadRequest); } @Override - public ObjectListing listObjects(String bucketName) throws AmazonClientException, AmazonServiceException { - return delegate.listObjects(bucketName); + public CompleteMultipartUploadResponse completeMultipartUpload( + Consumer completeMultipartUploadRequest + ) throws AwsServiceException, SdkClientException, S3Exception { + return delegate.completeMultipartUpload(completeMultipartUploadRequest); } @Override - public ObjectListing listObjects(String bucketName, String 
prefix) throws AmazonClientException, AmazonServiceException {
-        return delegate.listObjects(bucketName, prefix);
+    public CopyObjectResponse copyObject(CopyObjectRequest copyObjectRequest) throws ObjectNotInActiveTierErrorException,
+        AwsServiceException, SdkClientException, S3Exception {
+        return delegate.copyObject(copyObjectRequest);
     }

     @Override
-    public ObjectListing listObjects(ListObjectsRequest listObjectsRequest) throws AmazonClientException, AmazonServiceException {
-        return delegate.listObjects(listObjectsRequest);
+    public CopyObjectResponse copyObject(Consumer<CopyObjectRequest.Builder> copyObjectRequest) throws ObjectNotInActiveTierErrorException,
+        AwsServiceException, SdkClientException, S3Exception {
+        return delegate.copyObject(copyObjectRequest);
     }

     @Override
-    public ObjectListing listNextBatchOfObjects(ObjectListing previousObjectListing) throws AmazonClientException, AmazonServiceException {
-        return delegate.listNextBatchOfObjects(previousObjectListing);
+    public CreateBucketResponse createBucket(CreateBucketRequest createBucketRequest) throws BucketAlreadyExistsException,
+        BucketAlreadyOwnedByYouException, AwsServiceException, SdkClientException, S3Exception {
+        return delegate.createBucket(createBucketRequest);
     }

     @Override
-    public VersionListing listVersions(String bucketName, String prefix) throws AmazonClientException, AmazonServiceException {
-        return delegate.listVersions(bucketName, prefix);
+    public CreateBucketResponse createBucket(Consumer<CreateBucketRequest.Builder> createBucketRequest) throws BucketAlreadyExistsException,
+        BucketAlreadyOwnedByYouException, AwsServiceException, SdkClientException, S3Exception {
+        return delegate.createBucket(createBucketRequest);
     }

     @Override
-    public VersionListing listNextBatchOfVersions(VersionListing previousVersionListing) throws AmazonClientException,
-        AmazonServiceException {
-        return delegate.listNextBatchOfVersions(previousVersionListing);
+    public CreateBucketMetadataTableConfigurationResponse createBucketMetadataTableConfiguration(
+        CreateBucketMetadataTableConfigurationRequest createBucketMetadataTableConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.createBucketMetadataTableConfiguration(createBucketMetadataTableConfigurationRequest);
     }

     @Override
-    public VersionListing listVersions(
-        String bucketName,
-        String prefix,
-        String keyMarker,
-        String versionIdMarker,
-        String delimiter,
-        Integer maxResults
-    ) throws AmazonClientException, AmazonServiceException {
-        return delegate.listVersions(bucketName, prefix, keyMarker, versionIdMarker, delimiter, maxResults);
+    public CreateBucketMetadataTableConfigurationResponse createBucketMetadataTableConfiguration(
+        Consumer<CreateBucketMetadataTableConfigurationRequest.Builder> createBucketMetadataTableConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.createBucketMetadataTableConfiguration(createBucketMetadataTableConfigurationRequest);
     }

     @Override
-    public VersionListing listVersions(ListVersionsRequest listVersionsRequest) throws AmazonClientException, AmazonServiceException {
-        return delegate.listVersions(listVersionsRequest);
+    public CreateMultipartUploadResponse createMultipartUpload(CreateMultipartUploadRequest createMultipartUploadRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.createMultipartUpload(createMultipartUploadRequest);
     }

     @Override
-    public Owner getS3AccountOwner() throws AmazonClientException, AmazonServiceException {
-        return delegate.getS3AccountOwner();
+    public CreateMultipartUploadResponse createMultipartUpload(Consumer<CreateMultipartUploadRequest.Builder> createMultipartUploadRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.createMultipartUpload(createMultipartUploadRequest);
     }

     @Override
-    public boolean doesBucketExist(String bucketName) throws AmazonClientException, AmazonServiceException {
-        return delegate.doesBucketExist(bucketName);
+    public CreateSessionResponse createSession(CreateSessionRequest createSessionRequest) throws NoSuchBucketException, AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.createSession(createSessionRequest);
     }

     @Override
-    public List<Bucket> listBuckets() throws AmazonClientException, AmazonServiceException {
-        return delegate.listBuckets();
+    public CreateSessionResponse createSession(Consumer<CreateSessionRequest.Builder> createSessionRequest) throws NoSuchBucketException,
+        AwsServiceException, SdkClientException, S3Exception {
+        return delegate.createSession(createSessionRequest);
     }

     @Override
-    public List<Bucket> listBuckets(ListBucketsRequest listBucketsRequest) throws AmazonClientException, AmazonServiceException {
-        return delegate.listBuckets(listBucketsRequest);
+    public DeleteBucketResponse deleteBucket(DeleteBucketRequest deleteBucketRequest) throws AwsServiceException, SdkClientException,
+        S3Exception {
+        return delegate.deleteBucket(deleteBucketRequest);
     }

     @Override
-    public String getBucketLocation(String bucketName) throws AmazonClientException, AmazonServiceException {
-        return delegate.getBucketLocation(bucketName);
+    public DeleteBucketResponse deleteBucket(Consumer<DeleteBucketRequest.Builder> deleteBucketRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.deleteBucket(deleteBucketRequest);
     }

     @Override
-    public String getBucketLocation(GetBucketLocationRequest getBucketLocationRequest) throws AmazonClientException,
-        AmazonServiceException {
-        return delegate.getBucketLocation(getBucketLocationRequest);
+    public DeleteBucketAnalyticsConfigurationResponse deleteBucketAnalyticsConfiguration(
+        DeleteBucketAnalyticsConfigurationRequest deleteBucketAnalyticsConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketAnalyticsConfiguration(deleteBucketAnalyticsConfigurationRequest);
     }

     @Override
-    public Bucket createBucket(CreateBucketRequest createBucketRequest) throws AmazonClientException, AmazonServiceException {
-        return delegate.createBucket(createBucketRequest);
+    public DeleteBucketAnalyticsConfigurationResponse deleteBucketAnalyticsConfiguration(
+        Consumer<DeleteBucketAnalyticsConfigurationRequest.Builder> deleteBucketAnalyticsConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketAnalyticsConfiguration(deleteBucketAnalyticsConfigurationRequest);
     }

     @Override
-    public Bucket createBucket(String bucketName) throws AmazonClientException, AmazonServiceException {
-        return delegate.createBucket(bucketName);
+    public DeleteBucketCorsResponse deleteBucketCors(DeleteBucketCorsRequest deleteBucketCorsRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.deleteBucketCors(deleteBucketCorsRequest);
     }

     @Override
-    public Bucket createBucket(String bucketName, com.amazonaws.services.s3.model.Region region) throws AmazonClientException,
-        AmazonServiceException {
-        return delegate.createBucket(bucketName, region);
+    public DeleteBucketCorsResponse deleteBucketCors(Consumer<DeleteBucketCorsRequest.Builder> deleteBucketCorsRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketCors(deleteBucketCorsRequest);
     }

     @Override
-    public Bucket createBucket(String bucketName, String region) throws AmazonClientException, AmazonServiceException {
-        return delegate.createBucket(bucketName, region);
+    public DeleteBucketEncryptionResponse deleteBucketEncryption(DeleteBucketEncryptionRequest deleteBucketEncryptionRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketEncryption(deleteBucketEncryptionRequest);
     }

     @Override
-    public AccessControlList getObjectAcl(String bucketName, String key) throws AmazonClientException, AmazonServiceException {
-        return delegate.getObjectAcl(bucketName, key);
+    public DeleteBucketEncryptionResponse deleteBucketEncryption(
+        Consumer<DeleteBucketEncryptionRequest.Builder> deleteBucketEncryptionRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketEncryption(deleteBucketEncryptionRequest);
     }

     @Override
-    public AccessControlList getObjectAcl(String bucketName, String key, String versionId) throws AmazonClientException,
-        AmazonServiceException {
-        return delegate.getObjectAcl(bucketName, key, versionId);
+    public DeleteBucketIntelligentTieringConfigurationResponse deleteBucketIntelligentTieringConfiguration(
+        DeleteBucketIntelligentTieringConfigurationRequest deleteBucketIntelligentTieringConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketIntelligentTieringConfiguration(deleteBucketIntelligentTieringConfigurationRequest);
     }

     @Override
-    public AccessControlList getObjectAcl(GetObjectAclRequest getObjectAclRequest) throws AmazonClientException, AmazonServiceException {
-        return delegate.getObjectAcl(getObjectAclRequest);
+    public DeleteBucketIntelligentTieringConfigurationResponse deleteBucketIntelligentTieringConfiguration(
+        Consumer<DeleteBucketIntelligentTieringConfigurationRequest.Builder> deleteBucketIntelligentTieringConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketIntelligentTieringConfiguration(deleteBucketIntelligentTieringConfigurationRequest);
+    }
+
+    @Override
+    public DeleteBucketInventoryConfigurationResponse deleteBucketInventoryConfiguration(
+        DeleteBucketInventoryConfigurationRequest deleteBucketInventoryConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketInventoryConfiguration(deleteBucketInventoryConfigurationRequest);
+    }
+
+    @Override
+    public DeleteBucketInventoryConfigurationResponse deleteBucketInventoryConfiguration(
+        Consumer<DeleteBucketInventoryConfigurationRequest.Builder> deleteBucketInventoryConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketInventoryConfiguration(deleteBucketInventoryConfigurationRequest);
+    }
+
+    @Override
+    public DeleteBucketLifecycleResponse deleteBucketLifecycle(DeleteBucketLifecycleRequest deleteBucketLifecycleRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketLifecycle(deleteBucketLifecycleRequest);
+    }
+
+    @Override
+    public DeleteBucketLifecycleResponse deleteBucketLifecycle(Consumer<DeleteBucketLifecycleRequest.Builder> deleteBucketLifecycleRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketLifecycle(deleteBucketLifecycleRequest);
     }
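[Note: the v2 interface declares every operation twice, once taking a built request object and once taking a `Consumer` of that request's builder, which is why each delegated method above appears in pairs. A minimal sketch of the two equivalent call styles; the bucket name is hypothetical, not taken from this diff:

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.DeleteBucketCorsRequest;

    try (S3Client client = S3Client.create()) {
        // Explicit request object...
        client.deleteBucketCors(DeleteBucketCorsRequest.builder().bucket("my-bucket").build());
        // ...or the Consumer<Builder> shorthand; the SDK applies the lambda to a fresh builder.
        client.deleteBucketCors(b -> b.bucket("my-bucket"));
    }
]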
     @Override
-    public void setObjectAcl(String bucketName, String key, AccessControlList acl) throws AmazonClientException, AmazonServiceException {
-        delegate.setObjectAcl(bucketName, key, acl);
+    public DeleteBucketMetadataTableConfigurationResponse deleteBucketMetadataTableConfiguration(
+        DeleteBucketMetadataTableConfigurationRequest deleteBucketMetadataTableConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketMetadataTableConfiguration(deleteBucketMetadataTableConfigurationRequest);
     }

     @Override
-    public void setObjectAcl(String bucketName, String key, CannedAccessControlList acl) throws AmazonClientException,
-        AmazonServiceException {
-        delegate.setObjectAcl(bucketName, key, acl);
+    public DeleteBucketMetadataTableConfigurationResponse deleteBucketMetadataTableConfiguration(
+        Consumer<DeleteBucketMetadataTableConfigurationRequest.Builder> deleteBucketMetadataTableConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketMetadataTableConfiguration(deleteBucketMetadataTableConfigurationRequest);
     }

     @Override
-    public void setObjectAcl(String bucketName, String key, String versionId, AccessControlList acl) throws AmazonClientException,
-        AmazonServiceException {
-        delegate.setObjectAcl(bucketName, key, versionId, acl);
+    public DeleteBucketMetricsConfigurationResponse deleteBucketMetricsConfiguration(
+        DeleteBucketMetricsConfigurationRequest deleteBucketMetricsConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketMetricsConfiguration(deleteBucketMetricsConfigurationRequest);
     }

     @Override
-    public void setObjectAcl(String bucketName, String key, String versionId, CannedAccessControlList acl) throws AmazonClientException,
-        AmazonServiceException {
-        delegate.setObjectAcl(bucketName, key, versionId, acl);
+    public DeleteBucketMetricsConfigurationResponse deleteBucketMetricsConfiguration(
+        Consumer<DeleteBucketMetricsConfigurationRequest.Builder> deleteBucketMetricsConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketMetricsConfiguration(deleteBucketMetricsConfigurationRequest);
     }

     @Override
-    public void setObjectAcl(SetObjectAclRequest setObjectAclRequest) throws AmazonClientException, AmazonServiceException {
-        delegate.setObjectAcl(setObjectAclRequest);
+    public DeleteBucketOwnershipControlsResponse deleteBucketOwnershipControls(
+        DeleteBucketOwnershipControlsRequest deleteBucketOwnershipControlsRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketOwnershipControls(deleteBucketOwnershipControlsRequest);
     }

     @Override
-    public AccessControlList getBucketAcl(String bucketName) throws AmazonClientException, AmazonServiceException {
-        return delegate.getBucketAcl(bucketName);
+    public DeleteBucketOwnershipControlsResponse deleteBucketOwnershipControls(
+        Consumer<DeleteBucketOwnershipControlsRequest.Builder> deleteBucketOwnershipControlsRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketOwnershipControls(deleteBucketOwnershipControlsRequest);
     }

     @Override
-    public void setBucketAcl(SetBucketAclRequest setBucketAclRequest) throws AmazonClientException, AmazonServiceException {
-        delegate.setBucketAcl(setBucketAclRequest);
+    public DeleteBucketPolicyResponse deleteBucketPolicy(DeleteBucketPolicyRequest deleteBucketPolicyRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.deleteBucketPolicy(deleteBucketPolicyRequest);
     }

     @Override
-    public AccessControlList getBucketAcl(GetBucketAclRequest getBucketAclRequest) throws AmazonClientException, AmazonServiceException {
+    public DeleteBucketPolicyResponse deleteBucketPolicy(Consumer<DeleteBucketPolicyRequest.Builder> deleteBucketPolicyRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketPolicy(deleteBucketPolicyRequest);
+    }
+
+    @Override
+    public DeleteBucketReplicationResponse deleteBucketReplication(DeleteBucketReplicationRequest deleteBucketReplicationRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketReplication(deleteBucketReplicationRequest);
+    }
+
+    @Override
+    public DeleteBucketReplicationResponse deleteBucketReplication(
+        Consumer<DeleteBucketReplicationRequest.Builder> deleteBucketReplicationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketReplication(deleteBucketReplicationRequest);
+    }
+
+    @Override
+    public DeleteBucketTaggingResponse deleteBucketTagging(DeleteBucketTaggingRequest deleteBucketTaggingRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketTagging(deleteBucketTaggingRequest);
+    }
+
+    @Override
+    public DeleteBucketTaggingResponse deleteBucketTagging(Consumer<DeleteBucketTaggingRequest.Builder> deleteBucketTaggingRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketTagging(deleteBucketTaggingRequest);
+    }
+
+    @Override
+    public DeleteBucketWebsiteResponse deleteBucketWebsite(DeleteBucketWebsiteRequest deleteBucketWebsiteRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketWebsite(deleteBucketWebsiteRequest);
+    }
+
+    @Override
+    public DeleteBucketWebsiteResponse deleteBucketWebsite(Consumer<DeleteBucketWebsiteRequest.Builder> deleteBucketWebsiteRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteBucketWebsite(deleteBucketWebsiteRequest);
+    }
+
+    @Override
+    public DeleteObjectResponse deleteObject(DeleteObjectRequest deleteObjectRequest) throws AwsServiceException, SdkClientException,
+        S3Exception {
+        return delegate.deleteObject(deleteObjectRequest);
+    }
+
+    @Override
+    public DeleteObjectResponse deleteObject(Consumer<DeleteObjectRequest.Builder> deleteObjectRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.deleteObject(deleteObjectRequest);
+    }
+
+    @Override
+    public DeleteObjectTaggingResponse deleteObjectTagging(DeleteObjectTaggingRequest deleteObjectTaggingRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteObjectTagging(deleteObjectTaggingRequest);
+    }
+
+    @Override
+    public DeleteObjectTaggingResponse deleteObjectTagging(Consumer<DeleteObjectTaggingRequest.Builder> deleteObjectTaggingRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deleteObjectTagging(deleteObjectTaggingRequest);
+    }
+
+    @Override
+    public DeleteObjectsResponse deleteObjects(DeleteObjectsRequest deleteObjectsRequest) throws AwsServiceException, SdkClientException,
+        S3Exception {
+        return delegate.deleteObjects(deleteObjectsRequest);
+    }
+
+    @Override
+    public DeleteObjectsResponse deleteObjects(Consumer<DeleteObjectsRequest.Builder> deleteObjectsRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.deleteObjects(deleteObjectsRequest);
+    }
+
+    @Override
+    public DeletePublicAccessBlockResponse deletePublicAccessBlock(DeletePublicAccessBlockRequest deletePublicAccessBlockRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deletePublicAccessBlock(deletePublicAccessBlockRequest);
+    }
+
+    @Override
+    public DeletePublicAccessBlockResponse deletePublicAccessBlock(
+        Consumer<DeletePublicAccessBlockRequest.Builder> deletePublicAccessBlockRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.deletePublicAccessBlock(deletePublicAccessBlockRequest);
+    }
+
+    @Override
+    public GetBucketAccelerateConfigurationResponse getBucketAccelerateConfiguration(
+        GetBucketAccelerateConfigurationRequest getBucketAccelerateConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketAccelerateConfiguration(getBucketAccelerateConfigurationRequest);
+    }
+
+    @Override
+    public GetBucketAccelerateConfigurationResponse getBucketAccelerateConfiguration(
+        Consumer<GetBucketAccelerateConfigurationRequest.Builder> getBucketAccelerateConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketAccelerateConfiguration(getBucketAccelerateConfigurationRequest);
+    }
+
+    @Override
+    public GetBucketAclResponse getBucketAcl(GetBucketAclRequest getBucketAclRequest) throws AwsServiceException, SdkClientException,
+        S3Exception {
+        return delegate.getBucketAcl(getBucketAclRequest);
+    }
+
+    @Override
+    public GetBucketAclResponse getBucketAcl(Consumer<GetBucketAclRequest.Builder> getBucketAclRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
         return delegate.getBucketAcl(getBucketAclRequest);
     }

     @Override
-    public void setBucketAcl(String bucketName, AccessControlList acl) throws AmazonClientException, AmazonServiceException {
-        delegate.setBucketAcl(bucketName, acl);
+    public GetBucketAnalyticsConfigurationResponse getBucketAnalyticsConfiguration(
+        GetBucketAnalyticsConfigurationRequest getBucketAnalyticsConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketAnalyticsConfiguration(getBucketAnalyticsConfigurationRequest);
+    }
+
+    @Override
+    public GetBucketAnalyticsConfigurationResponse getBucketAnalyticsConfiguration(
+        Consumer<GetBucketAnalyticsConfigurationRequest.Builder> getBucketAnalyticsConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketAnalyticsConfiguration(getBucketAnalyticsConfigurationRequest);
+    }
+
+    @Override
+    public GetBucketCorsResponse getBucketCors(GetBucketCorsRequest getBucketCorsRequest) throws AwsServiceException, SdkClientException,
+        S3Exception {
+        return delegate.getBucketCors(getBucketCorsRequest);
+    }
+
+    @Override
+    public GetBucketCorsResponse getBucketCors(Consumer<GetBucketCorsRequest.Builder> getBucketCorsRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.getBucketCors(getBucketCorsRequest);
+    }
+
+    @Override
+    public GetBucketEncryptionResponse getBucketEncryption(GetBucketEncryptionRequest getBucketEncryptionRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketEncryption(getBucketEncryptionRequest);
+    }
+
+    @Override
+    public GetBucketEncryptionResponse getBucketEncryption(Consumer<GetBucketEncryptionRequest.Builder> getBucketEncryptionRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketEncryption(getBucketEncryptionRequest);
+    }
+
+    @Override
+    public GetBucketIntelligentTieringConfigurationResponse getBucketIntelligentTieringConfiguration(
+        GetBucketIntelligentTieringConfigurationRequest getBucketIntelligentTieringConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketIntelligentTieringConfiguration(getBucketIntelligentTieringConfigurationRequest);
+    }
+
+    @Override
+    public GetBucketIntelligentTieringConfigurationResponse getBucketIntelligentTieringConfiguration(
+        Consumer<GetBucketIntelligentTieringConfigurationRequest.Builder> getBucketIntelligentTieringConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketIntelligentTieringConfiguration(getBucketIntelligentTieringConfigurationRequest);
+    }
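[Note: the value of a pass-through wrapper like this one is that a test can wrap a real client and override a single operation while every other call still reaches the delegate. A sketch of that pattern, assuming a constructor that takes the delegate; `DelegatingS3Client` is a hypothetical stand-in for the wrapper's actual name, which is not visible in this hunk:

    import software.amazon.awssdk.core.exception.SdkClientException;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
    import software.amazon.awssdk.services.s3.model.HeadObjectResponse;

    // Hypothetical test helper: intercept headObject, forward every other call unchanged.
    static S3Client withFailingHeadObject(S3Client realClient) {
        return new DelegatingS3Client(realClient) {
            @Override
            public HeadObjectResponse headObject(HeadObjectRequest headObjectRequest) {
                throw SdkClientException.create("simulated network failure");
            }
        };
    }
]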
+
+    @Override
+    public GetBucketInventoryConfigurationResponse getBucketInventoryConfiguration(
+        GetBucketInventoryConfigurationRequest getBucketInventoryConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketInventoryConfiguration(getBucketInventoryConfigurationRequest);
+    }
+
+    @Override
+    public GetBucketInventoryConfigurationResponse getBucketInventoryConfiguration(
+        Consumer<GetBucketInventoryConfigurationRequest.Builder> getBucketInventoryConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketInventoryConfiguration(getBucketInventoryConfigurationRequest);
+    }
+
+    @Override
+    public GetBucketLifecycleConfigurationResponse getBucketLifecycleConfiguration(
+        GetBucketLifecycleConfigurationRequest getBucketLifecycleConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketLifecycleConfiguration(getBucketLifecycleConfigurationRequest);
+    }
+
+    @Override
+    public GetBucketLifecycleConfigurationResponse getBucketLifecycleConfiguration(
+        Consumer<GetBucketLifecycleConfigurationRequest.Builder> getBucketLifecycleConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketLifecycleConfiguration(getBucketLifecycleConfigurationRequest);
     }

     @Override
-    public void setBucketAcl(String bucketName, CannedAccessControlList acl) throws AmazonClientException, AmazonServiceException {
-        delegate.setBucketAcl(bucketName, acl);
+    public GetBucketLocationResponse getBucketLocation(GetBucketLocationRequest getBucketLocationRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.getBucketLocation(getBucketLocationRequest);
     }

     @Override
-    public ObjectMetadata getObjectMetadata(String bucketName, String key) throws AmazonClientException, AmazonServiceException {
-        return delegate.getObjectMetadata(bucketName, key);
+    public GetBucketLocationResponse getBucketLocation(Consumer<GetBucketLocationRequest.Builder> getBucketLocationRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketLocation(getBucketLocationRequest);
     }

     @Override
-    public ObjectMetadata getObjectMetadata(GetObjectMetadataRequest getObjectMetadataRequest) throws AmazonClientException,
-        AmazonServiceException {
-        return delegate.getObjectMetadata(getObjectMetadataRequest);
+    public GetBucketLoggingResponse getBucketLogging(GetBucketLoggingRequest getBucketLoggingRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.getBucketLogging(getBucketLoggingRequest);
     }

     @Override
-    public S3Object getObject(String bucketName, String key) throws AmazonClientException, AmazonServiceException {
-        return delegate.getObject(bucketName, key);
+    public GetBucketLoggingResponse getBucketLogging(Consumer<GetBucketLoggingRequest.Builder> getBucketLoggingRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketLogging(getBucketLoggingRequest);
     }

     @Override
-    public S3Object getObject(GetObjectRequest getObjectRequest) throws AmazonClientException, AmazonServiceException {
+    public GetBucketMetadataTableConfigurationResponse getBucketMetadataTableConfiguration(
+        GetBucketMetadataTableConfigurationRequest getBucketMetadataTableConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketMetadataTableConfiguration(getBucketMetadataTableConfigurationRequest);
+    }
+
+    @Override
+    public GetBucketMetadataTableConfigurationResponse getBucketMetadataTableConfiguration(
+        Consumer<GetBucketMetadataTableConfigurationRequest.Builder> getBucketMetadataTableConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketMetadataTableConfiguration(getBucketMetadataTableConfigurationRequest);
+    }
+
+    @Override
+    public GetBucketMetricsConfigurationResponse getBucketMetricsConfiguration(
+        GetBucketMetricsConfigurationRequest getBucketMetricsConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketMetricsConfiguration(getBucketMetricsConfigurationRequest);
+    }
+
+    @Override
+    public GetBucketMetricsConfigurationResponse getBucketMetricsConfiguration(
+        Consumer<GetBucketMetricsConfigurationRequest.Builder> getBucketMetricsConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketMetricsConfiguration(getBucketMetricsConfigurationRequest);
+    }
+
+    @Override
+    public GetBucketNotificationConfigurationResponse getBucketNotificationConfiguration(
+        GetBucketNotificationConfigurationRequest getBucketNotificationConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketNotificationConfiguration(getBucketNotificationConfigurationRequest);
+    }
+
+    @Override
+    public GetBucketNotificationConfigurationResponse getBucketNotificationConfiguration(
+        Consumer<GetBucketNotificationConfigurationRequest.Builder> getBucketNotificationConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketNotificationConfiguration(getBucketNotificationConfigurationRequest);
+    }
+
+    @Override
+    public GetBucketOwnershipControlsResponse getBucketOwnershipControls(
+        GetBucketOwnershipControlsRequest getBucketOwnershipControlsRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketOwnershipControls(getBucketOwnershipControlsRequest);
+    }
+
+    @Override
+    public GetBucketOwnershipControlsResponse getBucketOwnershipControls(
+        Consumer<GetBucketOwnershipControlsRequest.Builder> getBucketOwnershipControlsRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketOwnershipControls(getBucketOwnershipControlsRequest);
+    }
+
+    @Override
+    public GetBucketPolicyResponse getBucketPolicy(GetBucketPolicyRequest getBucketPolicyRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.getBucketPolicy(getBucketPolicyRequest);
+    }
+
+    @Override
+    public GetBucketPolicyResponse getBucketPolicy(Consumer<GetBucketPolicyRequest.Builder> getBucketPolicyRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketPolicy(getBucketPolicyRequest);
+    }
+
+    @Override
+    public GetBucketPolicyStatusResponse getBucketPolicyStatus(GetBucketPolicyStatusRequest getBucketPolicyStatusRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketPolicyStatus(getBucketPolicyStatusRequest);
+    }
+
+    @Override
+    public GetBucketPolicyStatusResponse getBucketPolicyStatus(Consumer<GetBucketPolicyStatusRequest.Builder> getBucketPolicyStatusRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketPolicyStatus(getBucketPolicyStatusRequest);
+    }
+
+    @Override
+    public GetBucketReplicationResponse getBucketReplication(GetBucketReplicationRequest getBucketReplicationRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketReplication(getBucketReplicationRequest);
+    }
+
+    @Override
+    public GetBucketReplicationResponse getBucketReplication(Consumer<GetBucketReplicationRequest.Builder> getBucketReplicationRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketReplication(getBucketReplicationRequest);
+    }
+
+    @Override
+    public GetBucketRequestPaymentResponse getBucketRequestPayment(GetBucketRequestPaymentRequest getBucketRequestPaymentRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketRequestPayment(getBucketRequestPaymentRequest);
+    }
+
+    @Override
+    public GetBucketRequestPaymentResponse getBucketRequestPayment(
+        Consumer<GetBucketRequestPaymentRequest.Builder> getBucketRequestPaymentRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketRequestPayment(getBucketRequestPaymentRequest);
+    }
+
+    @Override
+    public GetBucketTaggingResponse getBucketTagging(GetBucketTaggingRequest getBucketTaggingRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.getBucketTagging(getBucketTaggingRequest);
+    }
+
+    @Override
+    public GetBucketTaggingResponse getBucketTagging(Consumer<GetBucketTaggingRequest.Builder> getBucketTaggingRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketTagging(getBucketTaggingRequest);
+    }
+
+    @Override
+    public GetBucketVersioningResponse getBucketVersioning(GetBucketVersioningRequest getBucketVersioningRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketVersioning(getBucketVersioningRequest);
+    }
+
+    @Override
+    public GetBucketVersioningResponse getBucketVersioning(Consumer<GetBucketVersioningRequest.Builder> getBucketVersioningRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketVersioning(getBucketVersioningRequest);
+    }
+
+    @Override
+    public GetBucketWebsiteResponse getBucketWebsite(GetBucketWebsiteRequest getBucketWebsiteRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.getBucketWebsite(getBucketWebsiteRequest);
+    }
+
+    @Override
+    public GetBucketWebsiteResponse getBucketWebsite(Consumer<GetBucketWebsiteRequest.Builder> getBucketWebsiteRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getBucketWebsite(getBucketWebsiteRequest);
+    }
+
+    @Override
+    public <ReturnT> ReturnT getObject(
+        GetObjectRequest getObjectRequest,
+        ResponseTransformer<GetObjectResponse, ReturnT> responseTransformer
+    ) throws NoSuchKeyException, InvalidObjectStateException, AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObject(getObjectRequest, responseTransformer);
+    }
+
+    @Override
+    public <ReturnT> ReturnT getObject(
+        Consumer<GetObjectRequest.Builder> getObjectRequest,
+        ResponseTransformer<GetObjectResponse, ReturnT> responseTransformer
+    ) throws NoSuchKeyException, InvalidObjectStateException, AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObject(getObjectRequest, responseTransformer);
+    }
+
+    @Override
+    public GetObjectResponse getObject(GetObjectRequest getObjectRequest, Path destinationPath) throws NoSuchKeyException,
+        InvalidObjectStateException, AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObject(getObjectRequest, destinationPath);
+    }
+
+    @Override
+    public GetObjectResponse getObject(Consumer<GetObjectRequest.Builder> getObjectRequest, Path destinationPath) throws NoSuchKeyException,
+        InvalidObjectStateException, AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObject(getObjectRequest, destinationPath);
+    }
+
+    @Override
+    public ResponseInputStream<GetObjectResponse> getObject(GetObjectRequest getObjectRequest) throws NoSuchKeyException,
+        InvalidObjectStateException, AwsServiceException, SdkClientException, S3Exception {
         return delegate.getObject(getObjectRequest);
     }

     @Override
-    public ObjectMetadata getObject(GetObjectRequest getObjectRequest, File destinationFile) throws AmazonClientException,
-        AmazonServiceException {
-        return delegate.getObject(getObjectRequest, destinationFile);
+    public ResponseInputStream<GetObjectResponse> getObject(Consumer<GetObjectRequest.Builder> getObjectRequest) throws NoSuchKeyException,
+        InvalidObjectStateException, AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObject(getObjectRequest);
     }

     @Override
-    public void deleteBucket(DeleteBucketRequest deleteBucketRequest) throws AmazonClientException, AmazonServiceException {
-        delegate.deleteBucket(deleteBucketRequest);
+    public ResponseBytes<GetObjectResponse> getObjectAsBytes(GetObjectRequest getObjectRequest) throws NoSuchKeyException,
+        InvalidObjectStateException, AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObjectAsBytes(getObjectRequest);
     }

     @Override
-    public void deleteBucket(String bucketName) throws AmazonClientException, AmazonServiceException {
-        delegate.deleteBucket(bucketName);
+    public ResponseBytes<GetObjectResponse> getObjectAsBytes(Consumer<GetObjectRequest.Builder> getObjectRequest) throws NoSuchKeyException,
+        InvalidObjectStateException, AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObjectAsBytes(getObjectRequest);
     }

     @Override
-    public void setBucketReplicationConfiguration(String bucketName, BucketReplicationConfiguration configuration)
-        throws AmazonServiceException, AmazonClientException {
-        delegate.setBucketReplicationConfiguration(bucketName, configuration);
+    public GetObjectAclResponse getObjectAcl(GetObjectAclRequest getObjectAclRequest) throws NoSuchKeyException, AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.getObjectAcl(getObjectAclRequest);
     }

     @Override
-    public void setBucketReplicationConfiguration(SetBucketReplicationConfigurationRequest setBucketReplicationConfigurationRequest)
-        throws AmazonServiceException, AmazonClientException {
-        delegate.setBucketReplicationConfiguration(setBucketReplicationConfigurationRequest);
+    public GetObjectAclResponse getObjectAcl(Consumer<GetObjectAclRequest.Builder> getObjectAclRequest) throws NoSuchKeyException,
+        AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObjectAcl(getObjectAclRequest);
     }

     @Override
-    public BucketReplicationConfiguration getBucketReplicationConfiguration(String bucketName) throws AmazonServiceException,
-        AmazonClientException {
-        return delegate.getBucketReplicationConfiguration(bucketName);
+    public GetObjectAttributesResponse getObjectAttributes(GetObjectAttributesRequest getObjectAttributesRequest) throws NoSuchKeyException,
+        AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObjectAttributes(getObjectAttributesRequest);
     }

     @Override
-    public void deleteBucketReplicationConfiguration(String bucketName) throws AmazonServiceException, AmazonClientException {
-        delegate.deleteBucketReplicationConfiguration(bucketName);
+    public GetObjectAttributesResponse getObjectAttributes(Consumer<GetObjectAttributesRequest.Builder> getObjectAttributesRequest)
+        throws NoSuchKeyException, AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObjectAttributes(getObjectAttributesRequest);
     }
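[Note: the `getObject` overloads above replace the v1 `S3Object`/`File` forms; the caller now chooses a `ResponseTransformer`, a destination `Path`, or a raw `ResponseInputStream`. A short sketch of two of those styles, with hypothetical bucket, key and path:

    import java.nio.file.Path;
    import software.amazon.awssdk.core.ResponseInputStream;
    import software.amazon.awssdk.core.sync.ResponseTransformer;
    import software.amazon.awssdk.services.s3.model.GetObjectResponse;

    // Download straight to a file via a transformer...
    client.getObject(b -> b.bucket("my-bucket").key("my-key"), ResponseTransformer.toFile(Path.of("/tmp/my-key")));
    // ...or stream the body; close the stream to release the HTTP connection.
    try (ResponseInputStream<GetObjectResponse> in = client.getObject(b -> b.bucket("my-bucket").key("my-key"))) {
        long length = in.response().contentLength();  // response metadata rides along with the stream
    }
]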
     @Override
-    public void deleteBucketReplicationConfiguration(DeleteBucketReplicationConfigurationRequest request) throws AmazonServiceException,
-        AmazonClientException {
-        delegate.deleteBucketReplicationConfiguration(request);
+    public GetObjectLegalHoldResponse getObjectLegalHold(GetObjectLegalHoldRequest getObjectLegalHoldRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.getObjectLegalHold(getObjectLegalHoldRequest);
     }

     @Override
-    public boolean doesObjectExist(String bucketName, String objectName) throws AmazonServiceException, AmazonClientException {
-        return delegate.doesObjectExist(bucketName, objectName);
+    public GetObjectLegalHoldResponse getObjectLegalHold(Consumer<GetObjectLegalHoldRequest.Builder> getObjectLegalHoldRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObjectLegalHold(getObjectLegalHoldRequest);
     }

     @Override
-    public PutObjectResult putObject(PutObjectRequest putObjectRequest) throws AmazonClientException, AmazonServiceException {
-        return delegate.putObject(putObjectRequest);
+    public GetObjectLockConfigurationResponse getObjectLockConfiguration(
+        GetObjectLockConfigurationRequest getObjectLockConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObjectLockConfiguration(getObjectLockConfigurationRequest);
     }

     @Override
-    public PutObjectResult putObject(String bucketName, String key, File file) throws AmazonClientException, AmazonServiceException {
-        return delegate.putObject(bucketName, key, file);
+    public GetObjectLockConfigurationResponse getObjectLockConfiguration(
+        Consumer<GetObjectLockConfigurationRequest.Builder> getObjectLockConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObjectLockConfiguration(getObjectLockConfigurationRequest);
     }

     @Override
-    public PutObjectResult putObject(String bucketName, String key, InputStream input, ObjectMetadata metadata)
-        throws AmazonClientException, AmazonServiceException {
-        return delegate.putObject(bucketName, key, input, metadata);
+    public GetObjectRetentionResponse getObjectRetention(GetObjectRetentionRequest getObjectRetentionRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.getObjectRetention(getObjectRetentionRequest);
     }

     @Override
-    public CopyObjectResult copyObject(String sourceBucketName, String sourceKey, String destinationBucketName, String destinationKey)
-        throws AmazonClientException, AmazonServiceException {
-        return delegate.copyObject(sourceBucketName, sourceKey, destinationBucketName, destinationKey);
+    public GetObjectRetentionResponse getObjectRetention(Consumer<GetObjectRetentionRequest.Builder> getObjectRetentionRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObjectRetention(getObjectRetentionRequest);
     }

     @Override
-    public CopyObjectResult copyObject(CopyObjectRequest copyObjectRequest) throws AmazonClientException, AmazonServiceException {
-        return delegate.copyObject(copyObjectRequest);
+    public GetObjectTaggingResponse getObjectTagging(GetObjectTaggingRequest getObjectTaggingRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.getObjectTagging(getObjectTaggingRequest);
     }

     @Override
-    public CopyPartResult copyPart(CopyPartRequest copyPartRequest) throws AmazonClientException, AmazonServiceException {
-        return delegate.copyPart(copyPartRequest);
+    public GetObjectTaggingResponse getObjectTagging(Consumer<GetObjectTaggingRequest.Builder> getObjectTaggingRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObjectTagging(getObjectTaggingRequest);
     }

     @Override
-    public void deleteObject(String bucketName, String key) throws AmazonClientException, AmazonServiceException {
-        delegate.deleteObject(bucketName, key);
+    public <ReturnT> ReturnT getObjectTorrent(
+        GetObjectTorrentRequest getObjectTorrentRequest,
+        ResponseTransformer<GetObjectTorrentResponse, ReturnT> responseTransformer
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObjectTorrent(getObjectTorrentRequest, responseTransformer);
     }

     @Override
-    public void deleteObject(DeleteObjectRequest deleteObjectRequest) throws AmazonClientException, AmazonServiceException {
-        delegate.deleteObject(deleteObjectRequest);
+    public <ReturnT> ReturnT getObjectTorrent(
+        Consumer<GetObjectTorrentRequest.Builder> getObjectTorrentRequest,
+        ResponseTransformer<GetObjectTorrentResponse, ReturnT> responseTransformer
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObjectTorrent(getObjectTorrentRequest, responseTransformer);
     }

     @Override
-    public DeleteObjectsResult deleteObjects(DeleteObjectsRequest deleteObjectsRequest) throws AmazonClientException,
-        AmazonServiceException {
-        return delegate.deleteObjects(deleteObjectsRequest);
+    public GetObjectTorrentResponse getObjectTorrent(GetObjectTorrentRequest getObjectTorrentRequest, Path destinationPath)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObjectTorrent(getObjectTorrentRequest, destinationPath);
     }

     @Override
-    public void deleteVersion(String bucketName, String key, String versionId) throws AmazonClientException, AmazonServiceException {
-        delegate.deleteVersion(bucketName, key, versionId);
+    public GetObjectTorrentResponse getObjectTorrent(
+        Consumer<GetObjectTorrentRequest.Builder> getObjectTorrentRequest,
+        Path destinationPath
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObjectTorrent(getObjectTorrentRequest, destinationPath);
     }

     @Override
-    public void deleteVersion(DeleteVersionRequest deleteVersionRequest) throws AmazonClientException, AmazonServiceException {
-        delegate.deleteVersion(deleteVersionRequest);
+    public ResponseInputStream<GetObjectTorrentResponse> getObjectTorrent(GetObjectTorrentRequest getObjectTorrentRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObjectTorrent(getObjectTorrentRequest);
     }

     @Override
-    public BucketLoggingConfiguration getBucketLoggingConfiguration(String bucketName) throws AmazonClientException,
-        AmazonServiceException {
-        return delegate.getBucketLoggingConfiguration(bucketName);
+    public ResponseInputStream<GetObjectTorrentResponse> getObjectTorrent(Consumer<GetObjectTorrentRequest.Builder> getObjectTorrentRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObjectTorrent(getObjectTorrentRequest);
     }

     @Override
-    public void setBucketLoggingConfiguration(SetBucketLoggingConfigurationRequest setBucketLoggingConfigurationRequest)
-        throws AmazonClientException, AmazonServiceException {
-        delegate.setBucketLoggingConfiguration(setBucketLoggingConfigurationRequest);
+    public ResponseBytes<GetObjectTorrentResponse> getObjectTorrentAsBytes(GetObjectTorrentRequest getObjectTorrentRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObjectTorrentAsBytes(getObjectTorrentRequest);
     }

     @Override
-    public BucketVersioningConfiguration getBucketVersioningConfiguration(String bucketName) throws AmazonClientException,
-        AmazonServiceException {
-        return delegate.getBucketVersioningConfiguration(bucketName);
+    public ResponseBytes<GetObjectTorrentResponse> getObjectTorrentAsBytes(
+        Consumer<GetObjectTorrentRequest.Builder> getObjectTorrentRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getObjectTorrentAsBytes(getObjectTorrentRequest);
     }

     @Override
-    public void setBucketVersioningConfiguration(SetBucketVersioningConfigurationRequest setBucketVersioningConfigurationRequest)
-        throws AmazonClientException, AmazonServiceException {
-        delegate.setBucketVersioningConfiguration(setBucketVersioningConfigurationRequest);
+    public GetPublicAccessBlockResponse getPublicAccessBlock(GetPublicAccessBlockRequest getPublicAccessBlockRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getPublicAccessBlock(getPublicAccessBlockRequest);
     }

     @Override
-    public BucketLifecycleConfiguration getBucketLifecycleConfiguration(String bucketName) {
-        return delegate.getBucketLifecycleConfiguration(bucketName);
+    public GetPublicAccessBlockResponse getPublicAccessBlock(Consumer<GetPublicAccessBlockRequest.Builder> getPublicAccessBlockRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.getPublicAccessBlock(getPublicAccessBlockRequest);
     }

     @Override
-    public void setBucketLifecycleConfiguration(String bucketName, BucketLifecycleConfiguration bucketLifecycleConfiguration) {
-        delegate.setBucketLifecycleConfiguration(bucketName, bucketLifecycleConfiguration);
+    public HeadBucketResponse headBucket(HeadBucketRequest headBucketRequest) throws NoSuchBucketException, AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.headBucket(headBucketRequest);
     }

     @Override
-    public void setBucketLifecycleConfiguration(SetBucketLifecycleConfigurationRequest setBucketLifecycleConfigurationRequest) {
-        delegate.setBucketLifecycleConfiguration(setBucketLifecycleConfigurationRequest);
+    public HeadBucketResponse headBucket(Consumer<HeadBucketRequest.Builder> headBucketRequest) throws NoSuchBucketException,
+        AwsServiceException, SdkClientException, S3Exception {
+        return delegate.headBucket(headBucketRequest);
     }

     @Override
-    public void deleteBucketLifecycleConfiguration(String bucketName) {
-        delegate.deleteBucketLifecycleConfiguration(bucketName);
+    public HeadObjectResponse headObject(HeadObjectRequest headObjectRequest) throws NoSuchKeyException, AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.headObject(headObjectRequest);
     }

     @Override
-    public void deleteBucketLifecycleConfiguration(DeleteBucketLifecycleConfigurationRequest deleteBucketLifecycleConfigurationRequest) {
-        delegate.deleteBucketLifecycleConfiguration(deleteBucketLifecycleConfigurationRequest);
+    public HeadObjectResponse headObject(Consumer<HeadObjectRequest.Builder> headObjectRequest) throws NoSuchKeyException,
+        AwsServiceException, SdkClientException, S3Exception {
+        return delegate.headObject(headObjectRequest);
     }

     @Override
-    public BucketCrossOriginConfiguration getBucketCrossOriginConfiguration(String bucketName) {
-        return delegate.getBucketCrossOriginConfiguration(bucketName);
+    public ListBucketAnalyticsConfigurationsResponse listBucketAnalyticsConfigurations(
+        ListBucketAnalyticsConfigurationsRequest listBucketAnalyticsConfigurationsRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listBucketAnalyticsConfigurations(listBucketAnalyticsConfigurationsRequest);
     }

     @Override
-    public void setBucketCrossOriginConfiguration(String bucketName, BucketCrossOriginConfiguration bucketCrossOriginConfiguration) {
-        delegate.setBucketCrossOriginConfiguration(bucketName, bucketCrossOriginConfiguration);
+    public ListBucketAnalyticsConfigurationsResponse listBucketAnalyticsConfigurations(
+        Consumer<ListBucketAnalyticsConfigurationsRequest.Builder> listBucketAnalyticsConfigurationsRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listBucketAnalyticsConfigurations(listBucketAnalyticsConfigurationsRequest);
     }
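[Note: there is no v2 counterpart to the `doesObjectExist` method removed above; the usual replacement is a `headObject` call that treats the modeled 404 exception as "absent". A sketch, with a hypothetical helper name:

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.NoSuchKeyException;

    static boolean objectExists(S3Client client, String bucket, String key) {
        try {
            client.headObject(b -> b.bucket(bucket).key(key));
            return true;
        } catch (NoSuchKeyException e) {
            return false;  // S3 returned 404 Not Found for this key
        }
    }
]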
     @Override
-    public void setBucketCrossOriginConfiguration(SetBucketCrossOriginConfigurationRequest setBucketCrossOriginConfigurationRequest) {
-        delegate.setBucketCrossOriginConfiguration(setBucketCrossOriginConfigurationRequest);
+    public ListBucketIntelligentTieringConfigurationsResponse listBucketIntelligentTieringConfigurations(
+        ListBucketIntelligentTieringConfigurationsRequest listBucketIntelligentTieringConfigurationsRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listBucketIntelligentTieringConfigurations(listBucketIntelligentTieringConfigurationsRequest);
     }

     @Override
-    public void deleteBucketCrossOriginConfiguration(String bucketName) {
-        delegate.deleteBucketCrossOriginConfiguration(bucketName);
+    public ListBucketIntelligentTieringConfigurationsResponse listBucketIntelligentTieringConfigurations(
+        Consumer<ListBucketIntelligentTieringConfigurationsRequest.Builder> listBucketIntelligentTieringConfigurationsRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listBucketIntelligentTieringConfigurations(listBucketIntelligentTieringConfigurationsRequest);
     }

     @Override
-    public void deleteBucketCrossOriginConfiguration(
-        DeleteBucketCrossOriginConfigurationRequest deleteBucketCrossOriginConfigurationRequest
-    ) {
-        delegate.deleteBucketCrossOriginConfiguration(deleteBucketCrossOriginConfigurationRequest);
+    public ListBucketInventoryConfigurationsResponse listBucketInventoryConfigurations(
+        ListBucketInventoryConfigurationsRequest listBucketInventoryConfigurationsRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listBucketInventoryConfigurations(listBucketInventoryConfigurationsRequest);
     }

     @Override
-    public BucketTaggingConfiguration getBucketTaggingConfiguration(String bucketName) {
-        return delegate.getBucketTaggingConfiguration(bucketName);
+    public ListBucketInventoryConfigurationsResponse listBucketInventoryConfigurations(
+        Consumer<ListBucketInventoryConfigurationsRequest.Builder> listBucketInventoryConfigurationsRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listBucketInventoryConfigurations(listBucketInventoryConfigurationsRequest);
     }

     @Override
-    public void setBucketTaggingConfiguration(String bucketName, BucketTaggingConfiguration bucketTaggingConfiguration) {
-        delegate.setBucketTaggingConfiguration(bucketName, bucketTaggingConfiguration);
+    public ListBucketMetricsConfigurationsResponse listBucketMetricsConfigurations(
+        ListBucketMetricsConfigurationsRequest listBucketMetricsConfigurationsRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listBucketMetricsConfigurations(listBucketMetricsConfigurationsRequest);
     }

     @Override
-    public void setBucketTaggingConfiguration(SetBucketTaggingConfigurationRequest setBucketTaggingConfigurationRequest) {
-        delegate.setBucketTaggingConfiguration(setBucketTaggingConfigurationRequest);
+    public ListBucketMetricsConfigurationsResponse listBucketMetricsConfigurations(
+        Consumer<ListBucketMetricsConfigurationsRequest.Builder> listBucketMetricsConfigurationsRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listBucketMetricsConfigurations(listBucketMetricsConfigurationsRequest);
     }

     @Override
-    public void deleteBucketTaggingConfiguration(String bucketName) {
-        delegate.deleteBucketTaggingConfiguration(bucketName);
+    public ListBucketsResponse listBuckets(ListBucketsRequest listBucketsRequest) throws AwsServiceException, SdkClientException,
+        S3Exception {
+        return delegate.listBuckets(listBucketsRequest);
     }

     @Override
-    public void deleteBucketTaggingConfiguration(DeleteBucketTaggingConfigurationRequest deleteBucketTaggingConfigurationRequest) {
-        delegate.deleteBucketTaggingConfiguration(deleteBucketTaggingConfigurationRequest);
+    public ListBucketsResponse listBuckets(Consumer<ListBucketsRequest.Builder> listBucketsRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.listBuckets(listBucketsRequest);
     }

     @Override
-    public BucketNotificationConfiguration getBucketNotificationConfiguration(String bucketName) throws AmazonClientException,
-        AmazonServiceException {
-        return delegate.getBucketNotificationConfiguration(bucketName);
+    public ListBucketsResponse listBuckets() throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listBuckets();
     }

     @Override
-    public void setBucketNotificationConfiguration(SetBucketNotificationConfigurationRequest setBucketNotificationConfigurationRequest)
-        throws AmazonClientException, AmazonServiceException {
-        delegate.setBucketNotificationConfiguration(setBucketNotificationConfigurationRequest);
+    public ListBucketsIterable listBucketsPaginator() throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listBucketsPaginator();
     }

     @Override
-    public void setBucketNotificationConfiguration(String bucketName, BucketNotificationConfiguration bucketNotificationConfiguration)
-        throws AmazonClientException, AmazonServiceException {
-        delegate.setBucketNotificationConfiguration(bucketName, bucketNotificationConfiguration);
+    public ListBucketsIterable listBucketsPaginator(ListBucketsRequest listBucketsRequest) throws AwsServiceException, SdkClientException,
+        S3Exception {
+        return delegate.listBucketsPaginator(listBucketsRequest);
     }

     @Override
-    public BucketWebsiteConfiguration getBucketWebsiteConfiguration(String bucketName) throws AmazonClientException,
-        AmazonServiceException {
-        return delegate.getBucketWebsiteConfiguration(bucketName);
+    public ListBucketsIterable listBucketsPaginator(Consumer<ListBucketsRequest.Builder> listBucketsRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.listBucketsPaginator(listBucketsRequest);
     }

     @Override
-    public BucketWebsiteConfiguration getBucketWebsiteConfiguration(
-        GetBucketWebsiteConfigurationRequest getBucketWebsiteConfigurationRequest
-    ) throws AmazonClientException, AmazonServiceException {
-        return delegate.getBucketWebsiteConfiguration(getBucketWebsiteConfigurationRequest);
+    public ListDirectoryBucketsResponse listDirectoryBuckets(ListDirectoryBucketsRequest listDirectoryBucketsRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listDirectoryBuckets(listDirectoryBucketsRequest);
     }

     @Override
-    public void setBucketWebsiteConfiguration(String bucketName, BucketWebsiteConfiguration configuration) throws AmazonClientException,
-        AmazonServiceException {
-        delegate.setBucketWebsiteConfiguration(bucketName, configuration);
+    public ListDirectoryBucketsResponse listDirectoryBuckets(Consumer<ListDirectoryBucketsRequest.Builder> listDirectoryBucketsRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listDirectoryBuckets(listDirectoryBucketsRequest);
     }

     @Override
-    public void setBucketWebsiteConfiguration(SetBucketWebsiteConfigurationRequest setBucketWebsiteConfigurationRequest)
-        throws AmazonClientException, AmazonServiceException {
-        delegate.setBucketWebsiteConfiguration(setBucketWebsiteConfigurationRequest);
+    public ListDirectoryBucketsIterable listDirectoryBucketsPaginator(ListDirectoryBucketsRequest listDirectoryBucketsRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listDirectoryBucketsPaginator(listDirectoryBucketsRequest);
     }

     @Override
-    public void deleteBucketWebsiteConfiguration(String bucketName) throws AmazonClientException, AmazonServiceException {
-        delegate.deleteBucketWebsiteConfiguration(bucketName);
+    public ListDirectoryBucketsIterable listDirectoryBucketsPaginator(
+        Consumer<ListDirectoryBucketsRequest.Builder> listDirectoryBucketsRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listDirectoryBucketsPaginator(listDirectoryBucketsRequest);
     }

     @Override
-    public void deleteBucketWebsiteConfiguration(DeleteBucketWebsiteConfigurationRequest deleteBucketWebsiteConfigurationRequest)
-        throws AmazonClientException, AmazonServiceException {
-        delegate.deleteBucketWebsiteConfiguration(deleteBucketWebsiteConfigurationRequest);
+    public ListMultipartUploadsResponse listMultipartUploads(ListMultipartUploadsRequest listMultipartUploadsRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listMultipartUploads(listMultipartUploadsRequest);
     }

     @Override
-    public BucketPolicy getBucketPolicy(String bucketName) throws AmazonClientException, AmazonServiceException {
-        return delegate.getBucketPolicy(bucketName);
+    public ListMultipartUploadsResponse listMultipartUploads(Consumer<ListMultipartUploadsRequest.Builder> listMultipartUploadsRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listMultipartUploads(listMultipartUploadsRequest);
     }

     @Override
-    public BucketPolicy getBucketPolicy(GetBucketPolicyRequest getBucketPolicyRequest) throws AmazonClientException,
-        AmazonServiceException {
-        return delegate.getBucketPolicy(getBucketPolicyRequest);
+    public ListMultipartUploadsIterable listMultipartUploadsPaginator(ListMultipartUploadsRequest listMultipartUploadsRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listMultipartUploadsPaginator(listMultipartUploadsRequest);
     }

     @Override
-    public void setBucketPolicy(String bucketName, String policyText) throws AmazonClientException, AmazonServiceException {
-        delegate.setBucketPolicy(bucketName, policyText);
+    public ListMultipartUploadsIterable listMultipartUploadsPaginator(
+        Consumer<ListMultipartUploadsRequest.Builder> listMultipartUploadsRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listMultipartUploadsPaginator(listMultipartUploadsRequest);
     }

     @Override
-    public void setBucketPolicy(SetBucketPolicyRequest setBucketPolicyRequest) throws AmazonClientException, AmazonServiceException {
-        delegate.setBucketPolicy(setBucketPolicyRequest);
+    public ListObjectVersionsResponse listObjectVersions(ListObjectVersionsRequest listObjectVersionsRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.listObjectVersions(listObjectVersionsRequest);
     }

     @Override
-    public void deleteBucketPolicy(String bucketName) throws AmazonClientException, AmazonServiceException {
-        delegate.deleteBucketPolicy(bucketName);
+    public ListObjectVersionsResponse listObjectVersions(Consumer<ListObjectVersionsRequest.Builder> listObjectVersionsRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listObjectVersions(listObjectVersionsRequest);
     }

     @Override
-    public void deleteBucketPolicy(DeleteBucketPolicyRequest deleteBucketPolicyRequest) throws AmazonClientException,
-        AmazonServiceException {
-        delegate.deleteBucketPolicy(deleteBucketPolicyRequest);
+    public ListObjectVersionsIterable listObjectVersionsPaginator(ListObjectVersionsRequest listObjectVersionsRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listObjectVersionsPaginator(listObjectVersionsRequest);
     }

     @Override
-    public URL generatePresignedUrl(String bucketName, String key, Date expiration) throws AmazonClientException {
-        return delegate.generatePresignedUrl(bucketName, key, expiration);
+    public ListObjectVersionsIterable listObjectVersionsPaginator(Consumer<ListObjectVersionsRequest.Builder> listObjectVersionsRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listObjectVersionsPaginator(listObjectVersionsRequest);
     }

     @Override
-    public URL generatePresignedUrl(String bucketName, String key, Date expiration, HttpMethod method) throws AmazonClientException {
-        return delegate.generatePresignedUrl(bucketName, key, expiration, method);
+    public ListObjectsResponse listObjects(ListObjectsRequest listObjectsRequest) throws NoSuchBucketException, AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.listObjects(listObjectsRequest);
     }

     @Override
-    public URL generatePresignedUrl(GeneratePresignedUrlRequest generatePresignedUrlRequest) throws AmazonClientException {
-        return delegate.generatePresignedUrl(generatePresignedUrlRequest);
+    public ListObjectsResponse listObjects(Consumer<ListObjectsRequest.Builder> listObjectsRequest) throws NoSuchBucketException,
+        AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listObjects(listObjectsRequest);
     }

     @Override
-    public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest request) throws AmazonClientException,
-        AmazonServiceException {
-        return delegate.initiateMultipartUpload(request);
+    public ListObjectsV2Response listObjectsV2(ListObjectsV2Request listObjectsV2Request) throws NoSuchBucketException, AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.listObjectsV2(listObjectsV2Request);
     }

     @Override
-    public UploadPartResult uploadPart(UploadPartRequest request) throws AmazonClientException, AmazonServiceException {
-        return delegate.uploadPart(request);
+    public ListObjectsV2Response listObjectsV2(Consumer<ListObjectsV2Request.Builder> listObjectsV2Request) throws NoSuchBucketException,
+        AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listObjectsV2(listObjectsV2Request);
     }

     @Override
-    public PartListing listParts(ListPartsRequest request) throws AmazonClientException, AmazonServiceException {
-        return delegate.listParts(request);
+    public ListObjectsV2Iterable listObjectsV2Paginator(ListObjectsV2Request listObjectsV2Request) throws NoSuchBucketException,
+        AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listObjectsV2Paginator(listObjectsV2Request);
     }

     @Override
-    public void abortMultipartUpload(AbortMultipartUploadRequest request) throws AmazonClientException, AmazonServiceException {
-        delegate.abortMultipartUpload(request);
+    public ListObjectsV2Iterable listObjectsV2Paginator(Consumer<ListObjectsV2Request.Builder> listObjectsV2Request)
+        throws NoSuchBucketException, AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listObjectsV2Paginator(listObjectsV2Request);
     }

     @Override
-    public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request) throws AmazonClientException,
-        AmazonServiceException {
-        return delegate.completeMultipartUpload(request);
+    public ListPartsResponse listParts(ListPartsRequest listPartsRequest) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.listParts(listPartsRequest);
     }

     @Override
-    public MultipartUploadListing listMultipartUploads(ListMultipartUploadsRequest request) throws AmazonClientException,
-        AmazonServiceException {
-        return delegate.listMultipartUploads(request);
+    public ListPartsResponse listParts(Consumer<ListPartsRequest.Builder> listPartsRequest) throws AwsServiceException, SdkClientException,
+        S3Exception {
+        return delegate.listParts(listPartsRequest);
     }
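[Note: the `*Paginator` methods return lazy iterables that fetch continuation pages on demand, replacing the manual `listNextBatchOfObjects`/`listNextBatchOfVersions` loops removed in this hunk. A sketch of iterating a whole listing, with a hypothetical bucket and prefix:

    import software.amazon.awssdk.services.s3.model.S3Object;
    import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable;

    ListObjectsV2Iterable pages = client.listObjectsV2Paginator(b -> b.bucket("my-bucket").prefix("logs/"));
    for (S3Object object : pages.contents()) {  // transparently issues further ListObjectsV2 calls
        System.out.println(object.key() + " " + object.size());
    }
]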
     @Override
-    public S3ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request) {
-        return delegate.getCachedResponseMetadata(request);
+    public ListPartsIterable listPartsPaginator(ListPartsRequest listPartsRequest) throws AwsServiceException, SdkClientException,
+        S3Exception {
+        return delegate.listPartsPaginator(listPartsRequest);
     }

     @Override
-    public void restoreObject(RestoreObjectRequest copyGlacierObjectRequest) throws AmazonServiceException {
-        delegate.restoreObject(copyGlacierObjectRequest);
+    public ListPartsIterable listPartsPaginator(Consumer<ListPartsRequest.Builder> listPartsRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.listPartsPaginator(listPartsRequest);
     }

     @Override
-    public void restoreObject(String bucketName, String key, int expirationInDays) throws AmazonServiceException {
-        delegate.restoreObject(bucketName, key, expirationInDays);
+    public PutBucketAccelerateConfigurationResponse putBucketAccelerateConfiguration(
+        PutBucketAccelerateConfigurationRequest putBucketAccelerateConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketAccelerateConfiguration(putBucketAccelerateConfigurationRequest);
     }

     @Override
-    public void enableRequesterPays(String bucketName) throws AmazonServiceException, AmazonClientException {
-        delegate.enableRequesterPays(bucketName);
+    public PutBucketAccelerateConfigurationResponse putBucketAccelerateConfiguration(
+        Consumer<PutBucketAccelerateConfigurationRequest.Builder> putBucketAccelerateConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketAccelerateConfiguration(putBucketAccelerateConfigurationRequest);
     }

     @Override
-    public void disableRequesterPays(String bucketName) throws AmazonServiceException, AmazonClientException {
-        delegate.disableRequesterPays(bucketName);
+    public PutBucketAclResponse putBucketAcl(PutBucketAclRequest putBucketAclRequest) throws AwsServiceException, SdkClientException,
+        S3Exception {
+        return delegate.putBucketAcl(putBucketAclRequest);
     }

     @Override
-    public boolean isRequesterPaysEnabled(String bucketName) throws AmazonServiceException, AmazonClientException {
-        return delegate.isRequesterPaysEnabled(bucketName);
+    public PutBucketAclResponse putBucketAcl(Consumer<PutBucketAclRequest.Builder> putBucketAclRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.putBucketAcl(putBucketAclRequest);
     }

     @Override
-    public ObjectListing listNextBatchOfObjects(ListNextBatchOfObjectsRequest listNextBatchOfObjectsRequest) throws AmazonClientException,
-        AmazonServiceException {
-        return delegate.listNextBatchOfObjects(listNextBatchOfObjectsRequest);
+    public PutBucketAnalyticsConfigurationResponse putBucketAnalyticsConfiguration(
+        PutBucketAnalyticsConfigurationRequest putBucketAnalyticsConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketAnalyticsConfiguration(putBucketAnalyticsConfigurationRequest);
     }

     @Override
-    public VersionListing listNextBatchOfVersions(ListNextBatchOfVersionsRequest listNextBatchOfVersionsRequest)
-        throws AmazonClientException, AmazonServiceException {
-        return delegate.listNextBatchOfVersions(listNextBatchOfVersionsRequest);
+    public PutBucketAnalyticsConfigurationResponse putBucketAnalyticsConfiguration(
+        Consumer<PutBucketAnalyticsConfigurationRequest.Builder> putBucketAnalyticsConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketAnalyticsConfiguration(putBucketAnalyticsConfigurationRequest);
     }

     @Override
-    public Owner getS3AccountOwner(GetS3AccountOwnerRequest getS3AccountOwnerRequest) throws AmazonClientException, AmazonServiceException {
-        return delegate.getS3AccountOwner(getS3AccountOwnerRequest);
+    public PutBucketCorsResponse putBucketCors(PutBucketCorsRequest putBucketCorsRequest) throws AwsServiceException, SdkClientException,
+        S3Exception {
+        return delegate.putBucketCors(putBucketCorsRequest);
     }

     @Override
-    public BucketLoggingConfiguration getBucketLoggingConfiguration(
-        GetBucketLoggingConfigurationRequest getBucketLoggingConfigurationRequest
-    ) throws AmazonClientException, AmazonServiceException {
-        return delegate.getBucketLoggingConfiguration(getBucketLoggingConfigurationRequest);
+    public PutBucketCorsResponse putBucketCors(Consumer<PutBucketCorsRequest.Builder> putBucketCorsRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.putBucketCors(putBucketCorsRequest);
     }

     @Override
-    public BucketVersioningConfiguration getBucketVersioningConfiguration(
-        GetBucketVersioningConfigurationRequest getBucketVersioningConfigurationRequest
-    ) throws AmazonClientException, AmazonServiceException {
-        return delegate.getBucketVersioningConfiguration(getBucketVersioningConfigurationRequest);
+    public PutBucketEncryptionResponse putBucketEncryption(PutBucketEncryptionRequest putBucketEncryptionRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketEncryption(putBucketEncryptionRequest);
     }

     @Override
-    public BucketLifecycleConfiguration getBucketLifecycleConfiguration(
-        GetBucketLifecycleConfigurationRequest getBucketLifecycleConfigurationRequest
-    ) {
-        return delegate.getBucketLifecycleConfiguration(getBucketLifecycleConfigurationRequest);
+    public PutBucketEncryptionResponse putBucketEncryption(Consumer<PutBucketEncryptionRequest.Builder> putBucketEncryptionRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketEncryption(putBucketEncryptionRequest);
     }

     @Override
-    public BucketCrossOriginConfiguration getBucketCrossOriginConfiguration(
-        GetBucketCrossOriginConfigurationRequest getBucketCrossOriginConfigurationRequest
-    ) {
-        return delegate.getBucketCrossOriginConfiguration(getBucketCrossOriginConfigurationRequest);
+    public PutBucketIntelligentTieringConfigurationResponse putBucketIntelligentTieringConfiguration(
+        PutBucketIntelligentTieringConfigurationRequest putBucketIntelligentTieringConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketIntelligentTieringConfiguration(putBucketIntelligentTieringConfigurationRequest);
     }

     @Override
-    public BucketTaggingConfiguration getBucketTaggingConfiguration(
-        GetBucketTaggingConfigurationRequest getBucketTaggingConfigurationRequest
-    ) {
-        return delegate.getBucketTaggingConfiguration(getBucketTaggingConfigurationRequest);
+    public PutBucketIntelligentTieringConfigurationResponse putBucketIntelligentTieringConfiguration(
+        Consumer<PutBucketIntelligentTieringConfigurationRequest.Builder> putBucketIntelligentTieringConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketIntelligentTieringConfiguration(putBucketIntelligentTieringConfigurationRequest);
     }

     @Override
-    public BucketNotificationConfiguration getBucketNotificationConfiguration(
-        GetBucketNotificationConfigurationRequest getBucketNotificationConfigurationRequest
-    ) throws AmazonClientException, AmazonServiceException {
-        return delegate.getBucketNotificationConfiguration(getBucketNotificationConfigurationRequest);
+    public PutBucketInventoryConfigurationResponse putBucketInventoryConfiguration(
+        PutBucketInventoryConfigurationRequest putBucketInventoryConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketInventoryConfiguration(putBucketInventoryConfigurationRequest);
     }

     @Override
-    public BucketReplicationConfiguration getBucketReplicationConfiguration(
-        GetBucketReplicationConfigurationRequest getBucketReplicationConfigurationRequest
-    ) throws AmazonServiceException, AmazonClientException {
-        return delegate.getBucketReplicationConfiguration(getBucketReplicationConfigurationRequest);
+    public PutBucketInventoryConfigurationResponse putBucketInventoryConfiguration(
+        Consumer<PutBucketInventoryConfigurationRequest.Builder> putBucketInventoryConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketInventoryConfiguration(putBucketInventoryConfigurationRequest);
     }

     @Override
-    public HeadBucketResult headBucket(HeadBucketRequest headBucketRequest) throws AmazonClientException, AmazonServiceException {
-        return delegate.headBucket(headBucketRequest);
+    public PutBucketLifecycleConfigurationResponse putBucketLifecycleConfiguration(
+        PutBucketLifecycleConfigurationRequest putBucketLifecycleConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketLifecycleConfiguration(putBucketLifecycleConfigurationRequest);
+    }
+
+    @Override
+    public PutBucketLifecycleConfigurationResponse putBucketLifecycleConfiguration(
+        Consumer<PutBucketLifecycleConfigurationRequest.Builder> putBucketLifecycleConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketLifecycleConfiguration(putBucketLifecycleConfigurationRequest);
+    }
+
+    @Override
+    public PutBucketLoggingResponse putBucketLogging(PutBucketLoggingRequest putBucketLoggingRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.putBucketLogging(putBucketLoggingRequest);
+    }
+
+    @Override
+    public PutBucketLoggingResponse putBucketLogging(Consumer<PutBucketLoggingRequest.Builder> putBucketLoggingRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketLogging(putBucketLoggingRequest);
+    }
+
+    @Override
+    public PutBucketMetricsConfigurationResponse putBucketMetricsConfiguration(
+        PutBucketMetricsConfigurationRequest putBucketMetricsConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketMetricsConfiguration(putBucketMetricsConfigurationRequest);
+    }
+
+    @Override
+    public PutBucketMetricsConfigurationResponse putBucketMetricsConfiguration(
+        Consumer<PutBucketMetricsConfigurationRequest.Builder> putBucketMetricsConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketMetricsConfiguration(putBucketMetricsConfigurationRequest);
+    }
+
+    @Override
+    public PutBucketNotificationConfigurationResponse putBucketNotificationConfiguration(
+        PutBucketNotificationConfigurationRequest putBucketNotificationConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketNotificationConfiguration(putBucketNotificationConfigurationRequest);
+    }
+
+    @Override
+    public PutBucketNotificationConfigurationResponse putBucketNotificationConfiguration(
+        Consumer<PutBucketNotificationConfigurationRequest.Builder> putBucketNotificationConfigurationRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketNotificationConfiguration(putBucketNotificationConfigurationRequest);
+    }
+
+    @Override
+    public PutBucketOwnershipControlsResponse putBucketOwnershipControls(
+        PutBucketOwnershipControlsRequest putBucketOwnershipControlsRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketOwnershipControls(putBucketOwnershipControlsRequest);
+    }
+
+    @Override
+    public PutBucketOwnershipControlsResponse putBucketOwnershipControls(
+        Consumer<PutBucketOwnershipControlsRequest.Builder> putBucketOwnershipControlsRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketOwnershipControls(putBucketOwnershipControlsRequest);
+    }
+
+    @Override
+    public PutBucketPolicyResponse putBucketPolicy(PutBucketPolicyRequest putBucketPolicyRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.putBucketPolicy(putBucketPolicyRequest);
+    }
+
+    @Override
+    public PutBucketPolicyResponse putBucketPolicy(Consumer<PutBucketPolicyRequest.Builder> putBucketPolicyRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketPolicy(putBucketPolicyRequest);
+    }
+
+    @Override
+    public PutBucketReplicationResponse putBucketReplication(PutBucketReplicationRequest putBucketReplicationRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketReplication(putBucketReplicationRequest);
+    }
+
+    @Override
+    public PutBucketReplicationResponse putBucketReplication(Consumer<PutBucketReplicationRequest.Builder> putBucketReplicationRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketReplication(putBucketReplicationRequest);
+    }
+
+    @Override
+    public PutBucketRequestPaymentResponse putBucketRequestPayment(PutBucketRequestPaymentRequest putBucketRequestPaymentRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketRequestPayment(putBucketRequestPaymentRequest);
+    }
+
+    @Override
+    public PutBucketRequestPaymentResponse putBucketRequestPayment(
+        Consumer<PutBucketRequestPaymentRequest.Builder> putBucketRequestPaymentRequest
+    ) throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketRequestPayment(putBucketRequestPaymentRequest);
+    }
+
+    @Override
+    public PutBucketTaggingResponse putBucketTagging(PutBucketTaggingRequest putBucketTaggingRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.putBucketTagging(putBucketTaggingRequest);
+    }
+
+    @Override
+    public PutBucketTaggingResponse putBucketTagging(Consumer<PutBucketTaggingRequest.Builder> putBucketTaggingRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketTagging(putBucketTaggingRequest);
+    }
+
+    @Override
+    public PutBucketVersioningResponse putBucketVersioning(PutBucketVersioningRequest putBucketVersioningRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketVersioning(putBucketVersioningRequest);
+    }
+
+    @Override
+    public PutBucketVersioningResponse putBucketVersioning(Consumer<PutBucketVersioningRequest.Builder> putBucketVersioningRequest)
+        throws AwsServiceException, SdkClientException, S3Exception {
+        return delegate.putBucketVersioning(putBucketVersioningRequest);
+    }
+
+    @Override
+    public PutBucketWebsiteResponse putBucketWebsite(PutBucketWebsiteRequest putBucketWebsiteRequest) throws AwsServiceException,
+        SdkClientException, S3Exception {
+        return delegate.putBucketWebsite(putBucketWebsiteRequest);
+    }
+
+    @Override
+    public PutBucketWebsiteResponse putBucketWebsite(Consumer<PutBucketWebsiteRequest.Builder> putBucketWebsiteRequest)
+        throws AwsServiceException,
SdkClientException, S3Exception { + return delegate.putBucketWebsite(putBucketWebsiteRequest); + } + + @Override + public PutObjectResponse putObject(PutObjectRequest putObjectRequest, RequestBody requestBody) throws InvalidRequestException, + InvalidWriteOffsetException, TooManyPartsException, EncryptionTypeMismatchException, AwsServiceException, SdkClientException, + S3Exception { + return delegate.putObject(putObjectRequest, requestBody); + } + + @Override + public PutObjectResponse putObject(Consumer putObjectRequest, RequestBody requestBody) + throws InvalidRequestException, InvalidWriteOffsetException, TooManyPartsException, EncryptionTypeMismatchException, + AwsServiceException, SdkClientException, S3Exception { + return delegate.putObject(putObjectRequest, requestBody); + } + + @Override + public PutObjectResponse putObject(PutObjectRequest putObjectRequest, Path sourcePath) throws InvalidRequestException, + InvalidWriteOffsetException, TooManyPartsException, EncryptionTypeMismatchException, AwsServiceException, SdkClientException, + S3Exception { + return delegate.putObject(putObjectRequest, sourcePath); + } + + @Override + public PutObjectResponse putObject(Consumer putObjectRequest, Path sourcePath) throws InvalidRequestException, + InvalidWriteOffsetException, TooManyPartsException, EncryptionTypeMismatchException, AwsServiceException, SdkClientException, + S3Exception { + return delegate.putObject(putObjectRequest, sourcePath); + } + + @Override + public PutObjectAclResponse putObjectAcl(PutObjectAclRequest putObjectAclRequest) throws NoSuchKeyException, AwsServiceException, + SdkClientException, S3Exception { + return delegate.putObjectAcl(putObjectAclRequest); + } + + @Override + public PutObjectAclResponse putObjectAcl(Consumer putObjectAclRequest) throws NoSuchKeyException, + AwsServiceException, SdkClientException, S3Exception { + return delegate.putObjectAcl(putObjectAclRequest); + } + + @Override + public PutObjectLegalHoldResponse putObjectLegalHold(PutObjectLegalHoldRequest putObjectLegalHoldRequest) throws AwsServiceException, + SdkClientException, S3Exception { + return delegate.putObjectLegalHold(putObjectLegalHoldRequest); + } + + @Override + public PutObjectLegalHoldResponse putObjectLegalHold(Consumer putObjectLegalHoldRequest) + throws AwsServiceException, SdkClientException, S3Exception { + return delegate.putObjectLegalHold(putObjectLegalHoldRequest); + } + + @Override + public PutObjectLockConfigurationResponse putObjectLockConfiguration( + PutObjectLockConfigurationRequest putObjectLockConfigurationRequest + ) throws AwsServiceException, SdkClientException, S3Exception { + return delegate.putObjectLockConfiguration(putObjectLockConfigurationRequest); + } + + @Override + public PutObjectLockConfigurationResponse putObjectLockConfiguration( + Consumer putObjectLockConfigurationRequest + ) throws AwsServiceException, SdkClientException, S3Exception { + return delegate.putObjectLockConfiguration(putObjectLockConfigurationRequest); + } + + @Override + public PutObjectRetentionResponse putObjectRetention(PutObjectRetentionRequest putObjectRetentionRequest) throws AwsServiceException, + SdkClientException, S3Exception { + return delegate.putObjectRetention(putObjectRetentionRequest); + } + + @Override + public PutObjectRetentionResponse putObjectRetention(Consumer putObjectRetentionRequest) + throws AwsServiceException, SdkClientException, S3Exception { + return delegate.putObjectRetention(putObjectRetentionRequest); + } + + @Override + public 
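The `putObject` overloads being delegated here show another v2 change: the payload travels as a `RequestBody` (or a `Path`), replacing v1's `InputStream` plus `ObjectMetadata` pair. A minimal sketch, with hypothetical bucket and key names:

```java
import java.nio.charset.StandardCharsets;

import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;

public class PutObjectSketch {
    static void putGreeting(S3Client s3) {
        // RequestBody carries both the bytes and the content length, which v1 kept in ObjectMetadata.
        s3.putObject(
            PutObjectRequest.builder().bucket("my-bucket").key("greeting.txt").build(), // placeholders
            RequestBody.fromString("hello", StandardCharsets.UTF_8)
        );
    }
}
```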
PutObjectTaggingResponse putObjectTagging(PutObjectTaggingRequest putObjectTaggingRequest) throws AwsServiceException, + SdkClientException, S3Exception { + return delegate.putObjectTagging(putObjectTaggingRequest); + } + + @Override + public PutObjectTaggingResponse putObjectTagging(Consumer putObjectTaggingRequest) + throws AwsServiceException, SdkClientException, S3Exception { + return delegate.putObjectTagging(putObjectTaggingRequest); + } + + @Override + public PutPublicAccessBlockResponse putPublicAccessBlock(PutPublicAccessBlockRequest putPublicAccessBlockRequest) + throws AwsServiceException, SdkClientException, S3Exception { + return delegate.putPublicAccessBlock(putPublicAccessBlockRequest); + } + + @Override + public PutPublicAccessBlockResponse putPublicAccessBlock(Consumer putPublicAccessBlockRequest) + throws AwsServiceException, SdkClientException, S3Exception { + return delegate.putPublicAccessBlock(putPublicAccessBlockRequest); + } + + @Override + public RestoreObjectResponse restoreObject(RestoreObjectRequest restoreObjectRequest) throws ObjectAlreadyInActiveTierErrorException, + AwsServiceException, SdkClientException, S3Exception { + return delegate.restoreObject(restoreObjectRequest); + } + + @Override + public RestoreObjectResponse restoreObject(Consumer restoreObjectRequest) + throws ObjectAlreadyInActiveTierErrorException, AwsServiceException, SdkClientException, S3Exception { + return delegate.restoreObject(restoreObjectRequest); + } + + @Override + public UploadPartResponse uploadPart(UploadPartRequest uploadPartRequest, RequestBody requestBody) throws AwsServiceException, + SdkClientException, S3Exception { + return delegate.uploadPart(uploadPartRequest, requestBody); + } + + @Override + public UploadPartResponse uploadPart(Consumer uploadPartRequest, RequestBody requestBody) + throws AwsServiceException, SdkClientException, S3Exception { + return delegate.uploadPart(uploadPartRequest, requestBody); + } + + @Override + public UploadPartResponse uploadPart(UploadPartRequest uploadPartRequest, Path sourcePath) throws AwsServiceException, + SdkClientException, S3Exception { + return delegate.uploadPart(uploadPartRequest, sourcePath); + } + + @Override + public UploadPartResponse uploadPart(Consumer uploadPartRequest, Path sourcePath) throws AwsServiceException, + SdkClientException, S3Exception { + return delegate.uploadPart(uploadPartRequest, sourcePath); + } + + @Override + public UploadPartCopyResponse uploadPartCopy(UploadPartCopyRequest uploadPartCopyRequest) throws AwsServiceException, + SdkClientException, S3Exception { + return delegate.uploadPartCopy(uploadPartCopyRequest); + } + + @Override + public UploadPartCopyResponse uploadPartCopy(Consumer uploadPartCopyRequest) throws AwsServiceException, + SdkClientException, S3Exception { + return delegate.uploadPartCopy(uploadPartCopyRequest); + } + + @Override + public WriteGetObjectResponseResponse writeGetObjectResponse( + WriteGetObjectResponseRequest writeGetObjectResponseRequest, + RequestBody requestBody + ) throws AwsServiceException, SdkClientException, S3Exception { + return delegate.writeGetObjectResponse(writeGetObjectResponseRequest, requestBody); + } + + @Override + public WriteGetObjectResponseResponse writeGetObjectResponse( + Consumer writeGetObjectResponseRequest, + RequestBody requestBody + ) throws AwsServiceException, SdkClientException, S3Exception { + return delegate.writeGetObjectResponse(writeGetObjectResponseRequest, requestBody); + } + + @Override + public WriteGetObjectResponseResponse 
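The `uploadPart` and `uploadPartCopy` delegates above belong to the multipart flow, which v2 renames throughout: `initiateMultipartUpload` becomes `createMultipartUpload`, and response getters drop their `get` prefix (`getUploadId()` becomes `uploadId()`). A compressed sketch of the v2 flow, placeholder names throughout:

```java
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload;
import software.amazon.awssdk.services.s3.model.CompletedPart;

public class MultipartSketch {
    static void upload(S3Client s3, byte[] part) {
        // v1: initiateMultipartUpload(...).getUploadId()
        String uploadId = s3.createMultipartUpload(b -> b.bucket("my-bucket").key("blob")).uploadId();
        String eTag = s3.uploadPart(
            b -> b.bucket("my-bucket").key("blob").uploadId(uploadId).partNumber(1),
            RequestBody.fromBytes(part)
        ).eTag();
        s3.completeMultipartUpload(
            b -> b.bucket("my-bucket")
                .key("blob")
                .uploadId(uploadId)
                .multipartUpload(
                    CompletedMultipartUpload.builder()
                        .parts(CompletedPart.builder().partNumber(1).eTag(eTag).build())
                        .build()
                )
        );
    }
}
```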
writeGetObjectResponse( + WriteGetObjectResponseRequest writeGetObjectResponseRequest, + Path sourcePath + ) throws AwsServiceException, SdkClientException, S3Exception { + return delegate.writeGetObjectResponse(writeGetObjectResponseRequest, sourcePath); + } + + @Override + public WriteGetObjectResponseResponse writeGetObjectResponse( + Consumer writeGetObjectResponseRequest, + Path sourcePath + ) throws AwsServiceException, SdkClientException, S3Exception { + return delegate.writeGetObjectResponse(writeGetObjectResponseRequest, sourcePath); + } + + @Override + public S3Utilities utilities() { + return delegate.utilities(); + } + + @Override + public S3Waiter waiter() { + return delegate.waiter(); } @Override - public void shutdown() { - delegate.shutdown(); + public S3ServiceClientConfiguration serviceClientConfiguration() { + return delegate.serviceClientConfiguration(); } } diff --git a/modules/repository-s3/qa/insecure-credentials/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/modules/repository-s3/qa/insecure-credentials/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index 839cfaa3c1045..98b8ef4cad2f3 100644 --- a/modules/repository-s3/qa/insecure-credentials/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/modules/repository-s3/qa/insecure-credentials/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -9,9 +9,10 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.services.s3.AmazonS3; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.services.s3.S3Client; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -89,12 +90,15 @@ public void testRepositoryCredentialsOverrideSecureCredentials() { assertThat(repositories.repository(repositoryName), instanceOf(S3Repository.class)); final S3Repository repository = (S3Repository) repositories.repository(repositoryName); - final AmazonS3 client = repository.createBlobStore().clientReference().client(); - assertThat(client, instanceOf(ProxyS3RepositoryPlugin.ClientAndCredentials.class)); - - final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) client).credentials.getCredentials(); - assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); - assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); + try (var clientReference = repository.createBlobStore().clientReference()) { + final ProxyS3RepositoryPlugin.ClientAndCredentials client = asInstanceOf( + ProxyS3RepositoryPlugin.ClientAndCredentials.class, + clientReference.client() + ); + final AwsCredentials credentials = client.credentials.resolveCredentials(); + assertThat(credentials.accessKeyId(), is("insecure_aws_key")); + assertThat(credentials.secretAccessKey(), is("insecure_aws_secret")); + } assertCriticalWarnings( "[access_key] setting was deprecated in Elasticsearch and will be removed in a future release.", @@ -125,19 +129,19 @@ public void testReinitSecureCredentials() { final S3Repository repository = (S3Repository) repositories.repository(repositoryName); try (AmazonS3Reference clientReference = ((S3BlobStore) repository.blobStore()).clientReference()) { - final AmazonS3 client = 
clientReference.client(); + final S3Client client = clientReference.client(); assertThat(client, instanceOf(ProxyS3RepositoryPlugin.ClientAndCredentials.class)); - final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) client).credentials.getCredentials(); + final AwsCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) client).credentials.resolveCredentials(); if (hasInsecureSettings) { - assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); - assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); + assertThat(credentials.accessKeyId(), is("insecure_aws_key")); + assertThat(credentials.secretAccessKey(), is("insecure_aws_secret")); } else if ("other".equals(clientName)) { - assertThat(credentials.getAWSAccessKeyId(), is("secure_other_key")); - assertThat(credentials.getAWSSecretKey(), is("secure_other_secret")); + assertThat(credentials.accessKeyId(), is("secure_other_key")); + assertThat(credentials.secretAccessKey(), is("secure_other_secret")); } else { - assertThat(credentials.getAWSAccessKeyId(), is("secure_default_key")); - assertThat(credentials.getAWSSecretKey(), is("secure_default_secret")); + assertThat(credentials.accessKeyId(), is("secure_default_key")); + assertThat(credentials.secretAccessKey(), is("secure_default_secret")); } // new settings @@ -155,29 +159,29 @@ public void testReinitSecureCredentials() { // check the not-yet-closed client reference still has the same credentials if (hasInsecureSettings) { - assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); - assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); + assertThat(credentials.accessKeyId(), is("insecure_aws_key")); + assertThat(credentials.secretAccessKey(), is("insecure_aws_secret")); } else if ("other".equals(clientName)) { - assertThat(credentials.getAWSAccessKeyId(), is("secure_other_key")); - assertThat(credentials.getAWSSecretKey(), is("secure_other_secret")); + assertThat(credentials.accessKeyId(), is("secure_other_key")); + assertThat(credentials.secretAccessKey(), is("secure_other_secret")); } else { - assertThat(credentials.getAWSAccessKeyId(), is("secure_default_key")); - assertThat(credentials.getAWSSecretKey(), is("secure_default_secret")); + assertThat(credentials.accessKeyId(), is("secure_default_key")); + assertThat(credentials.secretAccessKey(), is("secure_default_secret")); } } // check credentials have been updated try (AmazonS3Reference clientReference = ((S3BlobStore) repository.blobStore()).clientReference()) { - final AmazonS3 client = clientReference.client(); + final S3Client client = clientReference.client(); assertThat(client, instanceOf(ProxyS3RepositoryPlugin.ClientAndCredentials.class)); - final AWSCredentials newCredentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) client).credentials.getCredentials(); + final AwsCredentials newCredentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) client).credentials.resolveCredentials(); if (hasInsecureSettings) { - assertThat(newCredentials.getAWSAccessKeyId(), is("insecure_aws_key")); - assertThat(newCredentials.getAWSSecretKey(), is("insecure_aws_secret")); + assertThat(newCredentials.accessKeyId(), is("insecure_aws_key")); + assertThat(newCredentials.secretAccessKey(), is("insecure_aws_secret")); } else { - assertThat(newCredentials.getAWSAccessKeyId(), is("new_secret_aws_key")); - assertThat(newCredentials.getAWSSecretKey(), is("new_secret_aws_secret")); + assertThat(newCredentials.accessKeyId(), 
is("new_secret_aws_key")); + assertThat(newCredentials.secretAccessKey(), is("new_secret_aws_secret")); } } @@ -252,27 +256,51 @@ S3Service s3Service(Environment environment, Settings nodeSettings, ResourceWatc return new ProxyS3Service(environment, nodeSettings, resourceWatcherService); } + /** + * This wrapper exposes a copy of the AWS credentials that the S3Client uses. + */ public static final class ClientAndCredentials extends AmazonS3Wrapper { - final AWSCredentialsProvider credentials; + final AwsCredentialsProvider credentials; + // The httpClient must be explicitly closed. Closure of the S3Client, which uses the httpClient, will not do so. + private final SdkHttpClient httpClient; - ClientAndCredentials(AmazonS3 delegate, AWSCredentialsProvider credentials) { + ClientAndCredentials(S3Client delegate, SdkHttpClient httpClient, AwsCredentialsProvider credentials) { super(delegate); + this.httpClient = httpClient; this.credentials = credentials; } + + @Override + public String serviceName() { + return "ClientAndCredentials"; + } + + @Override + public void close() { + super.close(); + httpClient.close(); + } } + /** + * A {@link S3Service} wrapper that supports access to a copy of the credentials given to the S3Client. + */ public static final class ProxyS3Service extends S3Service { private static final Logger logger = LogManager.getLogger(ProxyS3Service.class); ProxyS3Service(Environment environment, Settings nodeSettings, ResourceWatcherService resourceWatcherService) { - super(environment, nodeSettings, resourceWatcherService); + super(environment, nodeSettings, resourceWatcherService, () -> null); } @Override - AmazonS3 buildClient(final S3ClientSettings clientSettings) { - final AmazonS3 client = super.buildClient(clientSettings); - return new ClientAndCredentials(client, buildCredentials(logger, clientSettings, webIdentityTokenCredentialsProvider)); + S3Client buildClient(final S3ClientSettings clientSettings, SdkHttpClient httpClient) { + final S3Client client = super.buildClient(clientSettings, httpClient); + return new ClientAndCredentials( + client, + httpClient, + buildCredentials(logger, clientSettings, webIdentityTokenCredentialsProvider) + ); } } diff --git a/modules/repository-s3/qa/third-party/build.gradle b/modules/repository-s3/qa/third-party/build.gradle index 49cdd2665667f..acf912e6c0136 100644 --- a/modules/repository-s3/qa/third-party/build.gradle +++ b/modules/repository-s3/qa/third-party/build.gradle @@ -12,6 +12,12 @@ dependencies { testImplementation project(':test:fixtures:minio-fixture') testImplementation project(':test:framework') testImplementation project(':server') + + testImplementation "software.amazon.awssdk:aws-core:${versions.awsv2sdk}" + testImplementation "software.amazon.awssdk:http-client-spi:${versions.awsv2sdk}" + testImplementation "software.amazon.awssdk:s3:${versions.awsv2sdk}" + testImplementation "software.amazon.awssdk:sdk-core:${versions.awsv2sdk}" + testImplementation "software.amazon.awssdk:utils:${versions.awsv2sdk}" } boolean useFixture = false diff --git a/modules/repository-s3/qa/third-party/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/modules/repository-s3/qa/third-party/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java index 4cebedebfba07..4c277759c7571 100644 --- a/modules/repository-s3/qa/third-party/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ 
b/modules/repository-s3/qa/third-party/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -8,11 +8,12 @@ */ package org.elasticsearch.repositories.s3; -import com.amazonaws.services.s3.model.AmazonS3Exception; -import com.amazonaws.services.s3.model.GetObjectRequest; -import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; -import com.amazonaws.services.s3.model.ListMultipartUploadsRequest; -import com.amazonaws.services.s3.model.MultipartUpload; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest; +import software.amazon.awssdk.services.s3.model.MultipartUpload; +import software.amazon.awssdk.services.s3.model.S3Exception; + import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; @@ -176,8 +177,9 @@ BytesReference readRegister() { } List<MultipartUpload> listMultipartUploads() { - return client.listMultipartUploads(new ListMultipartUploadsRequest(bucketName).withPrefix(registerBlobPath)) - .getMultipartUploads(); + return client.listMultipartUploads( + ListMultipartUploadsRequest.builder().bucket(bucketName).prefix(registerBlobPath).build() + ).uploads(); } } @@ -191,11 +193,11 @@ List<MultipartUpload> listMultipartUploads() { assertEquals(bytes1, testHarness.readRegister()); assertArrayEquals( bytes1.array(), - client.getObject(new GetObjectRequest(bucketName, registerBlobPath)).getObjectContent().readAllBytes() + client.getObject(GetObjectRequest.builder().bucket(bucketName).key(registerBlobPath).build()).readAllBytes() ); // a fresh ongoing upload blocks other CAS attempts - client.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucketName, registerBlobPath)); + client.createMultipartUpload(CreateMultipartUploadRequest.builder().bucket(bucketName).key(registerBlobPath).build()); assertThat(testHarness.listMultipartUploads(), hasSize(1)); assertFalse(testHarness.tryCompareAndSet(bytes1, bytes2)); @@ -203,10 +205,7 @@ List<MultipartUpload> listMultipartUploads() { assertThat(multipartUploads, hasSize(1)); // repo clock may not be exactly aligned with ours, but it should be close - final var age = blobStore.getThreadPool().absoluteTimeInMillis() - multipartUploads.get(0) - .getInitiated() - .toInstant() - .toEpochMilli(); + final var age = blobStore.getThreadPool().absoluteTimeInMillis() - multipartUploads.get(0).initiated().toEpochMilli(); final var ageRangeMillis = TimeValue.timeValueMinutes(1).millis(); assertThat(age, allOf(greaterThanOrEqualTo(-ageRangeMillis), lessThanOrEqualTo(ageRangeMillis))); @@ -224,8 +223,10 @@ List<MultipartUpload> listMultipartUploads() { } public void testReadFromPositionLargerThanBlobLength() { - testReadFromPositionLargerThanBlobLength( - e -> asInstanceOf(AmazonS3Exception.class, e.getCause()).getStatusCode() == RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus() - ); + testReadFromPositionLargerThanBlobLength(e -> { + final var s3Exception = asInstanceOf(S3Exception.class, e.getCause()); + return s3Exception.statusCode() == RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus() + && "InvalidRange".equals(s3Exception.awsErrorDetails().errorCode()); + }); } } diff --git a/modules/repository-s3/qa/web-identity-token/build.gradle b/modules/repository-s3/qa/web-identity-token/build.gradle index 4346e1f4547e1..b87c52663d241 100644 --- a/modules/repository-s3/qa/web-identity-token/build.gradle +++ 
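The rewritten `testReadFromPositionLargerThanBlobLength` above relies on v2's split error accessors: v1's `AmazonS3Exception.getStatusCode()` and `getErrorCode()` become `statusCode()` and `awsErrorDetails().errorCode()` on `S3Exception`. A minimal sketch:

```java
import java.io.IOException;

import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import software.amazon.awssdk.services.s3.model.S3Exception;

public class ErrorDetailsSketch {
    static String describeFailure(S3Client s3, String bucket, String key) throws IOException {
        try (var object = s3.getObject(GetObjectRequest.builder().bucket(bucket).key(key).build())) {
            return null; // object exists; nothing to report
        } catch (S3Exception e) {
            // v1 packed both into AmazonS3Exception getters; v2 splits them:
            // statusCode() for the HTTP status, awsErrorDetails().errorCode() for the S3 error code.
            return e.statusCode() + " " + e.awsErrorDetails().errorCode(); // e.g. "404 NoSuchKey"
        }
    }
}
```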
b/modules/repository-s3/qa/web-identity-token/build.gradle @@ -11,8 +11,10 @@ dependencies { testImplementation project(':modules:repository-s3') testImplementation project(':test:framework') testImplementation project(':server') + testImplementation "software.amazon.awssdk:auth:${versions.awsv2sdk}" + implementation "software.amazon.awssdk:identity-spi:${versions.awsv2sdk}" } tasks.named("test").configure { - systemProperty 'es.allow_insecure_settings', 'true' + environment 'AWS_REGION', 'es-test-region' } diff --git a/modules/repository-s3/qa/web-identity-token/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java b/modules/repository-s3/qa/web-identity-token/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java index 2698eb718ded0..03ac986d038f7 100644 --- a/modules/repository-s3/qa/web-identity-token/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java +++ b/modules/repository-s3/qa/web-identity-token/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java @@ -9,8 +9,9 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.AWSCredentialsProvider; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; + import com.sun.net.httpserver.HttpServer; import org.apache.logging.log4j.LogManager; @@ -43,6 +44,7 @@ import java.util.Arrays; import java.util.Map; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.stream.Collectors; @@ -108,7 +110,9 @@ private static HttpServer getHttpServer(Consumer<String> webIdentityTokenCheck) """, ROLE_ARN, ROLE_NAME, - ZonedDateTime.now().plusDays(1L).format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ")) + ZonedDateTime.now(Clock.systemUTC()) + .plusSeconds(1L) // short expiry to force a reload + .format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ssZ")) ).getBytes(StandardCharsets.UTF_8); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); exchange.getResponseBody().write(response); @@ -121,7 +125,7 @@ private static HttpServer getHttpServer(Consumer<String> webIdentityTokenCheck) @SuppressForbidden(reason = "HTTP server is used for testing") private static Map<String, String> getSystemProperties(HttpServer httpServer) { return Map.of( - "com.amazonaws.sdk.stsMetadataServiceEndpointOverride", + "org.elasticsearch.repositories.s3.stsEndpointOverride", "http://" + httpServer.getAddress().getHostName() + ":" + httpServer.getAddress().getPort() ); } @@ -130,9 +134,9 @@ private static Map<String, String> environmentVariables() { return Map.of("AWS_WEB_IDENTITY_TOKEN_FILE", "/var/run/secrets/eks.amazonaws.com/serviceaccount/token", "AWS_ROLE_ARN", ROLE_ARN); } - private static void assertCredentials(AWSCredentials credentials) { - Assert.assertFalse(credentials.getAWSAccessKeyId().isEmpty()); - Assert.assertFalse(credentials.getAWSSecretKey().isEmpty()); + private static void assertCredentials(AwsCredentials credentials) { + Assert.assertFalse(credentials.accessKeyId().isEmpty()); + Assert.assertFalse(credentials.secretAccessKey().isEmpty()); } @SuppressForbidden(reason = "HTTP server is used for testing") @@ -152,15 +156,15 @@ public void testCreateWebIdentityTokenCredentialsProvider() throws Exception { resourceWatcherService ); try { - AWSCredentials credentials = 
S3Service.buildCredentials( + AwsCredentials credentials = S3Service.buildCredentials( LogManager.getLogger(S3Service.class), S3ClientSettings.getClientSettings(Settings.EMPTY, randomAlphaOfLength(8)), webIdentityTokenCredentialsProvider - ).getCredentials(); + ).resolveCredentials(); assertCredentials(credentials); } finally { - webIdentityTokenCredentialsProvider.shutdown(); + webIdentityTokenCredentialsProvider.close(); httpServer.stop(0); } } @@ -198,12 +202,12 @@ public void testPickUpNewWebIdentityTokenWhenItsChanged() throws Exception { resourceWatcherService ); try { - AWSCredentialsProvider awsCredentialsProvider = S3Service.buildCredentials( + AwsCredentialsProvider awsCredentialsProvider = S3Service.buildCredentials( LogManager.getLogger(S3Service.class), S3ClientSettings.getClientSettings(Settings.EMPTY, randomAlphaOfLength(8)), webIdentityTokenCredentialsProvider ); - assertCredentials(awsCredentialsProvider.getCredentials()); + assertCredentials(awsCredentialsProvider.resolveCredentials()); var latch = new CountDownLatch(1); String newWebIdentityToken = "88f84342080d4671a511e10ae905b2b0"; @@ -213,41 +217,14 @@ public void testPickUpNewWebIdentityTokenWhenItsChanged() throws Exception { } }); Files.writeString(environment.configDir().resolve("repository-s3/aws-web-identity-token-file"), newWebIdentityToken); - - safeAwait(latch); - assertCredentials(awsCredentialsProvider.getCredentials()); + do { + // re-resolve credentials in order to trigger a refresh + assertCredentials(awsCredentialsProvider.resolveCredentials()); + } while (latch.await(500, TimeUnit.MILLISECONDS) == false); + assertCredentials(awsCredentialsProvider.resolveCredentials()); } finally { - webIdentityTokenCredentialsProvider.shutdown(); + webIdentityTokenCredentialsProvider.close(); httpServer.stop(0); } } - - public void testSupportRegionalizedEndpoints() throws Exception { - Map environmentVariables = Map.of( - "AWS_WEB_IDENTITY_TOKEN_FILE", - "/var/run/secrets/eks.amazonaws.com/serviceaccount/token", - "AWS_ROLE_ARN", - ROLE_ARN, - "AWS_STS_REGIONAL_ENDPOINTS", - "regional", - "AWS_REGION", - "us-west-2" - ); - Map systemProperties = Map.of(); - - var webIdentityTokenCredentialsProvider = new S3Service.CustomWebIdentityTokenCredentialsProvider( - getEnvironment(), - environmentVariables::get, - systemProperties::getOrDefault, - Clock.systemUTC(), - resourceWatcherService - ); - // We can't verify that webIdentityTokenCredentialsProvider's STS client uses the "https://sts.us-west-2.amazonaws.com" - // endpoint in a unit test. The client depends on hardcoded RegionalEndpointsOptionResolver that in turn depends - // on the system environment that we can't change in the test. So we just verify we that we called `withRegion` - // on stsClientBuilder which should internally correctly configure the endpoint when the STS client is built. 
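Two lifecycle changes run through this test: v1's `getCredentials()`/`shutdown()` pair becomes `resolveCredentials()`/`close()`, and refreshes happen lazily inside `resolveCredentials()`, which is why the test re-resolves in a loop until the file watcher fires. A sketch, assuming a provider that implements `SdkAutoCloseable` (as the STS-backed ones do):

```java
import software.amazon.awssdk.auth.credentials.AwsCredentials;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.utils.SdkAutoCloseable;

public class ProviderLifecycleSketch {
    static AwsCredentials resolveAndRelease(AwsCredentialsProvider provider) {
        try {
            // Refresh is lazy: each call may pick up rotated credentials,
            // e.g. a rewritten web identity token file.
            return provider.resolveCredentials();
        } finally {
            if (provider instanceof SdkAutoCloseable closeable) {
                closeable.close(); // the v2 counterpart of v1's shutdown()
            }
        }
    }
}
```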
- assertEquals("us-west-2", webIdentityTokenCredentialsProvider.getStsRegion()); - - webIdentityTokenCredentialsProvider.shutdown(); - } } diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java index e55668adea101..f02ca3f16d734 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java @@ -149,9 +149,9 @@ public void testMetricsWithErrors() throws IOException { assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_TOTAL, Operation.PUT_OBJECT), equalTo(4L * batch)); assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_TOTAL, Operation.PUT_OBJECT), equalTo(batch)); assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL, Operation.PUT_OBJECT), equalTo(0L)); - assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.PUT_OBJECT), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.PUT_OBJECT), equalTo(3L * batch)); assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.PUT_OBJECT), equalTo(2L * batch)); - assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.PUT_OBJECT), equalTo(batch)); + assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.PUT_OBJECT), equalTo(3L * batch)); assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.PUT_OBJECT), equalTo(2L * batch)); assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.PUT_OBJECT), equalTo(batch)); } @@ -169,9 +169,9 @@ public void testMetricsWithErrors() throws IOException { assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_TOTAL, Operation.GET_OBJECT), equalTo(2L * batch)); assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_TOTAL, Operation.GET_OBJECT), equalTo(batch)); assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL, Operation.GET_OBJECT), equalTo(batch)); - assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.GET_OBJECT), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.GET_OBJECT), equalTo(2L * batch)); assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.GET_OBJECT), equalTo(batch)); - assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); + assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.GET_OBJECT), equalTo(2L * batch)); assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); @@ -192,9 +192,9 @@ public void testMetricsWithErrors() throws IOException { assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_TOTAL, Operation.LIST_OBJECTS), equalTo(5L * batch)); assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_TOTAL, Operation.LIST_OBJECTS), equalTo(batch)); assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL, Operation.LIST_OBJECTS), equalTo(batch)); - assertThat(getLongCounterValue(plugin, 
METRIC_EXCEPTIONS_TOTAL, Operation.LIST_OBJECTS), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.LIST_OBJECTS), equalTo(5L * batch)); assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.LIST_OBJECTS), equalTo(5L * batch)); - assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(batch)); + assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(5L * batch)); assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(5L * batch)); assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(batch)); } @@ -232,8 +232,8 @@ public void testMetricsForRequestRangeNotSatisfied() { assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_TOTAL, Operation.GET_OBJECT), equalTo(3 * batch)); assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_TOTAL, Operation.GET_OBJECT), equalTo(batch)); assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL, Operation.GET_OBJECT), equalTo(batch)); - assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.GET_OBJECT), equalTo(batch)); - assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.GET_OBJECT), equalTo(3 * batch)); + assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.GET_OBJECT), equalTo(3 * batch)); assertThat( getLongCounterValue(plugin, METRIC_EXCEPTIONS_REQUEST_RANGE_NOT_SATISFIED_TOTAL, Operation.GET_OBJECT), equalTo(batch) diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 91be1051f1776..90b33842c93ce 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -9,11 +9,12 @@ package org.elasticsearch.repositories.s3; import fixture.s3.S3HttpHandler; +import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; +import software.amazon.awssdk.core.internal.http.pipeline.stages.ApplyTransactionIdStage; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest; +import software.amazon.awssdk.services.s3.model.MultipartUpload; -import com.amazonaws.http.AmazonHttpClient; -import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; -import com.amazonaws.services.s3.model.ListMultipartUploadsRequest; -import com.amazonaws.services.s3.model.MultipartUpload; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; @@ -79,18 +80,20 @@ import java.util.Objects; import java.util.Set; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; import java.util.stream.StreamSupport; +import static fixture.aws.AwsCredentialsUtils.isValidAwsV4SignedAuthorizationHeader; import static 
org.elasticsearch.repositories.RepositoriesMetrics.METRIC_REQUESTS_TOTAL; import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.getRepositoryDataBlobName; import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomNonDataPurpose; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.allOf; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -99,7 +102,6 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.startsWith; @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") // Need to set up a new cluster for each test because cluster settings use randomized authentication settings @@ -109,7 +111,6 @@ public class S3BlobStoreRepositoryTests extends ESMockAPIBasedRepositoryIntegTes private static final TimeValue TEST_COOLDOWN_PERIOD = TimeValue.timeValueSeconds(10L); private String region; - private String signerOverride; private final AtomicBoolean shouldFailCompleteMultipartUploadRequest = new AtomicBoolean(); @Override @@ -117,11 +118,6 @@ public void setUp() throws Exception { if (randomBoolean()) { region = "test-region"; } - if (region != null && randomBoolean()) { - signerOverride = randomFrom("AWS3SignerType", "AWS4SignerType"); - } else if (randomBoolean()) { - signerOverride = "AWS3SignerType"; - } shouldFailCompleteMultipartUploadRequest.set(false); super.setUp(); } @@ -169,17 +165,12 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { final Settings.Builder builder = Settings.builder() .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), 0) // We have tests that verify an exact wait time .put(S3ClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace("test").getKey(), httpServerUrl()) - // Disable request throttling because some random values in tests might generate too many failures for the S3 client - .put(S3ClientSettings.USE_THROTTLE_RETRIES_SETTING.getConcreteSettingForNamespace("test").getKey(), false) .put(super.nodeSettings(nodeOrdinal, otherSettings)) .setSecureSettings(secureSettings); if (randomBoolean()) { builder.put(S3ClientSettings.DISABLE_CHUNKED_ENCODING.getConcreteSettingForNamespace("test").getKey(), randomBoolean()); } - if (signerOverride != null) { - builder.put(S3ClientSettings.SIGNER_OVERRIDE.getConcreteSettingForNamespace("test").getKey(), signerOverride); - } if (region != null) { builder.put(S3ClientSettings.REGION.getConcreteSettingForNamespace("test").getKey(), region); } @@ -490,30 +481,34 @@ public void testMultipartUploadCleanup() { try (var clientRef = blobStore.clientReference()) { final var danglingBlobName = randomIdentifier(); - final var initiateMultipartUploadRequest = new InitiateMultipartUploadRequest( - blobStore.bucket(), - blobStore.blobContainer(repository.basePath().add("test-multipart-upload")).path().buildAsString() + danglingBlobName - ); - initiateMultipartUploadRequest.putCustomQueryParameter( - S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, - OperationPurpose.SNAPSHOT_DATA.getKey() - ); - final var multipartUploadResult = 
clientRef.client().initiateMultipartUpload(initiateMultipartUploadRequest); + final var initiateMultipartUploadRequest = CreateMultipartUploadRequest.builder() + .bucket(blobStore.bucket()) + .key(blobStore.blobContainer(repository.basePath().add("test-multipart-upload")).path().buildAsString() + danglingBlobName) + .overrideConfiguration( + AwsRequestOverrideConfiguration.builder() + .putRawQueryParameter(S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, randomPurpose().getKey()) + .build() + ) + .build(); - final var listMultipartUploadsRequest = new ListMultipartUploadsRequest(blobStore.bucket()).withPrefix( - repository.basePath().buildAsString() - ); - listMultipartUploadsRequest.putCustomQueryParameter( - S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, - OperationPurpose.SNAPSHOT_DATA.getKey() - ); + final var multipartUploadResult = clientRef.client().createMultipartUpload(initiateMultipartUploadRequest); + + final var listMultipartUploadsRequest = ListMultipartUploadsRequest.builder() + .bucket(blobStore.bucket()) + .prefix(repository.basePath().buildAsString()) + .overrideConfiguration( + AwsRequestOverrideConfiguration.builder() + .putRawQueryParameter(S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, randomPurpose().getKey()) + .build() + ) + .build(); assertEquals( - List.of(multipartUploadResult.getUploadId()), + List.of(multipartUploadResult.uploadId()), clientRef.client() .listMultipartUploads(listMultipartUploadsRequest) - .getMultipartUploads() + .uploads() .stream() - .map(MultipartUpload::getUploadId) + .map(MultipartUpload::uploadId) .toList() ); @@ -535,7 +530,7 @@ public void testMultipartUploadCleanup() { Level.INFO, Strings.format( "cleaned up dangling multipart upload [%s] of blob [%s]*test-multipart-upload/%s]", - multipartUploadResult.getUploadId(), + multipartUploadResult.uploadId(), repoName, danglingBlobName ) @@ -553,9 +548,9 @@ public void match(LogEvent event) { assertThat( clientRef.client() .listMultipartUploads(listMultipartUploadsRequest) - .getMultipartUploads() + .uploads() .stream() - .map(MultipartUpload::getUploadId) + .map(MultipartUpload::uploadId) .toList(), empty() ); @@ -619,23 +614,16 @@ protected class S3BlobStoreHttpHandler extends S3HttpHandler implements BlobStor @Override public void handle(final HttpExchange exchange) throws IOException { - validateAuthHeader(exchange); + assertTrue( + isValidAwsV4SignedAuthorizationHeader( + "test_access_key", + Objects.requireNonNullElse(region, "us-east-1"), + "s3", + exchange.getRequestHeaders().getFirst("Authorization") + ) + ); super.handle(exchange); } - - private void validateAuthHeader(HttpExchange exchange) { - final String authorizationHeaderV4 = exchange.getRequestHeaders().getFirst("Authorization"); - final String authorizationHeaderV3 = exchange.getRequestHeaders().getFirst("X-amzn-authorization"); - - if ("AWS3SignerType".equals(signerOverride)) { - assertThat(authorizationHeaderV3, startsWith("AWS3")); - } else if ("AWS4SignerType".equals(signerOverride)) { - assertThat(authorizationHeaderV4, containsString("aws4_request")); - } - if (region != null && authorizationHeaderV4 != null) { - assertThat(authorizationHeaderV4, containsString("/" + region + "/s3/")); - } - } } /** @@ -647,14 +635,31 @@ private void validateAuthHeader(HttpExchange exchange) { @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") protected static class S3ErroneousHttpHandler extends ErroneousHttpHandler { + // S3 SDK stops retrying after TOKEN_BUCKET_SIZE/DEFAULT_EXCEPTION_TOKEN_COST == 500/5 == 100 
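With the signer override machinery deleted, the handler above can assert the one remaining format: a SigV4 `Authorization` header whose credential scope embeds the signing region. A sketch of that shape and of extracting the region from it (the regex is illustrative, not the fixture's actual implementation):

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class SigV4ScopeSketch {
    // e.g. "AWS4-HMAC-SHA256 Credential=AKID/20250101/us-east-1/s3/aws4_request, SignedHeaders=..., Signature=..."
    private static final Pattern CREDENTIAL_SCOPE = Pattern.compile(
        "AWS4-HMAC-SHA256 Credential=[^/]+/\\d{8}/([a-z0-9-]+)/s3/aws4_request.*"
    );

    static String signingRegion(String authorizationHeader) {
        Matcher matcher = CREDENTIAL_SCOPE.matcher(authorizationHeader);
        return matcher.matches() ? matcher.group(1) : null;
    }
}
```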
failures in quick succession + // see software.amazon.awssdk.retries.DefaultRetryStrategy.Legacy.TOKEN_BUCKET_SIZE + // see software.amazon.awssdk.retries.DefaultRetryStrategy.Legacy.DEFAULT_EXCEPTION_TOKEN_COST + private final Semaphore failurePermits = new Semaphore(99); + S3ErroneousHttpHandler(final HttpHandler delegate, final int maxErrorsPerRequest) { super(delegate, maxErrorsPerRequest); } + /** + * Bypasses {@link ErroneousHttpHandler#handle} once we exhaust {@link #failurePermits} because S3 will start rate limiting. + */ + @Override + public void handle(HttpExchange exchange) throws IOException { + if (failurePermits.tryAcquire()) { + super.handle(exchange); + } else { + delegate.handle(exchange); + } + } + @Override protected String requestUniqueId(final HttpExchange exchange) { // Amazon SDK client provides a unique ID per request - return exchange.getRequestHeaders().getFirst(AmazonHttpClient.HEADER_SDK_TRANSACTION_ID); + return exchange.getRequestHeaders().getFirst(ApplyTransactionIdStage.HEADER_SDK_TRANSACTION_ID); } } @@ -681,6 +686,7 @@ public void handle(HttpExchange exchange) throws IOException { assertTrue(s3Request.hasQueryParamOnce(S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE)); } if (shouldFailCompleteMultipartUploadRequest.get() && s3Request.isCompleteMultipartUploadRequest()) { + trackRequest("PutMultipartObject"); try (exchange) { drainInputStream(exchange.getRequestBody()); exchange.sendResponseHeaders( diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3BasicCredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3BasicCredentialsRestIT.java index 5d82892cd3d71..dfe3bbbc2613e 100644 --- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3BasicCredentialsRestIT.java +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3BasicCredentialsRestIT.java @@ -9,6 +9,7 @@ package org.elasticsearch.repositories.s3; +import fixture.aws.DynamicRegionSupplier; import fixture.s3.S3HttpFixture; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; @@ -20,7 +21,8 @@ import org.junit.rules.RuleChain; import org.junit.rules.TestRule; -import static fixture.aws.AwsCredentialsUtils.ANY_REGION; +import java.util.function.Supplier; + import static fixture.aws.AwsCredentialsUtils.fixedAccessKey; @ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) @@ -34,10 +36,17 @@ public class RepositoryS3BasicCredentialsRestIT extends AbstractRepositoryS3Rest private static final String SECRET_KEY = PREFIX + "secret-key"; private static final String CLIENT = "basic_credentials_client"; - private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, fixedAccessKey(ACCESS_KEY, ANY_REGION, "s3")); + private static final Supplier regionSupplier = new DynamicRegionSupplier(); + private static final S3HttpFixture s3Fixture = new S3HttpFixture( + true, + BUCKET, + BASE_PATH, + fixedAccessKey(ACCESS_KEY, regionSupplier, "s3") + ); public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .module("repository-s3") + .systemProperty("aws.region", regionSupplier) .keystore("s3.client." + CLIENT + ".access_key", ACCESS_KEY) .keystore("s3.client." + CLIENT + ".secret_key", SECRET_KEY) .setting("s3.client." 
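The `Semaphore` guard above exists because of the v2 retry circuit breaker described in the comment: the legacy strategy's token bucket holds 500 tokens and charges 5 per exception, so roughly 100 rapid failures exhaust it and retries stop. A sketch of selecting that strategy explicitly rather than relying on the default (assuming an SDK version, such as the 2.30.x used here, where `retryStrategy` accepts a `RetryMode`):

```java
import software.amazon.awssdk.core.retry.RetryMode;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;

public class RetryModeSketch {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.builder()
            .region(Region.US_EAST_1)
            // LEGACY is the token-bucket behaviour referenced above; STANDARD and ADAPTIVE also exist.
            .overrideConfiguration(o -> o.retryStrategy(RetryMode.LEGACY))
            .build()) {
            // ... use the client ...
        }
    }
}
```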
+ CLIENT + ".endpoint", s3Fixture::getAddress) diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java index 66fcdc4ececf4..d365fde6eec8c 100644 --- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java @@ -10,6 +10,7 @@ package org.elasticsearch.repositories.s3; import fixture.aws.DynamicAwsCredentials; +import fixture.aws.DynamicRegionSupplier; import fixture.aws.imds.Ec2ImdsHttpFixture; import fixture.aws.imds.Ec2ImdsServiceBuilder; import fixture.aws.imds.Ec2ImdsVersion; @@ -25,6 +26,7 @@ import org.junit.rules.TestRule; import java.util.Set; +import java.util.function.Supplier; @ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) @ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 @@ -35,7 +37,8 @@ public class RepositoryS3EcsCredentialsRestIT extends AbstractRepositoryS3RestTe private static final String BASE_PATH = PREFIX + "base_path"; private static final String CLIENT = "ecs_credentials_client"; - private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials("*", "s3"); + private static final Supplier regionSupplier = new DynamicRegionSupplier(); + private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials(regionSupplier, "s3"); private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture( new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V1).newCredentialsConsumer(dynamicCredentials::addValidCredentials) @@ -48,6 +51,7 @@ public class RepositoryS3EcsCredentialsRestIT extends AbstractRepositoryS3RestTe .module("repository-s3") .setting("s3.client." + CLIENT + ".endpoint", s3Fixture::getAddress) .environment("AWS_CONTAINER_CREDENTIALS_FULL_URI", () -> ec2ImdsHttpFixture.getAddress() + "/ecs_credentials_endpoint") + .environment("AWS_REGION", regionSupplier) // Region is supplied by environment variable when running in ECS .build(); @ClassRule diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV1CredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV1CredentialsRestIT.java deleted file mode 100644 index 29031da10665d..0000000000000 --- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV1CredentialsRestIT.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ - -package org.elasticsearch.repositories.s3; - -import fixture.aws.DynamicAwsCredentials; -import fixture.aws.imds.Ec2ImdsHttpFixture; -import fixture.aws.imds.Ec2ImdsServiceBuilder; -import fixture.aws.imds.Ec2ImdsVersion; -import fixture.s3.S3HttpFixture; - -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; - -import org.elasticsearch.test.cluster.ElasticsearchCluster; -import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) -@ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 -public class RepositoryS3ImdsV1CredentialsRestIT extends AbstractRepositoryS3RestTestCase { - - private static final String PREFIX = getIdentifierPrefix("RepositoryS3ImdsV1CredentialsRestIT"); - private static final String BUCKET = PREFIX + "bucket"; - private static final String BASE_PATH = PREFIX + "base_path"; - private static final String CLIENT = "imdsv1_credentials_client"; - - private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials("*", "s3"); - - private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture( - new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V1).newCredentialsConsumer(dynamicCredentials::addValidCredentials) - ); - - private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicCredentials::isAuthorized); - - public static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .module("repository-s3") - .setting("s3.client." 
+ CLIENT + ".endpoint", s3Fixture::getAddress) - .systemProperty(Ec2ImdsHttpFixture.ENDPOINT_OVERRIDE_SYSPROP_NAME, ec2ImdsHttpFixture::getAddress) - .build(); - - @ClassRule - public static TestRule ruleChain = RuleChain.outerRule(ec2ImdsHttpFixture).around(s3Fixture).around(cluster); - - @Override - protected String getTestRestCluster() { - return cluster.getHttpAddresses(); - } - - @Override - protected String getBucketName() { - return BUCKET; - } - - @Override - protected String getBasePath() { - return BASE_PATH; - } - - @Override - protected String getClientName() { - return CLIENT; - } -} diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java index d6fc86a0afe34..20e53561cdbc8 100644 --- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java @@ -10,6 +10,7 @@ package org.elasticsearch.repositories.s3; import fixture.aws.DynamicAwsCredentials; +import fixture.aws.DynamicRegionSupplier; import fixture.aws.imds.Ec2ImdsHttpFixture; import fixture.aws.imds.Ec2ImdsServiceBuilder; import fixture.aws.imds.Ec2ImdsVersion; @@ -24,6 +25,8 @@ import org.junit.rules.RuleChain; import org.junit.rules.TestRule; +import java.util.function.Supplier; + @ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) @ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 public class RepositoryS3ImdsV2CredentialsRestIT extends AbstractRepositoryS3RestTestCase { @@ -33,10 +36,12 @@ public class RepositoryS3ImdsV2CredentialsRestIT extends AbstractRepositoryS3Res private static final String BASE_PATH = PREFIX + "base_path"; private static final String CLIENT = "imdsv2_credentials_client"; - private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials("*", "s3"); + private static final Supplier regionSupplier = new DynamicRegionSupplier(); + private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials(regionSupplier, "s3"); private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture( new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V2).newCredentialsConsumer(dynamicCredentials::addValidCredentials) + .instanceIdentityDocument((b, p) -> b.field("region", regionSupplier.get())) ); private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicCredentials::isAuthorized); @@ -44,7 +49,7 @@ public class RepositoryS3ImdsV2CredentialsRestIT extends AbstractRepositoryS3Res public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .module("repository-s3") .setting("s3.client." 
+ CLIENT + ".endpoint", s3Fixture::getAddress) - .systemProperty(Ec2ImdsHttpFixture.ENDPOINT_OVERRIDE_SYSPROP_NAME, ec2ImdsHttpFixture::getAddress) + .systemProperty(Ec2ImdsHttpFixture.ENDPOINT_OVERRIDE_SYSPROP_NAME_SDK2, ec2ImdsHttpFixture::getAddress) .build(); @ClassRule diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestReloadCredentialsIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestReloadCredentialsIT.java index 0faea267a4221..28bd88da8ddd3 100644 --- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestReloadCredentialsIT.java +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestReloadCredentialsIT.java @@ -92,10 +92,7 @@ public void testReloadCredentialsFromKeystore() throws IOException { // Ensure that initial credentials now invalid final var accessDeniedException2 = expectThrows(ResponseException.class, () -> client().performRequest(verifyRequest)); assertThat(accessDeniedException2.getResponse().getStatusLine().getStatusCode(), equalTo(500)); - assertThat( - accessDeniedException2.getMessage(), - allOf(containsString("Access denied"), containsString("Status Code: 403"), containsString("Error Code: AccessDenied")) - ); + assertThat(accessDeniedException2.getMessage(), allOf(containsString("Access denied"), containsString("Status Code: 403"))); // Set up refreshed credentials keystoreSettings.put("s3.client.default.access_key", accessKey2); diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsCredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsCredentialsRestIT.java index a58645363b0e9..324a6d7e01f7a 100644 --- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsCredentialsRestIT.java +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsCredentialsRestIT.java @@ -10,7 +10,9 @@ package org.elasticsearch.repositories.s3; import fixture.aws.DynamicAwsCredentials; +import fixture.aws.DynamicRegionSupplier; import fixture.aws.sts.AwsStsHttpFixture; +import fixture.aws.sts.AwsStsHttpHandler; import fixture.s3.S3HttpFixture; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; @@ -23,6 +25,8 @@ import org.junit.rules.RuleChain; import org.junit.rules.TestRule; +import java.util.function.Supplier; + @ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) @ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 public class RepositoryS3StsCredentialsRestIT extends AbstractRepositoryS3RestTestCase { @@ -32,7 +36,8 @@ public class RepositoryS3StsCredentialsRestIT extends AbstractRepositoryS3RestTe private static final String BASE_PATH = PREFIX + "base_path"; private static final String CLIENT = "sts_credentials_client"; - private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials("*", "s3"); + private static final Supplier regionSupplier = new DynamicRegionSupplier(); + private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials(regionSupplier, "s3"); private static final S3HttpFixture s3HttpFixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicCredentials::isAuthorized); @@ -49,19 +54,22 @@ public class RepositoryS3StsCredentialsRestIT extends 
AbstractRepositoryS3RestTe public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .module("repository-s3") .setting("s3.client." + CLIENT + ".endpoint", s3HttpFixture::getAddress) - .systemProperty( - "com.amazonaws.sdk.stsMetadataServiceEndpointOverride", - () -> stsHttpFixture.getAddress() + "/assume-role-with-web-identity" - ) + .systemProperty("org.elasticsearch.repositories.s3.stsEndpointOverride", stsHttpFixture::getAddress) .configFile( S3Service.CustomWebIdentityTokenCredentialsProvider.WEB_IDENTITY_TOKEN_FILE_LOCATION, Resource.fromString(WEB_IDENTITY_TOKEN_FILE_CONTENTS) ) - .environment("AWS_WEB_IDENTITY_TOKEN_FILE", S3Service.CustomWebIdentityTokenCredentialsProvider.WEB_IDENTITY_TOKEN_FILE_LOCATION) - // The AWS STS SDK requires the role and session names to be set. We can verify that they are sent to S3S in the - // S3HttpFixtureWithSTS fixture - .environment("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/FederatedWebIdentityRole") - .environment("AWS_ROLE_SESSION_NAME", "sts-fixture-test") + // When running in EKS with container identity the environment variable `AWS_WEB_IDENTITY_TOKEN_FILE` will point to a file which + // ES cannot access due to its security policy; we override it with `${ES_CONF_PATH}/repository-s3/aws-web-identity-token-file` + // and require the user to set up a symlink at this location. Thus we can set `AWS_WEB_IDENTITY_TOKEN_FILE` to any old path: + .environment("AWS_WEB_IDENTITY_TOKEN_FILE", () -> randomIdentifier() + "/" + randomIdentifier()) + // The AWS STS SDK requires the role ARN, it also accepts a session name but will make one up if it's not set. + // These are checked in AwsStsHttpHandler: + .environment("AWS_ROLE_ARN", AwsStsHttpHandler.ROLE_ARN) + .environment("AWS_ROLE_SESSION_NAME", AwsStsHttpHandler.ROLE_NAME) + // SDKv2 always uses regional endpoints + .environment("AWS_STS_REGIONAL_ENDPOINTS", () -> randomBoolean() ? "regional" : null) + .environment("AWS_REGION", regionSupplier) .build(); @ClassRule diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java index e098422ab8b98..90666284a25a3 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java @@ -9,22 +9,24 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3Client; +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.services.s3.S3Client; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.Releasable; /** - * Handles the shutdown of the wrapped {@link AmazonS3Client} using reference - * counting. + * Handles the shutdown of the wrapped {@link S3Client} using reference counting. */ public class AmazonS3Reference extends AbstractRefCounted implements Releasable { - private final AmazonS3 client; + private final S3Client client; + /** The S3Client shutdown logic does not handle shutdown of the HttpClient passed into it. So we must manually handle that. */ + private final SdkHttpClient httpClient; - AmazonS3Reference(AmazonS3 client) { + AmazonS3Reference(S3Client client, SdkHttpClient httpClient) { this.client = client; + this.httpClient = httpClient; } /** @@ -39,13 +41,14 @@ public void close() { * Returns the underlying `AmazonS3` client. 
All method calls are permitted BUT * NOT shutdown. Shutdown is called when reference count reaches 0. */ - public AmazonS3 client() { + public S3Client client() { return client; } @Override protected void closeInternal() { - client.shutdown(); + client.close(); + httpClient.close(); } } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/HttpScheme.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/HttpScheme.java new file mode 100644 index 0000000000000..cbfe3780f2c8c --- /dev/null +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/HttpScheme.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.repositories.s3; + +public enum HttpScheme { + HTTP("http"), + HTTPS("https"); + + private final String schemeString; + + HttpScheme(String schemeString) { + this.schemeString = schemeString; + } + + public String getSchemeString() { + return schemeString; + } +} diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/RegionFromEndpointGuesser.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/RegionFromEndpointGuesser.java new file mode 100644 index 0000000000000..6de784c7a7dc2 --- /dev/null +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/RegionFromEndpointGuesser.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.repositories.s3; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.SuppressForbidden; + +import java.io.BufferedReader; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; + +/** + * Simple mapping from S3 endpoint hostnames to AWS region names, in case the user does not specify a region. This allows Elasticsearch to + * guess an appropriate AWS region name, and keep working, if a user does not specify one. This is a best-effort attempt for backwards + * compatibility: AWS SDK V1 would extrapolate the correct region but V2 considers it a fatal error not to be told the region name + * explicitly. + *
+ * The mapping is loaded from the JAR resource named {@code regions_by_endpoint.txt}. + *
+ */ +class RegionFromEndpointGuesser { + private static final Map<String, String> regionsByEndpoint; + + static { + try ( + var resourceStream = readFromJarResourceUrl(RegionFromEndpointGuesser.class.getResource("regions_by_endpoint.txt")); + var reader = new BufferedReader(new InputStreamReader(resourceStream, StandardCharsets.UTF_8)) + ) { + final var builder = new HashMap<String, String>(); + while (true) { + final var line = reader.readLine(); + if (line == null) { + break; + } + final var parts = line.split(" +"); + if (parts.length != 2) { + throw new IllegalStateException("invalid regions_by_endpoint.txt line: " + line); + } + builder.put(parts[1], parts[0]); + } + regionsByEndpoint = Map.copyOf(builder); + } catch (Exception e) { + assert false : e; + throw new IllegalStateException("could not read regions_by_endpoint.txt", e); + } + } + + @SuppressForbidden(reason = "reads resource from jar") + private static InputStream readFromJarResourceUrl(URL source) throws IOException { + if (source == null) { + throw new FileNotFoundException("links resource not found at [" + source + "]"); + } + return source.openStream(); + } + + /** + * @return a guess at the region name for the given S3 endpoint, or {@code null} if the endpoint is not recognised. + */ + @Nullable + static String guessRegion(@Nullable String endpoint) { + if (endpoint == null) { + return null; + } + + if (endpoint.startsWith("https://")) { + endpoint = endpoint.substring("https://".length()); + } + + return regionsByEndpoint.get(endpoint); + } + +} diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BasicCredentials.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BasicCredentials.java deleted file mode 100644 index ab9ba64d1fa69..0000000000000 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BasicCredentials.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ -package org.elasticsearch.repositories.s3; - -import com.amazonaws.auth.AWSCredentials; - -import java.util.Objects; - -class S3BasicCredentials implements AWSCredentials { - - private final String accessKey; - - private final String secretKey; - - S3BasicCredentials(String accessKey, String secretKey) { - this.accessKey = accessKey; - this.secretKey = secretKey; - } - - @Override - public final String getAWSAccessKeyId() { - return accessKey; - } - - @Override - public final String getAWSSecretKey() { - return secretKey; - } - - @Override - public boolean equals(final Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - final S3BasicCredentials that = (S3BasicCredentials) o; - return accessKey.equals(that.accessKey) && secretKey.equals(that.secretKey); - } - - @Override - public int hashCode() { - return Objects.hash(accessKey, secretKey); - } -} diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BasicSessionCredentials.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BasicSessionCredentials.java deleted file mode 100644 index 0dee56938c408..0000000000000 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BasicSessionCredentials.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ -package org.elasticsearch.repositories.s3; - -import com.amazonaws.auth.AWSSessionCredentials; - -import java.util.Objects; - -final class S3BasicSessionCredentials extends S3BasicCredentials implements AWSSessionCredentials { - - private final String sessionToken; - - S3BasicSessionCredentials(String accessKey, String secretKey, String sessionToken) { - super(accessKey, secretKey); - this.sessionToken = sessionToken; - } - - @Override - public String getSessionToken() { - return sessionToken; - } - - @Override - public boolean equals(final Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - final S3BasicSessionCredentials that = (S3BasicSessionCredentials) o; - return sessionToken.equals(that.sessionToken) - && getAWSAccessKeyId().equals(that.getAWSAccessKeyId()) - && getAWSSecretKey().equals(that.getAWSSecretKey()); - } - - @Override - public int hashCode() { - return Objects.hash(sessionToken, getAWSAccessKeyId(), getAWSSecretKey()); - } -} diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index 74884ccf76896..5c9ca53a2ba70 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -9,25 +9,27 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.AmazonClientException; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; -import com.amazonaws.services.s3.model.AmazonS3Exception; -import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; -import com.amazonaws.services.s3.model.GetObjectMetadataRequest; -import com.amazonaws.services.s3.model.GetObjectRequest; -import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; -import com.amazonaws.services.s3.model.ListMultipartUploadsRequest; -import com.amazonaws.services.s3.model.ListNextBatchOfObjectsRequest; -import com.amazonaws.services.s3.model.ListObjectsRequest; -import com.amazonaws.services.s3.model.MultipartUpload; -import com.amazonaws.services.s3.model.ObjectListing; -import com.amazonaws.services.s3.model.ObjectMetadata; -import com.amazonaws.services.s3.model.PartETag; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.UploadPartRequest; -import com.amazonaws.services.s3.model.UploadPartResult; -import com.amazonaws.util.ValidationUtils; +import software.amazon.awssdk.awscore.exception.AwsServiceException; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.exception.SdkServiceException; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CompletedPart; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.HeadObjectRequest; +import software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; +import 
software.amazon.awssdk.services.s3.model.ListObjectsV2Response; +import software.amazon.awssdk.services.s3.model.MultipartUpload; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.S3Exception; +import software.amazon.awssdk.services.s3.model.SdkPartType; +import software.amazon.awssdk.services.s3.model.ServerSideEncryption; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import software.amazon.awssdk.services.s3.model.UploadPartResponse; import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; @@ -62,20 +64,21 @@ import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.ChunkedBlobOutputStream; import org.elasticsearch.repositories.s3.S3BlobStore.Operation; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.nio.file.NoSuchFileException; import java.time.Instant; import java.util.ArrayList; -import java.util.Date; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; -import java.util.function.Function; import java.util.stream.Collectors; import static org.elasticsearch.common.blobstore.support.BlobContainerUtils.getRegisterUsingConsistentRead; @@ -160,8 +163,10 @@ public void writeMetadataBlob( assert purpose != OperationPurpose.SNAPSHOT_DATA && BlobContainer.assertPurposeConsistency(purpose, blobName) : purpose; final String absoluteBlobKey = buildKey(blobName); try ( - AmazonS3Reference clientReference = blobStore.clientReference(); - ChunkedBlobOutputStream out = new ChunkedBlobOutputStream<>(blobStore.bigArrays(), blobStore.bufferSizeInBytes()) { + ChunkedBlobOutputStream out = new ChunkedBlobOutputStream<>( + blobStore.bigArrays(), + blobStore.bufferSizeInBytes() + ) { private final SetOnce uploadId = new SetOnce<>(); @@ -176,13 +181,17 @@ private void flushBuffer(boolean lastPart) throws IOException { } if (flushedBytes == 0L) { assert lastPart == false : "use single part upload if there's only a single part"; - uploadId.set( - SocketAccess.doPrivileged( - () -> clientReference.client() - .initiateMultipartUpload(initiateMultiPartUpload(purpose, absoluteBlobKey)) - .getUploadId() - ) - ); + try (var clientReference = blobStore.clientReference()) { + uploadId.set( + SocketAccess.doPrivileged( + () -> clientReference.client() + .createMultipartUpload( + createMultipartUpload(purpose, Operation.PUT_MULTIPART_OBJECT, absoluteBlobKey) + ) + .uploadId() + ) + ); + } if (Strings.isEmpty(uploadId.get())) { throw new IOException("Failed to initialize multipart upload " + absoluteBlobKey); } @@ -190,17 +199,21 @@ private void flushBuffer(boolean lastPart) throws IOException { assert lastPart == false || successful : "must only write last part if successful"; final UploadPartRequest uploadRequest = createPartUploadRequest( purpose, - buffer.bytes().streamInput(), uploadId.get(), parts.size() + 1, absoluteBlobKey, buffer.size(), lastPart ); - final UploadPartResult uploadResponse = SocketAccess.doPrivileged( - () -> clientReference.client().uploadPart(uploadRequest) - ); - finishPart(uploadResponse.getPartETag()); + final InputStream partContentStream = buffer.bytes().streamInput(); + final UploadPartResponse uploadResponse; + try (var clientReference = 
blobStore.clientReference()) { + uploadResponse = SocketAccess.doPrivileged( + () -> clientReference.client() + .uploadPart(uploadRequest, RequestBody.fromInputStream(partContentStream, buffer.size())) + ); + } + finishPart(CompletedPart.builder().partNumber(parts.size() + 1).eTag(uploadResponse.eTag()).build()); } @Override @@ -209,14 +222,23 @@ protected void onCompletion() throws IOException { writeBlob(purpose, blobName, buffer.bytes(), failIfAlreadyExists); } else { flushBuffer(true); - final CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest( - blobStore.bucket(), - absoluteBlobKey, - uploadId.get(), - parts + final var completeMultipartUploadRequestBuilder = CompleteMultipartUploadRequest.builder() + .bucket(blobStore.bucket()) + .key(absoluteBlobKey) + .uploadId(uploadId.get()) + .multipartUpload(b -> b.parts(parts)); + S3BlobStore.configureRequestForMetrics( + completeMultipartUploadRequestBuilder, + blobStore, + Operation.PUT_MULTIPART_OBJECT, + purpose ); - S3BlobStore.configureRequestForMetrics(complRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose); - SocketAccess.doPrivilegedVoid(() -> clientReference.client().completeMultipartUpload(complRequest)); + final var completeMultipartUploadRequest = completeMultipartUploadRequestBuilder.build(); + try (var clientReference = blobStore.clientReference()) { + SocketAccess.doPrivilegedVoid( + () -> clientReference.client().completeMultipartUpload(completeMultipartUploadRequest) + ); + } } } @@ -236,14 +258,12 @@ protected void onFailure() { // This method is largely copied from AmazonS3Client#doesObjectExist with the ability to instrument the getObjectMetadataRequest private boolean doesObjectExist(OperationPurpose purpose, AmazonS3Reference clientReference, String bucketName, String objectName) { try { - ValidationUtils.assertStringNotEmpty(bucketName, "bucketName"); - ValidationUtils.assertStringNotEmpty(objectName, "objectName"); - final var getObjectMetadataRequest = new GetObjectMetadataRequest(bucketName, objectName); - S3BlobStore.configureRequestForMetrics(getObjectMetadataRequest, blobStore, Operation.HEAD_OBJECT, purpose); - clientReference.client().getObjectMetadata(getObjectMetadataRequest); + final var headObjectRequestBuilder = HeadObjectRequest.builder().bucket(bucketName).key(objectName); + S3BlobStore.configureRequestForMetrics(headObjectRequestBuilder, blobStore, Operation.HEAD_OBJECT, purpose); + clientReference.client().headObject(headObjectRequestBuilder.build()); return true; - } catch (AmazonS3Exception e) { - if (e.getStatusCode() == 404) { + } catch (S3Exception e) { + if (e.statusCode() == 404) { return false; } throw e; @@ -252,44 +272,46 @@ private boolean doesObjectExist(OperationPurpose purpose, AmazonS3Reference clie private UploadPartRequest createPartUploadRequest( OperationPurpose purpose, - InputStream stream, String uploadId, int number, String blobName, long size, boolean lastPart ) { - final UploadPartRequest uploadRequest = new UploadPartRequest(); - uploadRequest.setBucketName(blobStore.bucket()); - uploadRequest.setKey(blobName); - uploadRequest.setUploadId(uploadId); - uploadRequest.setPartNumber(number); - uploadRequest.setInputStream(stream); - S3BlobStore.configureRequestForMetrics(uploadRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose); - uploadRequest.setPartSize(size); - uploadRequest.setLastPart(lastPart); - return uploadRequest; + final var uploadPartRequestBuilder = UploadPartRequest.builder(); + 
uploadPartRequestBuilder.bucket(blobStore.bucket()); + uploadPartRequestBuilder.key(blobName); + uploadPartRequestBuilder.uploadId(uploadId); + uploadPartRequestBuilder.partNumber(number); + uploadPartRequestBuilder.contentLength(size); + uploadPartRequestBuilder.sdkPartType(lastPart ? SdkPartType.LAST : SdkPartType.DEFAULT); + S3BlobStore.configureRequestForMetrics(uploadPartRequestBuilder, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose); + return uploadPartRequestBuilder.build(); } private void abortMultiPartUpload(OperationPurpose purpose, String uploadId, String blobName) { - final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(blobStore.bucket(), blobName, uploadId); - S3BlobStore.configureRequestForMetrics(abortRequest, blobStore, Operation.ABORT_MULTIPART_OBJECT, purpose); - try (AmazonS3Reference clientReference = blobStore.clientReference()) { - SocketAccess.doPrivilegedVoid(() -> clientReference.client().abortMultipartUpload(abortRequest)); + final var abortMultipartUploadRequestBuilder = AbortMultipartUploadRequest.builder() + .bucket(blobStore.bucket()) + .key(blobName) + .uploadId(uploadId); + S3BlobStore.configureRequestForMetrics(abortMultipartUploadRequestBuilder, blobStore, Operation.ABORT_MULTIPART_OBJECT, purpose); + final var abortMultipartUploadRequest = abortMultipartUploadRequestBuilder.build(); + try (var clientReference = blobStore.clientReference()) { + SocketAccess.doPrivilegedVoid(() -> clientReference.client().abortMultipartUpload(abortMultipartUploadRequest)); } } - private InitiateMultipartUploadRequest initiateMultiPartUpload(OperationPurpose purpose, String blobName) { - final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(blobStore.bucket(), blobName); - initRequest.setStorageClass(blobStore.getStorageClass()); - initRequest.setCannedACL(blobStore.getCannedACL()); - S3BlobStore.configureRequestForMetrics(initRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose); + private CreateMultipartUploadRequest createMultipartUpload(OperationPurpose purpose, Operation operation, String blobName) { + final var createMultipartUploadRequestBuilder = CreateMultipartUploadRequest.builder() + .bucket(blobStore.bucket()) + .key(blobName) + .storageClass(blobStore.getStorageClass()) + .acl(blobStore.getCannedACL()); if (blobStore.serverSideEncryption()) { - final ObjectMetadata md = new ObjectMetadata(); - md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); - initRequest.setObjectMetadata(md); + createMultipartUploadRequestBuilder.serverSideEncryption(ServerSideEncryption.AES256); } - return initRequest; + S3BlobStore.configureRequestForMetrics(createMultipartUploadRequestBuilder, blobStore, operation, purpose); + return createMultipartUploadRequestBuilder.build(); } // package private for testing @@ -308,35 +330,30 @@ public void writeBlobAtomic(OperationPurpose purpose, String blobName, BytesRefe public DeleteResult delete(OperationPurpose purpose) throws IOException { final AtomicLong deletedBlobs = new AtomicLong(); final AtomicLong deletedBytes = new AtomicLong(); - try (AmazonS3Reference clientReference = blobStore.clientReference()) { - ObjectListing prevListing = null; + try (var clientReference = blobStore.clientReference()) { + ListObjectsV2Response prevListing = null; while (true) { - final ObjectListing list; + final var listObjectsRequestBuilder = ListObjectsV2Request.builder().bucket(blobStore.bucket()).prefix(keyPath); + 
S3BlobStore.configureRequestForMetrics(listObjectsRequestBuilder, blobStore, Operation.LIST_OBJECTS, purpose); if (prevListing != null) { - final var listNextBatchOfObjectsRequest = new ListNextBatchOfObjectsRequest(prevListing); - S3BlobStore.configureRequestForMetrics(listNextBatchOfObjectsRequest, blobStore, Operation.LIST_OBJECTS, purpose); - list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(listNextBatchOfObjectsRequest)); - } else { - final ListObjectsRequest listObjectsRequest = new ListObjectsRequest(); - listObjectsRequest.setBucketName(blobStore.bucket()); - listObjectsRequest.setPrefix(keyPath); - S3BlobStore.configureRequestForMetrics(listObjectsRequest, blobStore, Operation.LIST_OBJECTS, purpose); - list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest)); + listObjectsRequestBuilder.continuationToken(prevListing.nextContinuationToken()); } - final Iterator blobNameIterator = Iterators.map(list.getObjectSummaries().iterator(), summary -> { + final var listObjectsRequest = listObjectsRequestBuilder.build(); + final var listObjectsResponse = SocketAccess.doPrivileged(() -> clientReference.client().listObjectsV2(listObjectsRequest)); + final Iterator blobNameIterator = Iterators.map(listObjectsResponse.contents().iterator(), s3Object -> { deletedBlobs.incrementAndGet(); - deletedBytes.addAndGet(summary.getSize()); - return summary.getKey(); + deletedBytes.addAndGet(s3Object.size()); + return s3Object.key(); }); - if (list.isTruncated()) { + if (listObjectsResponse.isTruncated()) { blobStore.deleteBlobs(purpose, blobNameIterator); - prevListing = list; + prevListing = listObjectsResponse; } else { blobStore.deleteBlobs(purpose, Iterators.concat(blobNameIterator, Iterators.single(keyPath))); break; } } - } catch (final AmazonClientException e) { + } catch (final SdkException e) { throw new IOException("Exception when deleting blob container [" + keyPath + "]", e); } return new DeleteResult(deletedBlobs.get(), deletedBytes.get()); @@ -349,16 +366,22 @@ public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator listBlobsByPrefix(OperationPurpose purpose, @Nullable String blobNamePrefix) throws IOException { - try (AmazonS3Reference clientReference = blobStore.clientReference()) { - return executeListing( - purpose, - clientReference, - listObjectsRequest(purpose, blobNamePrefix == null ? keyPath : buildKey(blobNamePrefix)) - ).stream() - .flatMap(listing -> listing.getObjectSummaries().stream()) - .map(summary -> new BlobMetadata(summary.getKey().substring(keyPath.length()), summary.getSize())) - .collect(Collectors.toMap(BlobMetadata::name, Function.identity())); - } catch (final AmazonClientException e) { + try { + final var results = new HashMap(); + final var iterator = executeListing(purpose, blobNamePrefix == null ? 
keyPath : buildKey(blobNamePrefix)); + while (iterator.hasNext()) { + final var currentPage = iterator.next(); + for (final var s3Object : currentPage.contents()) { + final var blobName = s3Object.key().substring(keyPath.length()); + if (results.put(blobName, new BlobMetadata(blobName, s3Object.size())) != null) { + throw new IllegalStateException( + "listing objects by prefix [" + blobNamePrefix + "] yielded multiple blobs with key [" + s3Object.key() + "]" + ); + } + } + } + return results; + } catch (final SdkException e) { throw new IOException("Exception when listing blobs by prefix [" + blobNamePrefix + "]", e); } } @@ -370,60 +393,76 @@ public Map listBlobs(OperationPurpose purpose) throws IOEx @Override public Map children(OperationPurpose purpose) throws IOException { - try (AmazonS3Reference clientReference = blobStore.clientReference()) { - return executeListing(purpose, clientReference, listObjectsRequest(purpose, keyPath)).stream().flatMap(listing -> { - assert listing.getObjectSummaries().stream().noneMatch(s -> { - for (String commonPrefix : listing.getCommonPrefixes()) { - if (s.getKey().substring(keyPath.length()).startsWith(commonPrefix)) { - return true; - } + try { + final var results = new HashMap(); + final var relativePrefixStart = keyPath.length(); + final var iterator = executeListing(purpose, keyPath); + while (iterator.hasNext()) { + final var currentPage = iterator.next(); + for (final var commonPrefix : currentPage.commonPrefixes()) { + final var absolutePrefix = commonPrefix.prefix(); + if (absolutePrefix.length() <= relativePrefixStart + 1) { + continue; + } + final var relativePrefix = absolutePrefix.substring(relativePrefixStart, absolutePrefix.length() - 1); + assert relativePrefix.isEmpty() == false; + assert currentPage.contents().stream().noneMatch(s3Object -> s3Object.key().startsWith(absolutePrefix)) + : "Response contained children for listed common prefix " + absolutePrefix; + if (results.put(relativePrefix, blobStore.blobContainer(path().add(relativePrefix))) != null) { + throw new IllegalStateException( + "listing child containers of [" + keyPath + "] yielded multiple children with key [" + relativePrefix + "]" + ); } - return false; - }) : "Response contained children for listed common prefixes."; - return listing.getCommonPrefixes().stream(); - }) - .map(prefix -> prefix.substring(keyPath.length())) - .filter(name -> name.isEmpty() == false) - // Stripping the trailing slash off of the common prefix - .map(name -> name.substring(0, name.length() - 1)) - .collect(Collectors.toMap(Function.identity(), name -> blobStore.blobContainer(path().add(name)))); - } catch (final AmazonClientException e) { + } + } + return results; + } catch (final SdkException e) { throw new IOException("Exception when listing children of [" + path().buildAsString() + ']', e); } } - private List executeListing( - OperationPurpose purpose, - AmazonS3Reference clientReference, - ListObjectsRequest listObjectsRequest - ) { - final List results = new ArrayList<>(); - ObjectListing prevListing = null; - while (true) { - ObjectListing list; - if (prevListing != null) { - final var listNextBatchOfObjectsRequest = new ListNextBatchOfObjectsRequest(prevListing); - S3BlobStore.configureRequestForMetrics(listNextBatchOfObjectsRequest, blobStore, Operation.LIST_OBJECTS, purpose); - list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(listNextBatchOfObjectsRequest)); - } else { - list = SocketAccess.doPrivileged(() -> 
clientReference.client().listObjects(listObjectsRequest)); + private Iterator executeListing(OperationPurpose purpose, String pathPrefix) { + return new Iterator<>() { + @Nullable // if after last page + private ListObjectsV2Response nextResponse = listNextObjects(purpose, pathPrefix, null); + + @Override + public boolean hasNext() { + return nextResponse != null; } - results.add(list); - if (list.isTruncated()) { - prevListing = list; - } else { - break; + + @Override + public ListObjectsV2Response next() { + final var currentResponse = nextResponse; + nextResponse = currentResponse.nextContinuationToken() == null + ? null + : listNextObjects(purpose, pathPrefix, currentResponse); + return currentResponse; } - } - return results; + }; } - private ListObjectsRequest listObjectsRequest(OperationPurpose purpose, String pathPrefix) { - final ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(blobStore.bucket()) - .withPrefix(pathPrefix) - .withDelimiter("/"); - S3BlobStore.configureRequestForMetrics(listObjectsRequest, blobStore, Operation.LIST_OBJECTS, purpose); - return listObjectsRequest; + private ListObjectsV2Response listNextObjects( + OperationPurpose operationPurpose, + String pathPrefix, + @Nullable /* if requesting the first page of objects */ + ListObjectsV2Response previousResponse + ) { + try (var clientReference = blobStore.clientReference()) { + final var listObjectsRequestBuilder = ListObjectsV2Request.builder() + .bucket(blobStore.bucket()) + .prefix(pathPrefix) + .delimiter("/"); + if (previousResponse != null) { + if (previousResponse.nextContinuationToken() == null) { + throw new IllegalStateException("cannot request next page of object listing without a continuation token"); + } + listObjectsRequestBuilder.continuationToken(previousResponse.nextContinuationToken()); + } + S3BlobStore.configureRequestForMetrics(listObjectsRequestBuilder, blobStore, Operation.LIST_OBJECTS, operationPurpose); + final var listObjectsRequest = listObjectsRequestBuilder.build(); + return SocketAccess.doPrivileged(() -> clientReference.client().listObjectsV2(listObjectsRequest)); + } } // exposed for tests @@ -441,114 +480,149 @@ void executeSingleUpload( final InputStream input, final long blobSize ) throws IOException { + try (var clientReference = s3BlobStore.clientReference()) { + // Extra safety checks + if (blobSize > MAX_FILE_SIZE.getBytes()) { + throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE); + } + if (blobSize > s3BlobStore.bufferSizeInBytes()) { + throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than buffer size"); + } - // Extra safety checks - if (blobSize > MAX_FILE_SIZE.getBytes()) { - throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE); - } - if (blobSize > s3BlobStore.bufferSizeInBytes()) { - throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than buffer size"); - } - - final ObjectMetadata md = new ObjectMetadata(); - md.setContentLength(blobSize); - if (s3BlobStore.serverSideEncryption()) { - md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); - } - final PutObjectRequest putRequest = new PutObjectRequest(s3BlobStore.bucket(), blobName, input, md); - putRequest.setStorageClass(s3BlobStore.getStorageClass()); - putRequest.setCannedAcl(s3BlobStore.getCannedACL()); - S3BlobStore.configureRequestForMetrics(putRequest, blobStore, 
Operation.PUT_OBJECT, purpose); + final var putRequestBuilder = PutObjectRequest.builder() + .bucket(s3BlobStore.bucket()) + .key(blobName) + .contentLength(blobSize) + .storageClass(s3BlobStore.getStorageClass()) + .acl(s3BlobStore.getCannedACL()); + if (s3BlobStore.serverSideEncryption()) { + putRequestBuilder.serverSideEncryption(ServerSideEncryption.AES256); + } + S3BlobStore.configureRequestForMetrics(putRequestBuilder, blobStore, Operation.PUT_OBJECT, purpose); - try (AmazonS3Reference clientReference = s3BlobStore.clientReference()) { - SocketAccess.doPrivilegedVoid(() -> clientReference.client().putObject(putRequest)); - } catch (final AmazonClientException e) { + final var putRequest = putRequestBuilder.build(); + SocketAccess.doPrivilegedVoid( + () -> clientReference.client().putObject(putRequest, RequestBody.fromInputStream(input, blobSize)) + ); + } catch (final SdkException e) { throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e); } } - /** - * Uploads a blob using multipart upload requests. - */ - void executeMultipartUpload( - OperationPurpose purpose, + private interface PartOperation { + CompletedPart doPart(String uploadId, int partNum, long partSize, boolean lastPart); + } + + // for copy, blobName and s3BlobStore are the destination + private void executeMultipart( + final OperationPurpose purpose, + final Operation operation, final S3BlobStore s3BlobStore, final String blobName, - final InputStream input, - final long blobSize + final long partSize, + final long blobSize, + final PartOperation partOperation ) throws IOException { ensureMultiPartUploadSize(blobSize); - final long partSize = s3BlobStore.bufferSizeInBytes(); final Tuple multiparts = numberOfMultiparts(blobSize, partSize); if (multiparts.v1() > Integer.MAX_VALUE) { - throw new IllegalArgumentException("Too many multipart upload requests, maybe try a larger buffer size?"); + throw new IllegalArgumentException("Too many multipart upload requests, maybe try a larger part size?"); } final int nbParts = multiparts.v1().intValue(); final long lastPartSize = multiparts.v2(); assert blobSize == (((nbParts - 1) * partSize) + lastPartSize) : "blobSize does not match multipart sizes"; - final SetOnce uploadId = new SetOnce<>(); + final List cleanupOnFailureActions = new ArrayList<>(1); final String bucketName = s3BlobStore.bucket(); - boolean success = false; - try (AmazonS3Reference clientReference = s3BlobStore.clientReference()) { - - uploadId.set( - SocketAccess.doPrivileged( - () -> clientReference.client().initiateMultipartUpload(initiateMultiPartUpload(purpose, blobName)).getUploadId() - ) - ); - if (Strings.isEmpty(uploadId.get())) { - throw new IOException("Failed to initialize multipart upload " + blobName); + try { + final String uploadId; + try (AmazonS3Reference clientReference = s3BlobStore.clientReference()) { + uploadId = SocketAccess.doPrivileged( + () -> clientReference.client().createMultipartUpload(createMultipartUpload(purpose, operation, blobName)).uploadId() + ); + cleanupOnFailureActions.add(() -> abortMultiPartUpload(purpose, uploadId, blobName)); + } + if (Strings.isEmpty(uploadId)) { + throw new IOException("Failed to initialize multipart operation for " + blobName); } - final List parts = new ArrayList<>(); + final List parts = new ArrayList<>(); long bytesCount = 0; for (int i = 1; i <= nbParts; i++) { final boolean lastPart = i == nbParts; - final UploadPartRequest uploadRequest = createPartUploadRequest( - purpose, - input, - uploadId.get(), - i, 
- blobName, - lastPart ? lastPartSize : partSize, - lastPart - ); - bytesCount += uploadRequest.getPartSize(); - - final UploadPartResult uploadResponse = SocketAccess.doPrivileged(() -> clientReference.client().uploadPart(uploadRequest)); - parts.add(uploadResponse.getPartETag()); + final var curPartSize = lastPart ? lastPartSize : partSize; + final var partEtag = partOperation.doPart(uploadId, i, curPartSize, lastPart); + bytesCount += curPartSize; + parts.add(partEtag); } if (bytesCount != blobSize) { throw new IOException( - "Failed to execute multipart upload for [" + blobName + "], expected " + blobSize + "bytes sent but got " + bytesCount + "Failed to execute multipart operation for [" + + blobName + + "], expected " + + blobSize + + "bytes sent but got " + + bytesCount ); } - final CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest( - bucketName, - blobName, - uploadId.get(), - parts - ); - S3BlobStore.configureRequestForMetrics(complRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose); - SocketAccess.doPrivilegedVoid(() -> clientReference.client().completeMultipartUpload(complRequest)); - success = true; - - } catch (final AmazonClientException e) { - throw new IOException("Unable to upload object [" + blobName + "] using multipart upload", e); - } finally { - if ((success == false) && Strings.hasLength(uploadId.get())) { - abortMultiPartUpload(purpose, uploadId.get(), blobName); + final var completeMultipartUploadRequestBuilder = CompleteMultipartUploadRequest.builder() + .bucket(bucketName) + .key(blobName) + .uploadId(uploadId) + .multipartUpload(b -> b.parts(parts)); + S3BlobStore.configureRequestForMetrics(completeMultipartUploadRequestBuilder, blobStore, operation, purpose); + final var completeMultipartUploadRequest = completeMultipartUploadRequestBuilder.build(); + try (var clientReference = s3BlobStore.clientReference()) { + SocketAccess.doPrivilegedVoid(() -> clientReference.client().completeMultipartUpload(completeMultipartUploadRequest)); + } + cleanupOnFailureActions.clear(); + } catch (final Exception e) { + if (e instanceof SdkServiceException sse && sse.statusCode() == RestStatus.NOT_FOUND.getStatus()) { + throw new NoSuchFileException(blobName, null, e.getMessage()); } + throw new IOException("Unable to upload or copy object [" + blobName + "] using multipart upload", e); + } finally { + cleanupOnFailureActions.forEach(Runnable::run); } } + /** + * Uploads a blob using multipart upload requests. 
+ */ + void executeMultipartUpload( + OperationPurpose purpose, + final S3BlobStore s3BlobStore, + final String blobName, + final InputStream input, + final long blobSize + ) throws IOException { + executeMultipart( + purpose, + Operation.PUT_MULTIPART_OBJECT, + s3BlobStore, + blobName, + s3BlobStore.bufferSizeInBytes(), + blobSize, + (uploadId, partNum, partSize, lastPart) -> { + final UploadPartRequest uploadRequest = createPartUploadRequest(purpose, uploadId, partNum, blobName, partSize, lastPart); + + try (var clientReference = s3BlobStore.clientReference()) { + final UploadPartResponse uploadResponse = SocketAccess.doPrivileged( + () -> clientReference.client().uploadPart(uploadRequest, RequestBody.fromInputStream(input, partSize)) + ); + + return CompletedPart.builder().partNumber(partNum).eTag(uploadResponse.eTag()).build(); + } + } + ); + } + // non-static, package private for testing void ensureMultiPartUploadSize(final long blobSize) { if (blobSize > MAX_FILE_SIZE_USING_MULTIPART.getBytes()) { @@ -594,13 +668,13 @@ static Tuple numberOfMultiparts(final long totalSize, final long par private class CompareAndExchangeOperation { private final OperationPurpose purpose; - private final AmazonS3 client; + private final S3Client client; private final String bucket; private final String rawKey; private final String blobKey; private final ThreadPool threadPool; - CompareAndExchangeOperation(OperationPurpose purpose, AmazonS3 client, String bucket, String key, ThreadPool threadPool) { + CompareAndExchangeOperation(OperationPurpose purpose, S3Client client, String bucket, String key, ThreadPool threadPool) { this.purpose = purpose; this.client = client; this.bucket = bucket; @@ -623,9 +697,9 @@ void run(BytesReference expected, BytesReference updated, ActionListener upload.getInitiated().after(expiryDate))) { + final var expiryDate = Instant.ofEpochMilli(blobStore.getThreadPool().absoluteTimeInMillis() - timeToLiveMillis); + if (uploads.stream().anyMatch(upload -> upload.initiated().compareTo(expiryDate) > 0)) { logger.trace("[{}] fresh preexisting uploads vs {}", blobKey, expiryDate); return true; } // there are uploads, but they are all older than the TTL, so clean them up before carrying on (should be rare) for (final var upload : uploads) { - logger.warn( - "cleaning up stale compare-and-swap upload [{}] initiated at [{}]", - upload.getUploadId(), - upload.getInitiated() - ); - safeAbortMultipartUpload(upload.getUploadId()); + logger.warn("cleaning up stale compare-and-swap upload [{}] initiated at [{}]", upload.uploadId(), upload.initiated()); + safeAbortMultipartUpload(upload.uploadId()); } logger.trace("[{}] stale preexisting uploads vs {}", blobKey, expiryDate); @@ -726,53 +796,57 @@ private void logUploads(String description, List uploads) { blobKey, description, uploads.stream() - .map(multipartUpload -> multipartUpload.getUploadId() + ": " + multipartUpload.getInitiated()) + .map(multipartUpload -> multipartUpload.uploadId() + ": " + multipartUpload.initiated()) .collect(Collectors.joining(",")) ); } } private List listMultipartUploads() { - final var listRequest = new ListMultipartUploadsRequest(bucket); - listRequest.setPrefix(blobKey); - S3BlobStore.configureRequestForMetrics(listRequest, blobStore, Operation.LIST_OBJECTS, purpose); + final var listRequestBuilder = ListMultipartUploadsRequest.builder().bucket(bucket).prefix(blobKey); + S3BlobStore.configureRequestForMetrics(listRequestBuilder, blobStore, Operation.LIST_OBJECTS, purpose); + final var listRequest = 
listRequestBuilder.build(); try { - return SocketAccess.doPrivileged(() -> client.listMultipartUploads(listRequest)).getMultipartUploads(); - } catch (AmazonS3Exception e) { - if (e.getStatusCode() == 404) { + return SocketAccess.doPrivileged(() -> client.listMultipartUploads(listRequest)).uploads(); + } catch (SdkServiceException e) { + if (e.statusCode() == 404) { return List.of(); } throw e; } } - private String initiateMultipartUpload() { - final var initiateRequest = new InitiateMultipartUploadRequest(bucket, blobKey); - S3BlobStore.configureRequestForMetrics(initiateRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose); - return SocketAccess.doPrivileged(() -> client.initiateMultipartUpload(initiateRequest)).getUploadId(); + private String createMultipartUpload() { + final var createMultipartUploadRequestBuilder = CreateMultipartUploadRequest.builder().bucket(bucket).key(blobKey); + S3BlobStore.configureRequestForMetrics(createMultipartUploadRequestBuilder, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose); + final var createMultipartUploadRequest = createMultipartUploadRequestBuilder.build(); + return SocketAccess.doPrivileged(() -> client.createMultipartUpload(createMultipartUploadRequest)).uploadId(); } - private PartETag uploadPart(BytesReference updated, String uploadId) throws IOException { - final var uploadPartRequest = new UploadPartRequest(); - uploadPartRequest.setBucketName(bucket); - uploadPartRequest.setKey(blobKey); - uploadPartRequest.setUploadId(uploadId); - uploadPartRequest.setPartNumber(1); - uploadPartRequest.setLastPart(true); - uploadPartRequest.setInputStream(updated.streamInput()); - uploadPartRequest.setPartSize(updated.length()); - S3BlobStore.configureRequestForMetrics(uploadPartRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose); - return SocketAccess.doPrivileged(() -> client.uploadPart(uploadPartRequest)).getPartETag(); + private String uploadPartAndGetEtag(BytesReference updated, String uploadId) throws IOException { + final var uploadPartRequestBuilder = UploadPartRequest.builder(); + uploadPartRequestBuilder.bucket(bucket); + uploadPartRequestBuilder.key(blobKey); + uploadPartRequestBuilder.uploadId(uploadId); + uploadPartRequestBuilder.partNumber(1); + uploadPartRequestBuilder.sdkPartType(SdkPartType.LAST); + S3BlobStore.configureRequestForMetrics(uploadPartRequestBuilder, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose); + return SocketAccess.doPrivilegedIOException( + () -> client.uploadPart( + uploadPartRequestBuilder.build(), + RequestBody.fromInputStream(updated.streamInput(), updated.length()) + ) + ).eTag(); } private int getUploadIndex(String targetUploadId, List multipartUploads) { var uploadIndex = 0; var found = false; - for (MultipartUpload multipartUpload : multipartUploads) { - final var observedUploadId = multipartUpload.getUploadId(); + for (final var multipartUpload : multipartUploads) { + final var observedUploadId = multipartUpload.uploadId(); if (observedUploadId.equals(targetUploadId)) { final var currentTimeMillis = blobStore.getThreadPool().absoluteTimeInMillis(); - final var ageMillis = currentTimeMillis - multipartUpload.getInitiated().toInstant().toEpochMilli(); + final var ageMillis = currentTimeMillis - multipartUpload.initiated().toEpochMilli(); final var expectedAgeRangeMillis = blobStore.getCompareAndExchangeTimeToLive().millis(); if (0 <= expectedAgeRangeMillis && (ageMillis < -expectedAgeRangeMillis || ageMillis > expectedAgeRangeMillis)) { logger.warn( @@ -781,8 +855,8 @@ private int 
getUploadIndex(String targetUploadId, List multipar which deviates from local node epoch time [{}] by more than the warn threshold of [{}ms]""", bucket, blobKey, - multipartUpload.getInitiated(), - multipartUpload.getInitiated().toInstant().toEpochMilli(), + multipartUpload.initiated(), + multipartUpload.initiated().toEpochMilli(), currentTimeMillis, expectedAgeRangeMillis ); @@ -835,7 +909,7 @@ private void cancelOtherUploads(String uploadId, List currentUp final var executor = blobStore.getSnapshotExecutor(); try (var listeners = new RefCountingListener(listener)) { for (final var currentUpload : currentUploads) { - final var currentUploadId = currentUpload.getUploadId(); + final var currentUploadId = currentUpload.uploadId(); if (uploadId.equals(currentUploadId) == false) { executor.execute(ActionRunnable.run(listeners.acquire(), () -> abortMultipartUploadIfExists(currentUploadId))); } @@ -854,20 +928,39 @@ private void safeAbortMultipartUpload(String uploadId) { private void abortMultipartUploadIfExists(String uploadId) { try { - final var request = new AbortMultipartUploadRequest(bucket, blobKey, uploadId); - S3BlobStore.configureRequestForMetrics(request, blobStore, Operation.ABORT_MULTIPART_OBJECT, purpose); - SocketAccess.doPrivilegedVoid(() -> client.abortMultipartUpload(request)); - } catch (AmazonS3Exception e) { - if (e.getStatusCode() != 404) { + final var abortMultipartUploadRequestBuilder = AbortMultipartUploadRequest.builder() + .bucket(bucket) + .key(blobKey) + .uploadId(uploadId); + S3BlobStore.configureRequestForMetrics( + abortMultipartUploadRequestBuilder, + blobStore, + Operation.ABORT_MULTIPART_OBJECT, + purpose + ); + final var abortMultipartUploadRequest = abortMultipartUploadRequestBuilder.build(); + SocketAccess.doPrivilegedVoid(() -> client.abortMultipartUpload(abortMultipartUploadRequest)); + } catch (SdkServiceException e) { + if (e.statusCode() != 404) { throw e; } // else already aborted } } - private void completeMultipartUpload(String uploadId, PartETag partETag) { - final var completeMultipartUploadRequest = new CompleteMultipartUploadRequest(bucket, blobKey, uploadId, List.of(partETag)); - S3BlobStore.configureRequestForMetrics(completeMultipartUploadRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose); + private void completeMultipartUpload(String uploadId, String partETag) { + final var completeMultipartUploadRequestBuilder = CompleteMultipartUploadRequest.builder() + .bucket(bucket) + .key(blobKey) + .uploadId(uploadId) + .multipartUpload(b -> b.parts(CompletedPart.builder().partNumber(1).eTag(partETag).build())); + S3BlobStore.configureRequestForMetrics( + completeMultipartUploadRequestBuilder, + blobStore, + Operation.PUT_MULTIPART_OBJECT, + purpose + ); + final var completeMultipartUploadRequest = completeMultipartUploadRequestBuilder.build(); SocketAccess.doPrivilegedVoid(() -> client.completeMultipartUpload(completeMultipartUploadRequest)); } } @@ -883,9 +976,10 @@ public void compareAndExchangeRegister( final var clientReference = blobStore.clientReference(); ActionListener.run(ActionListener.releaseAfter(listener.delegateResponse((delegate, e) -> { logger.trace(() -> Strings.format("[%s]: compareAndExchangeRegister failed", key), e); - if (e instanceof AmazonS3Exception amazonS3Exception - && (amazonS3Exception.getStatusCode() == 404 - || amazonS3Exception.getStatusCode() == 200 && "NoSuchUpload".equals(amazonS3Exception.getErrorCode()))) { + if (e instanceof AwsServiceException awsServiceException + && (awsServiceException.statusCode() 
== 404 + || awsServiceException.statusCode() == 200 + && "NoSuchUpload".equals(awsServiceException.awsErrorDetails().errorCode()))) { // An uncaught 404 means that our multipart upload was aborted by a concurrent operation before we could complete it. // Also (rarely) S3 can start processing the request during a concurrent abort and this can result in a 200 OK with an // NoSuchUpload... in the response. Either way, this means that our write encountered contention: @@ -905,17 +999,18 @@ public void compareAndExchangeRegister( @Override public void getRegister(OperationPurpose purpose, String key, ActionListener listener) { ActionListener.completeWith(listener, () -> { - final var getObjectRequest = new GetObjectRequest(blobStore.bucket(), buildKey(key)); - S3BlobStore.configureRequestForMetrics(getObjectRequest, blobStore, Operation.GET_OBJECT, purpose); + Exception finalException = null; + final var getObjectRequestBuilder = GetObjectRequest.builder().bucket(blobStore.bucket()).key(buildKey(key)); + S3BlobStore.configureRequestForMetrics(getObjectRequestBuilder, blobStore, Operation.GET_OBJECT, purpose); + final var getObjectRequest = getObjectRequestBuilder.build(); try ( var clientReference = blobStore.clientReference(); var s3Object = SocketAccess.doPrivileged(() -> clientReference.client().getObject(getObjectRequest)); - var stream = s3Object.getObjectContent() ) { - return OptionalBytesReference.of(getRegisterUsingConsistentRead(stream, keyPath, key)); - } catch (AmazonS3Exception e) { + return OptionalBytesReference.of(getRegisterUsingConsistentRead(s3Object, keyPath, key)); + } catch (Exception e) { logger.trace(() -> Strings.format("[%s]: getRegister failed", key), e); - if (e.getStatusCode() == 404) { + if (e instanceof AwsServiceException awsServiceException && awsServiceException.statusCode() == 404) { return OptionalBytesReference.EMPTY; } else { throw e; @@ -927,10 +1022,19 @@ public void getRegister(OperationPurpose purpose, String key, ActionListener getMultipartUploadCleanupListener(int maxUploads, RefCountingRunnable refs) { try (var clientReference = blobStore.clientReference()) { final var bucket = blobStore.bucket(); - final var request = new ListMultipartUploadsRequest(bucket).withPrefix(keyPath).withMaxUploads(maxUploads); - request.putCustomQueryParameter(S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, OperationPurpose.SNAPSHOT_DATA.getKey()); - final var multipartUploadListing = SocketAccess.doPrivileged(() -> clientReference.client().listMultipartUploads(request)); - final var multipartUploads = multipartUploadListing.getMultipartUploads(); + final var listMultipartUploadsRequest = ListMultipartUploadsRequest.builder() + .bucket(bucket) + .prefix(keyPath) + .maxUploads(maxUploads) + // TODO adjust to use S3BlobStore.configureRequestForMetrics, adding metrics collection + .overrideConfiguration( + b -> b.putRawQueryParameter(S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, OperationPurpose.SNAPSHOT_DATA.getKey()) + ) + .build(); + final var multipartUploadListing = SocketAccess.doPrivileged( + () -> clientReference.client().listMultipartUploads(listMultipartUploadsRequest) + ); + final var multipartUploads = multipartUploadListing.uploads(); if (multipartUploads.isEmpty()) { logger.debug("found no multipart uploads to clean up"); return ActionListener.noop(); @@ -949,7 +1053,21 @@ ActionListener getMultipartUploadCleanupListener(int maxUploads, RefCounti } return newMultipartUploadCleanupListener( refs, - multipartUploads.stream().map(u -> new 
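The listing hunk above attaches the x-purpose tag through the request's override configuration because the v2 SDK has no putCustomQueryParameter on the request itself. A sketch of that pattern in isolation (the parameter name matches the PR's constant; the "SnapshotData" value is illustrative, not necessarily the real key):

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest;
    import software.amazon.awssdk.services.s3.model.ListMultipartUploadsResponse;

    class PurposeTaggingSketch {
        static ListMultipartUploadsResponse listWithPurpose(S3Client client, String bucket, String prefix) {
            final ListMultipartUploadsRequest request = ListMultipartUploadsRequest.builder()
                .bucket(bucket)
                .prefix(prefix)
                .maxUploads(1000)
                // raw query parameters ride along on the signed HTTP request
                .overrideConfiguration(b -> b.putRawQueryParameter("x-purpose", "SnapshotData"))
                .build();
            return client.listMultipartUploads(request);
        }
    }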
AbortMultipartUploadRequest(bucket, u.getKey(), u.getUploadId())).toList() + Iterators.map( + multipartUploads.iterator(), + u -> AbortMultipartUploadRequest.builder() + .bucket(bucket) + .key(u.key()) + .uploadId(u.uploadId()) + // TODO adjust to use S3BlobStore.configureRequestForMetrics, adding metrics collection + .overrideConfiguration( + b -> b.putRawQueryParameter( + S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, + OperationPurpose.SNAPSHOT_DATA.getKey() + ) + ) + .build() + ) ); } } catch (Exception e) { @@ -961,25 +1079,22 @@ ActionListener getMultipartUploadCleanupListener(int maxUploads, RefCounti private ActionListener newMultipartUploadCleanupListener( RefCountingRunnable refs, - List abortMultipartUploadRequests + Iterator abortMultipartUploadRequestIterator ) { return new ThreadedActionListener<>(blobStore.getSnapshotExecutor(), ActionListener.releaseAfter(new ActionListener<>() { @Override public void onResponse(Void unused) { try (var clientReference = blobStore.clientReference()) { - for (final var abortMultipartUploadRequest : abortMultipartUploadRequests) { - abortMultipartUploadRequest.putCustomQueryParameter( - S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, - OperationPurpose.SNAPSHOT_DATA.getKey() - ); + while (abortMultipartUploadRequestIterator.hasNext()) { + final var abortMultipartUploadRequest = abortMultipartUploadRequestIterator.next(); try { SocketAccess.doPrivilegedVoid(() -> clientReference.client().abortMultipartUpload(abortMultipartUploadRequest)); logger.info( "cleaned up dangling multipart upload [{}] of blob [{}][{}][{}]", - abortMultipartUploadRequest.getUploadId(), + abortMultipartUploadRequest.uploadId(), blobStore.getRepositoryMetadata().name(), - abortMultipartUploadRequest.getBucketName(), - abortMultipartUploadRequest.getKey() + abortMultipartUploadRequest.bucket(), + abortMultipartUploadRequest.key() ); } catch (Exception e) { // Cleanup is a best-effort thing, we can't do anything better than log and carry on here. 
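The cleanup path above switches from materialising a List of abort requests up front to mapping them lazily off the listing. Iterators.map is an Elasticsearch utility; a plain-Java equivalent, just to show the idea:

    import java.util.Iterator;
    import java.util.function.Function;

    class MappingIteratorSketch {
        // Wraps an iterator, applying fn to each element on demand (no intermediate list).
        static <T, R> Iterator<R> map(Iterator<? extends T> input, Function<? super T, ? extends R> fn) {
            return new Iterator<R>() {
                @Override
                public boolean hasNext() {
                    return input.hasNext();
                }

                @Override
                public R next() {
                    return fn.apply(input.next());
                }
            };
        }
    }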
Note that any failure @@ -988,10 +1103,10 @@ public void onResponse(Void unused) { logger.warn( Strings.format( "failed to clean up multipart upload [%s] of blob [%s][%s][%s]", - abortMultipartUploadRequest.getUploadId(), + abortMultipartUploadRequest.uploadId(), blobStore.getRepositoryMetadata().name(), - abortMultipartUploadRequest.getBucketName(), - abortMultipartUploadRequest.getKey() + abortMultipartUploadRequest.bucket(), + abortMultipartUploadRequest.key() ), e ); diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index 2a35ff5598659..a994938095384 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -9,20 +9,21 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.AmazonClientException; -import com.amazonaws.AmazonWebServiceRequest; -import com.amazonaws.Request; -import com.amazonaws.Response; -import com.amazonaws.metrics.RequestMetricCollector; -import com.amazonaws.services.s3.model.CannedAccessControlList; -import com.amazonaws.services.s3.model.DeleteObjectsRequest; -import com.amazonaws.services.s3.model.MultiObjectDeleteException; -import com.amazonaws.services.s3.model.StorageClass; -import com.amazonaws.util.AWSRequestMetrics; -import com.amazonaws.util.TimingInfo; +import software.amazon.awssdk.awscore.AwsRequest; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; +import software.amazon.awssdk.services.s3.model.ObjectCannedACL; +import software.amazon.awssdk.services.s3.model.ObjectIdentifier; +import software.amazon.awssdk.services.s3.model.S3Error; +import software.amazon.awssdk.services.s3.model.StorageClass; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.common.Strings; @@ -34,6 +35,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; @@ -44,15 +46,14 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; -import java.util.Optional; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.LongAdder; +import java.util.function.Predicate; import java.util.stream.Collectors; -import static org.elasticsearch.rest.RestStatus.REQUESTED_RANGE_NOT_SATISFIED; - class S3BlobStore implements BlobStore { public static final String CUSTOM_QUERY_PARAMETER_PURPOSE = "x-purpose"; @@ -77,7 +78,7 @@ class S3BlobStore implements BlobStore { private final boolean serverSideEncryption; - private final CannedAccessControlList cannedACL; + private final ObjectCannedACL cannedACL; private final StorageClass storageClass; @@ -115,11 +116,10 @@ 
class S3BlobStore implements BlobStore { this.snapshotExecutor = threadPool.executor(ThreadPool.Names.SNAPSHOT); this.s3RepositoriesMetrics = s3RepositoriesMetrics; this.bulkDeletionBatchSize = S3Repository.DELETION_BATCH_SIZE_SETTING.get(repositoryMetadata.settings()); - } - RequestMetricCollector getMetricCollector(Operation operation, OperationPurpose purpose) { - return statsCollectors.getMetricCollector(operation, purpose); + MetricPublisher getMetricPublisher(Operation operation, OperationPurpose purpose) { + return statsCollectors.getMetricPublisher(operation, purpose); } public Executor getSnapshotExecutor() { @@ -135,9 +135,9 @@ public TimeValue getCompareAndExchangeAntiContentionDelay() { } /** - * A {@link RequestMetricCollector} that processes the metrics related to each API invocation attempt according to Elasticsearch's needs + * A {@link MetricPublisher} that processes the metrics related to each API invocation attempt according to Elasticsearch's needs */ - class ElasticsearchS3MetricsCollector extends RequestMetricCollector { + class ElasticsearchS3MetricsCollector implements MetricPublisher { final LongAdder counter = new LongAdder(); private final Operation operation; @@ -158,137 +158,81 @@ private ElasticsearchS3MetricsCollector(Operation operation, OperationPurpose pu } @Override - public final void collectMetrics(Request request, Response response) { - assert assertConsistencyBetweenHttpRequestAndOperation(request, operation); - final AWSRequestMetrics awsRequestMetrics = request.getAWSRequestMetrics(); - final TimingInfo timingInfo = awsRequestMetrics.getTimingInfo(); - final long requestCount = getCountForMetric(timingInfo, AWSRequestMetrics.Field.RequestCount); - final long exceptionCount = getCountForMetric(timingInfo, AWSRequestMetrics.Field.Exception); - final long throttleCount = getCountForMetric(timingInfo, AWSRequestMetrics.Field.ThrottleException); - - // For stats reported by API, do not collect stats for null response for BWC. - // See https://github.com/elastic/elasticsearch/pull/71406 - // TODO Is this BWC really necessary? - if (response != null) { - counter.add(requestCount); + public void publish(MetricCollection metricCollection) { + assert operation.assertConsistentOperationName(metricCollection); + + boolean overallSuccess = false; + for (final var successMetricValue : metricCollection.metricValues(CoreMetric.API_CALL_SUCCESSFUL)) { + // The API allows for multiple success flags but in practice there should be only one; check they're all true for safety: + if (Boolean.TRUE.equals(successMetricValue)) { + overallSuccess = true; // but keep checking just in case + } else { + overallSuccess = false; + break; + } } - // We collect all metrics regardless whether response is null - // There are many situations other than network where a null response can be returned. - // In addition, we are interested in the stats when there is a network outage. - final int numberOfAwsErrors = Optional.ofNullable(awsRequestMetrics.getProperty(AWSRequestMetrics.Field.AWSErrorCode)) - .map(List::size) - .orElse(0); - - if (exceptionCount > 0) { - final List statusCodes = Objects.requireNonNullElse( - awsRequestMetrics.getProperty(AWSRequestMetrics.Field.StatusCode), - List.of() - ); - // REQUESTED_RANGE_NOT_SATISFIED errors are expected errors due to RCO - // TODO Add more expected client error codes? 
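As the replacement for v1's RequestMetricCollector, a MetricPublisher is attached to each request through the same override-configuration mechanism. A minimal sketch of the wiring, with the publisher implementation elided (names are illustrative, not the plugin's):

    import java.util.List;

    import software.amazon.awssdk.metrics.MetricPublisher;
    import software.amazon.awssdk.services.s3.model.GetObjectRequest;

    class PublisherWiringSketch {
        static GetObjectRequest tagged(String bucket, String key, MetricPublisher publisher) {
            return GetObjectRequest.builder()
                .bucket(bucket)
                .key(key)
                // the SDK calls publisher.publish(...) once per API call with the collected metrics
                .overrideConfiguration(b -> b.metricPublishers(List.of(publisher)))
                .build();
        }
    }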
- final long amountOfRequestRangeNotSatisfiedErrors = statusCodes.stream() - .filter(e -> (Integer) e == REQUESTED_RANGE_NOT_SATISFIED.getStatus()) - .count(); - if (amountOfRequestRangeNotSatisfiedErrors > 0) { - s3RepositoriesMetrics.common() - .requestRangeNotSatisfiedExceptionCounter() - .incrementBy(amountOfRequestRangeNotSatisfiedErrors, attributes); + long totalTimeNanoseconds = 0; + for (final var durationMetricValue : metricCollection.metricValues(CoreMetric.API_CALL_DURATION)) { + totalTimeNanoseconds += durationMetricValue.toNanos(); + } + + long requestCount = 0; + long responseCount = 0; + long awsErrorCount = 0; + long throttleCount = 0; + long http416ResponseCount = 0; + for (final var apiCallAttemptMetrics : metricCollection.children()) { + if ("ApiCallAttempt".equals(apiCallAttemptMetrics.name()) == false) { + continue; + } + requestCount += 1; + final var errorTypes = apiCallAttemptMetrics.metricValues(CoreMetric.ERROR_TYPE); + if (errorTypes != null && errorTypes.size() > 0) { + awsErrorCount += 1; + if (errorTypes.contains("Throttling")) { + throttleCount += 1; + } + } + + final var httpResponses = apiCallAttemptMetrics.metricValues(HttpMetric.HTTP_STATUS_CODE); + if (httpResponses != null && httpResponses.size() > 0) { + responseCount += 1; + if (httpResponses.contains(RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus())) { + http416ResponseCount += 1; + } } } + // See https://github.com/elastic/elasticsearch/pull/71406 and https://elasticco.atlassian.net/browse/ES-10223 + counter.add(responseCount); // requests that didn't get a HTTP status code assumed not to have reached S3 at all s3RepositoriesMetrics.common().operationCounter().incrementBy(1, attributes); - if (numberOfAwsErrors == requestCount) { + if (overallSuccess == false) { s3RepositoriesMetrics.common().unsuccessfulOperationCounter().incrementBy(1, attributes); } s3RepositoriesMetrics.common().requestCounter().incrementBy(requestCount, attributes); - if (exceptionCount > 0) { - s3RepositoriesMetrics.common().exceptionCounter().incrementBy(exceptionCount, attributes); - s3RepositoriesMetrics.common().exceptionHistogram().record(exceptionCount, attributes); + if (awsErrorCount > 0) { + s3RepositoriesMetrics.common().exceptionCounter().incrementBy(awsErrorCount, attributes); + s3RepositoriesMetrics.common().exceptionHistogram().record(awsErrorCount, attributes); } if (throttleCount > 0) { s3RepositoriesMetrics.common().throttleCounter().incrementBy(throttleCount, attributes); s3RepositoriesMetrics.common().throttleHistogram().record(throttleCount, attributes); } - maybeRecordHttpRequestTime(request); - } - - /** - * Used for APM style metrics to measure statics about performance. This is not for billing. 
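The publish(MetricCollection) traversal above relies on the v2 SDK emitting one child collection named "ApiCallAttempt" per HTTP attempt, retries included. A condensed sketch of that walk, using only SDK-defined metrics (the counters are local to the example and not the plugin's real accounting):

    import software.amazon.awssdk.core.metrics.CoreMetric;
    import software.amazon.awssdk.http.HttpMetric;
    import software.amazon.awssdk.metrics.MetricCollection;
    import software.amazon.awssdk.metrics.MetricPublisher;

    class CountingPublisherSketch implements MetricPublisher {
        long attempts, errors, http416s;

        @Override
        public void publish(MetricCollection metrics) {
            // one child collection named "ApiCallAttempt" per HTTP attempt, including retries
            for (MetricCollection attempt : metrics.children()) {
                if ("ApiCallAttempt".equals(attempt.name()) == false) {
                    continue;
                }
                attempts++;
                if (attempt.metricValues(CoreMetric.ERROR_TYPE).isEmpty() == false) {
                    errors++;
                }
                if (attempt.metricValues(HttpMetric.HTTP_STATUS_CODE).contains(416)) {
                    http416s++;
                }
            }
        }

        @Override
        public void close() {}
    }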
- */ - private void maybeRecordHttpRequestTime(Request request) { - final List requestTimesIncludingRetries = request.getAWSRequestMetrics() - .getTimingInfo() - .getAllSubMeasurements(AWSRequestMetrics.Field.HttpRequestTime.name()); - // It can be null if the request did not reach the server for some reason - if (requestTimesIncludingRetries == null) { - return; + if (http416ResponseCount > 0) { + s3RepositoriesMetrics.common().requestRangeNotSatisfiedExceptionCounter().incrementBy(http416ResponseCount, attributes); } - final long totalTimeInNanos = getTotalTimeInNanos(requestTimesIncludingRetries); - if (totalTimeInNanos == 0) { - logger.warn("Expected HttpRequestTime to be tracked for request [{}] but found no count.", request); - } else { + if (totalTimeNanoseconds > 0) { s3RepositoriesMetrics.common() .httpRequestTimeInMillisHistogram() - .record(TimeUnit.NANOSECONDS.toMillis(totalTimeInNanos), attributes); - } - } - - private boolean assertConsistencyBetweenHttpRequestAndOperation(Request request, Operation operation) { - switch (operation) { - case HEAD_OBJECT -> { - return request.getHttpMethod().name().equals("HEAD"); - } - case GET_OBJECT, LIST_OBJECTS -> { - return request.getHttpMethod().name().equals("GET"); - } - case PUT_OBJECT -> { - return request.getHttpMethod().name().equals("PUT"); - } - case PUT_MULTIPART_OBJECT -> { - return request.getHttpMethod().name().equals("PUT") || request.getHttpMethod().name().equals("POST"); - } - case DELETE_OBJECTS -> { - return request.getHttpMethod().name().equals("POST"); - } - case ABORT_MULTIPART_OBJECT -> { - return request.getHttpMethod().name().equals("DELETE"); - } - default -> throw new AssertionError("unknown operation [" + operation + "]"); - } - } - } - - private static long getCountForMetric(TimingInfo info, AWSRequestMetrics.Field field) { - var count = info.getCounter(field.name()); - if (count == null) { - if (field == AWSRequestMetrics.Field.RequestCount) { - final String message = "Expected request count to be tracked but found not count."; - assert false : message; - logger.warn(message); + .record(TimeUnit.NANOSECONDS.toMillis(totalTimeNanoseconds), attributes); } - return 0L; - } else { - return count.longValue(); } - } - private static long getTotalTimeInNanos(List requestTimesIncludingRetries) { - // Here we calculate the timing in Nanoseconds for the sum of the individual subMeasurements with the goal of deriving the TTFB - // (time to first byte). We use high precision time here to tell from the case when request time metric is missing (0). - // The time is converted to milliseconds for later use with an APM style counter (exposed as a long), rather than using the - // default double exposed by getTimeTakenMillisIfKnown(). - // We don't need sub-millisecond precision. So no need perform the data type castings. - long totalTimeInNanos = 0; - for (TimingInfo timingInfo : requestTimesIncludingRetries) { - var endTimeInNanos = timingInfo.getEndTimeNanoIfKnown(); - if (endTimeInNanos != null) { - totalTimeInNanos += endTimeInNanos - timingInfo.getStartTimeNano(); - } - } - return totalTimeInNanos; + @Override + public void close() {} } @Override @@ -350,19 +294,19 @@ void deleteBlobs(OperationPurpose purpose, Iterator blobNames) throws IO return; } - final List partition = new ArrayList<>(); - try (AmazonS3Reference clientReference = clientReference()) { + final List partition = new ArrayList<>(); + try { // S3 API only allows 1k blobs per delete so we split up the given blobs into requests of max. 
1k deletes final var deletionExceptions = new DeletionExceptions(); blobNames.forEachRemaining(key -> { - partition.add(key); + partition.add(ObjectIdentifier.builder().key(key).build()); if (partition.size() == bulkDeletionBatchSize) { - deletePartition(purpose, clientReference, partition, deletionExceptions); + deletePartition(purpose, partition, deletionExceptions); partition.clear(); } }); if (partition.isEmpty() == false) { - deletePartition(purpose, clientReference, partition, deletionExceptions); + deletePartition(purpose, partition, deletionExceptions); } if (deletionExceptions.exception != null) { throw deletionExceptions.exception; @@ -372,32 +316,36 @@ void deleteBlobs(OperationPurpose purpose, Iterator blobNames) throws IO } } - private void deletePartition( - OperationPurpose purpose, - AmazonS3Reference clientReference, - List partition, - DeletionExceptions deletionExceptions - ) { - try { - SocketAccess.doPrivilegedVoid(() -> clientReference.client().deleteObjects(bulkDelete(purpose, this, partition))); - } catch (MultiObjectDeleteException e) { - // We are sending quiet mode requests so we can't use the deleted keys entry on the exception and instead - // first remove all keys that were sent in the request and then add back those that ran into an exception. - logger.warn(buildDeletionErrorMessage(e), e); - deletionExceptions.useOrMaybeSuppress(e); - } catch (AmazonClientException e) { - // The AWS client threw any unexpected exception and did not execute the request at all so we do not - // remove any keys from the outstanding deletes set. + /** + * Delete one partition of a batch of blobs + * + * @param purpose The {@link OperationPurpose} of the deletion + * @param partition The list of blobs to delete + * @param deletionExceptions A holder for any exception(s) thrown during the deletion + */ + private void deletePartition(OperationPurpose purpose, List partition, DeletionExceptions deletionExceptions) { + try (AmazonS3Reference clientReference = clientReference()) { + final var response = SocketAccess.doPrivileged( + () -> clientReference.client().deleteObjects(bulkDelete(purpose, this, partition)) + ); + if (response.hasErrors()) { + final var exception = new ElasticsearchException(buildDeletionErrorMessage(response.errors())); + logger.warn(exception.getMessage(), exception); + deletionExceptions.useOrMaybeSuppress(exception); + return; + } + return; + } catch (SdkException e) { deletionExceptions.useOrMaybeSuppress(e); + return; } } - private String buildDeletionErrorMessage(MultiObjectDeleteException e) { + private String buildDeletionErrorMessage(List errors) { final var sb = new StringBuilder("Failed to delete some blobs "); - final var errors = e.getErrors(); for (int i = 0; i < errors.size() && i < MAX_DELETE_EXCEPTIONS; i++) { final var err = errors.get(i); - sb.append("[").append(err.getKey()).append("][").append(err.getCode()).append("][").append(err.getMessage()).append("]"); + sb.append("[").append(err.key()).append("][").append(err.code()).append("][").append(err.message()).append("]"); if (i < errors.size() - 1) { sb.append(","); } @@ -412,12 +360,10 @@ private String buildDeletionErrorMessage(MultiObjectDeleteException e) { return sb.toString(); } - private static DeleteObjectsRequest bulkDelete(OperationPurpose purpose, S3BlobStore blobStore, List blobs) { - final DeleteObjectsRequest deleteObjectsRequest = new DeleteObjectsRequest(blobStore.bucket()).withKeys( - blobs.toArray(Strings.EMPTY_ARRAY) - ).withQuiet(true); - 
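The delete path reflects two v2 changes visible above: keys must be wrapped in ObjectIdentifier values, and quiet-mode failures come back on the response rather than as a MultiObjectDeleteException. A standalone sketch of a single batch (names are illustrative; error handling is simplified to stderr):

    import java.util.List;

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest;
    import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse;
    import software.amazon.awssdk.services.s3.model.ObjectIdentifier;
    import software.amazon.awssdk.services.s3.model.S3Error;

    class QuietDeleteSketch {
        static void deleteBatch(S3Client client, String bucket, List<String> keys) {
            final List<ObjectIdentifier> objects = keys.stream()
                .map(k -> ObjectIdentifier.builder().key(k).build())
                .toList();
            final DeleteObjectsResponse response = client.deleteObjects(
                DeleteObjectsRequest.builder()
                    .bucket(bucket)
                    // quiet mode: the response reports only failures, not successes
                    .delete(d -> d.quiet(true).objects(objects))
                    .build()
            );
            if (response.hasErrors()) {
                for (S3Error error : response.errors()) {
                    System.err.printf("failed to delete [%s]: %s %s%n", error.key(), error.code(), error.message());
                }
            }
        }
    }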
configureRequestForMetrics(deleteObjectsRequest, blobStore, Operation.DELETE_OBJECTS, purpose); - return deleteObjectsRequest; + private static DeleteObjectsRequest bulkDelete(OperationPurpose purpose, S3BlobStore blobStore, List blobs) { + final var requestBuilder = DeleteObjectsRequest.builder().bucket(blobStore.bucket()).delete(b -> b.quiet(true).objects(blobs)); + configureRequestForMetrics(requestBuilder, blobStore, Operation.DELETE_OBJECTS, purpose); + return requestBuilder.build(); } @Override @@ -435,7 +381,7 @@ StatsCollectors getStatsCollectors() { return statsCollectors; } - public CannedAccessControlList getCannedACL() { + public ObjectCannedACL getCannedACL() { return cannedACL; } @@ -443,32 +389,36 @@ public StorageClass getStorageClass() { return storageClass; } - public static StorageClass initStorageClass(String storageClass) { - if ((storageClass == null) || storageClass.equals("")) { - return StorageClass.Standard; + public static StorageClass initStorageClass(String storageClassName) { + if ((storageClassName == null) || storageClassName.equals("")) { + return StorageClass.STANDARD; } + final StorageClass storageClass; try { - final StorageClass _storageClass = StorageClass.fromValue(storageClass.toUpperCase(Locale.ENGLISH)); - if (_storageClass.equals(StorageClass.Glacier)) { - throw new BlobStoreException("Glacier storage class is not supported"); - } - - return _storageClass; - } catch (final IllegalArgumentException illegalArgumentException) { - throw new BlobStoreException("`" + storageClass + "` is not a valid S3 Storage Class."); + storageClass = StorageClass.fromValue(storageClassName.toUpperCase(Locale.ENGLISH)); + } catch (final Exception e) { + throw new BlobStoreException("`" + storageClassName + "` is not a valid S3 Storage Class.", e); } + if (storageClass.equals(StorageClass.GLACIER)) { + throw new BlobStoreException("Glacier storage class is not supported"); + } + if (storageClass.equals(StorageClass.UNKNOWN_TO_SDK_VERSION)) { + throw new BlobStoreException("`" + storageClassName + "` is not a known S3 Storage Class."); + } + + return storageClass; } /** * Constructs canned acl from string */ - public static CannedAccessControlList initCannedACL(String cannedACL) { + public static ObjectCannedACL initCannedACL(String cannedACL) { if ((cannedACL == null) || cannedACL.equals("")) { - return CannedAccessControlList.Private; + return ObjectCannedACL.PRIVATE; } - for (final CannedAccessControlList cur : CannedAccessControlList.values()) { + for (final ObjectCannedACL cur : ObjectCannedACL.values()) { if (cur.toString().equalsIgnoreCase(cannedACL)) { return cur; } @@ -497,7 +447,7 @@ String getKey() { } Operation(String key) { - this.key = key; + this.key = Objects.requireNonNull(key); } static Operation parse(String s) { @@ -510,6 +460,27 @@ static Operation parse(String s) { Strings.format("invalid operation [%s] expected one of [%s]", s, Strings.arrayToCommaDelimitedString(Operation.values())) ); } + + private static final Predicate IS_PUT_MULTIPART_OPERATION = Set.of( + "CreateMultipartUpload", + "UploadPart", + "CompleteMultipartUpload" + )::contains; + + private static final Predicate IS_LIST_OPERATION = Set.of("ListObjects", "ListObjectsV2", "ListMultipartUploads")::contains; + + boolean assertConsistentOperationName(MetricCollection metricCollection) { + final var operationNameMetrics = metricCollection.metricValues(CoreMetric.OPERATION_NAME); + assert operationNameMetrics.size() == 1 : operationNameMetrics; + final Predicate expectedOperationPredicate 
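A v2-specific wrinkle in the storage-class validation above: the generated enum's fromValue never throws for an unrecognised name, it returns the UNKNOWN_TO_SDK_VERSION sentinel, so the check has to be explicit. A sketch of just that guard:

    import java.util.Locale;

    import software.amazon.awssdk.services.s3.model.StorageClass;

    class StorageClassParsingSketch {
        static StorageClass parse(String name) {
            if (name == null || name.isEmpty()) {
                return StorageClass.STANDARD;
            }
            // fromValue(...) maps unrecognised names to a sentinel instead of throwing
            final StorageClass parsed = StorageClass.fromValue(name.toUpperCase(Locale.ROOT));
            if (parsed == StorageClass.UNKNOWN_TO_SDK_VERSION) {
                throw new IllegalArgumentException("[" + name + "] is not a known S3 storage class");
            }
            return parsed;
        }
    }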
= switch (this) { + case LIST_OBJECTS -> IS_LIST_OPERATION; + case PUT_MULTIPART_OBJECT -> IS_PUT_MULTIPART_OPERATION; + case ABORT_MULTIPART_OBJECT -> "AbortMultipartUpload"::equals; + default -> key::equals; + }; + assert expectedOperationPredicate.test(operationNameMetrics.get(0)) : this + " vs " + operationNameMetrics; + return true; + } } record StatsKey(Operation operation, OperationPurpose purpose) { @@ -522,8 +493,8 @@ public String toString() { class StatsCollectors { final Map collectors = new ConcurrentHashMap<>(); - RequestMetricCollector getMetricCollector(Operation operation, OperationPurpose purpose) { - return collectors.computeIfAbsent(new StatsKey(operation, purpose), k -> buildMetricCollector(k.operation(), k.purpose())); + MetricPublisher getMetricPublisher(Operation operation, OperationPurpose purpose) { + return collectors.computeIfAbsent(new StatsKey(operation, purpose), k -> buildMetricPublisher(k.operation(), k.purpose())); } Map statsMap(boolean isStateless) { @@ -538,18 +509,20 @@ Map statsMap(boolean isStateless) { } } - ElasticsearchS3MetricsCollector buildMetricCollector(Operation operation, OperationPurpose purpose) { + ElasticsearchS3MetricsCollector buildMetricPublisher(Operation operation, OperationPurpose purpose) { return new ElasticsearchS3MetricsCollector(operation, purpose); } } static void configureRequestForMetrics( - AmazonWebServiceRequest request, + AwsRequest.Builder request, S3BlobStore blobStore, Operation operation, OperationPurpose purpose ) { - request.setRequestMetricCollector(blobStore.getMetricCollector(operation, purpose)); - request.putCustomQueryParameter(CUSTOM_QUERY_PARAMETER_PURPOSE, purpose.getKey()); + request.overrideConfiguration( + builder -> builder.metricPublishers(List.of(blobStore.getMetricPublisher(operation, purpose))) + .putRawQueryParameter(CUSTOM_QUERY_PARAMETER_PURPOSE, purpose.getKey()) + ); } } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java index 9ec43603068d3..26a9a32df38ee 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java @@ -9,8 +9,9 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.ClientConfiguration; -import com.amazonaws.Protocol; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureString; @@ -76,11 +77,11 @@ final class S3ClientSettings { key -> new Setting<>(key, "", s -> s.toLowerCase(Locale.ROOT), Property.NodeScope) ); - /** The protocol to use to connect to s3. */ - static final Setting.AffixSetting PROTOCOL_SETTING = Setting.affixKeySetting( + /** Formerly the protocol to use to connect to s3, now unused. V2 AWS SDK can infer the protocol from {@link #endpoint}. 
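The operation-name assertion above matches SDK operation names using a method reference bound to a constant set, which yields a reusable, allocation-free predicate. The idiom in isolation:

    import java.util.Set;
    import java.util.function.Predicate;

    class OperationNameMatchSketch {
        // Set::contains bound to an immutable set behaves as a Predicate<String>.
        private static final Predicate<String> IS_LIST_OPERATION =
            Set.of("ListObjects", "ListObjectsV2", "ListMultipartUploads")::contains;

        static boolean isListOperation(String sdkOperationName) {
            return IS_LIST_OPERATION.test(sdkOperationName);
        }
    }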
*/ + static final Setting.AffixSetting UNUSED_PROTOCOL_SETTING = Setting.affixKeySetting( PREFIX, "protocol", - key -> new Setting<>(key, "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope) + key -> new Setting<>(key, "https", s -> HttpScheme.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope, Property.Deprecated) ); /** The host name of a proxy to connect to s3 through. */ @@ -98,10 +99,10 @@ final class S3ClientSettings { ); /** The proxy scheme for connecting to S3 through a proxy. */ - static final Setting.AffixSetting PROXY_SCHEME_SETTING = Setting.affixKeySetting( + static final Setting.AffixSetting PROXY_SCHEME_SETTING = Setting.affixKeySetting( PREFIX, "proxy.scheme", - key -> new Setting<>(key, "http", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope) + key -> new Setting<>(key, "http", s -> HttpScheme.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope) ); /** The username of a proxy to connect to s3 through. */ @@ -139,11 +140,11 @@ final class S3ClientSettings { key -> Setting.intSetting(key, Defaults.RETRY_COUNT, 0, Property.NodeScope) ); - /** Whether retries should be throttled (ie use backoff). */ - static final Setting.AffixSetting USE_THROTTLE_RETRIES_SETTING = Setting.affixKeySetting( + /** Formerly whether retries should be throttled (ie use backoff), now unused. V2 AWS SDK always uses throttling. */ + static final Setting.AffixSetting UNUSED_USE_THROTTLE_RETRIES_SETTING = Setting.affixKeySetting( PREFIX, "use_throttle_retries", - key -> Setting.boolSetting(key, Defaults.THROTTLE_RETRIES, Property.NodeScope) + key -> Setting.boolSetting(key, true, Property.NodeScope, Property.Deprecated) ); /** Whether the s3 client should use path style access. */ @@ -167,22 +168,19 @@ final class S3ClientSettings { key -> new Setting<>(key, "", Function.identity(), Property.NodeScope) ); - /** An override for the signer to use. */ - static final Setting.AffixSetting SIGNER_OVERRIDE = Setting.affixKeySetting( + /** Formerly an override for the signer to use, now unused. V2 AWS SDK only supports AWS v4 signatures. */ + static final Setting.AffixSetting UNUSED_SIGNER_OVERRIDE = Setting.affixKeySetting( PREFIX, "signer_override", - key -> new Setting<>(key, "", Function.identity(), Property.NodeScope) + key -> Setting.simpleString(key, Property.NodeScope, Property.Deprecated) ); /** Credentials to authenticate with s3. */ - final S3BasicCredentials credentials; + final AwsCredentials credentials; /** The s3 endpoint the client should talk to, or empty string to use the default. */ final String endpoint; - /** The protocol to use to talk to s3. Defaults to https. */ - final Protocol protocol; - /** An optional proxy host that requests to s3 should be made through. */ final String proxyHost; @@ -190,7 +188,7 @@ final class S3ClientSettings { final int proxyPort; /** The proxy scheme to use for connecting to s3 through a proxy. */ - final Protocol proxyScheme; + final HttpScheme proxyScheme; // these should be "secure" yet the api for the s3 client only takes String, so storing them // as SecureString here won't really help with anything @@ -209,9 +207,6 @@ final class S3ClientSettings { /** The number of retries to use for the s3 client. */ final int maxRetries; - /** Whether the s3 client should use an exponential backoff retry policy. */ - final boolean throttleRetries; - /** Whether the s3 client should use path style access. 
*/ final boolean pathStyleAccess; @@ -221,30 +216,23 @@ final class S3ClientSettings { /** Region to use for signing requests or empty string to use default. */ final String region; - /** Signer override to use or empty string to use default. */ - final String signerOverride; - private S3ClientSettings( - S3BasicCredentials credentials, + AwsCredentials credentials, String endpoint, - Protocol protocol, String proxyHost, int proxyPort, - Protocol proxyScheme, + HttpScheme proxyScheme, String proxyUsername, String proxyPassword, int readTimeoutMillis, int maxConnections, int maxRetries, - boolean throttleRetries, boolean pathStyleAccess, boolean disableChunkedEncoding, - String region, - String signerOverride + String region ) { this.credentials = credentials; this.endpoint = endpoint; - this.protocol = protocol; this.proxyHost = proxyHost; this.proxyPort = proxyPort; this.proxyScheme = proxyScheme; @@ -253,11 +241,9 @@ private S3ClientSettings( this.readTimeoutMillis = readTimeoutMillis; this.maxConnections = maxConnections; this.maxRetries = maxRetries; - this.throttleRetries = throttleRetries; this.pathStyleAccess = pathStyleAccess; this.disableChunkedEncoding = disableChunkedEncoding; this.region = region; - this.signerOverride = signerOverride; } /** @@ -274,50 +260,43 @@ S3ClientSettings refine(Settings repositorySettings) { .build(); final String newEndpoint = getRepoSettingOrDefault(ENDPOINT_SETTING, normalizedSettings, endpoint); - final Protocol newProtocol = getRepoSettingOrDefault(PROTOCOL_SETTING, normalizedSettings, protocol); final String newProxyHost = getRepoSettingOrDefault(PROXY_HOST_SETTING, normalizedSettings, proxyHost); final int newProxyPort = getRepoSettingOrDefault(PROXY_PORT_SETTING, normalizedSettings, proxyPort); - final Protocol newProxyScheme = getRepoSettingOrDefault(PROXY_SCHEME_SETTING, normalizedSettings, proxyScheme); + final HttpScheme newProxyScheme = getRepoSettingOrDefault(PROXY_SCHEME_SETTING, normalizedSettings, proxyScheme); final int newReadTimeoutMillis = Math.toIntExact( getRepoSettingOrDefault(READ_TIMEOUT_SETTING, normalizedSettings, TimeValue.timeValueMillis(readTimeoutMillis)).millis() ); final int newMaxConnections = getRepoSettingOrDefault(MAX_CONNECTIONS_SETTING, normalizedSettings, maxConnections); final int newMaxRetries = getRepoSettingOrDefault(MAX_RETRIES_SETTING, normalizedSettings, maxRetries); - final boolean newThrottleRetries = getRepoSettingOrDefault(USE_THROTTLE_RETRIES_SETTING, normalizedSettings, throttleRetries); final boolean newPathStyleAccess = getRepoSettingOrDefault(USE_PATH_STYLE_ACCESS, normalizedSettings, pathStyleAccess); final boolean newDisableChunkedEncoding = getRepoSettingOrDefault( DISABLE_CHUNKED_ENCODING, normalizedSettings, disableChunkedEncoding ); - final S3BasicCredentials newCredentials; + final AwsCredentials newCredentials; if (checkDeprecatedCredentials(repositorySettings)) { newCredentials = loadDeprecatedCredentials(repositorySettings); } else { newCredentials = credentials; } final String newRegion = getRepoSettingOrDefault(REGION, normalizedSettings, region); - final String newSignerOverride = getRepoSettingOrDefault(SIGNER_OVERRIDE, normalizedSettings, signerOverride); if (Objects.equals(endpoint, newEndpoint) - && protocol == newProtocol && Objects.equals(proxyHost, newProxyHost) && proxyPort == newProxyPort && proxyScheme == newProxyScheme && newReadTimeoutMillis == readTimeoutMillis && maxConnections == newMaxConnections && maxRetries == newMaxRetries - && newThrottleRetries == 
throttleRetries && Objects.equals(credentials, newCredentials) && newPathStyleAccess == pathStyleAccess && newDisableChunkedEncoding == disableChunkedEncoding - && Objects.equals(region, newRegion) - && Objects.equals(signerOverride, newSignerOverride)) { + && Objects.equals(region, newRegion)) { return this; } return new S3ClientSettings( newCredentials, newEndpoint, - newProtocol, newProxyHost, newProxyPort, newProxyScheme, @@ -326,11 +305,9 @@ S3ClientSettings refine(Settings repositorySettings) { newReadTimeoutMillis, newMaxConnections, newMaxRetries, - newThrottleRetries, newPathStyleAccess, newDisableChunkedEncoding, - newRegion, - newSignerOverride + newRegion ); } @@ -377,18 +354,18 @@ static boolean checkDeprecatedCredentials(Settings repositorySettings) { return false; } - // backcompat for reading keys out of repository settings (clusterState) - private static S3BasicCredentials loadDeprecatedCredentials(Settings repositorySettings) { + // Ancient BWC for reading creds stored unsafely in plaintext in RepositoryMetadata#settings - really dangerous but still in use + private static AwsCredentials loadDeprecatedCredentials(Settings repositorySettings) { assert checkDeprecatedCredentials(repositorySettings); try ( SecureString key = S3Repository.ACCESS_KEY_SETTING.get(repositorySettings); SecureString secret = S3Repository.SECRET_KEY_SETTING.get(repositorySettings) ) { - return new S3BasicCredentials(key.toString(), secret.toString()); + return AwsBasicCredentials.create(key.toString(), secret.toString()); } } - private static S3BasicCredentials loadCredentials(Settings settings, String clientName) { + private static AwsCredentials loadCredentials(Settings settings, String clientName) { try ( SecureString accessKey = getConfigValue(settings, clientName, ACCESS_KEY_SETTING); SecureString secretKey = getConfigValue(settings, clientName, SECRET_KEY_SETTING); @@ -397,9 +374,9 @@ private static S3BasicCredentials loadCredentials(Settings settings, String clie if (accessKey.length() != 0) { if (secretKey.length() != 0) { if (sessionToken.length() != 0) { - return new S3BasicSessionCredentials(accessKey.toString(), secretKey.toString(), sessionToken.toString()); + return AwsSessionCredentials.create(accessKey.toString(), secretKey.toString(), sessionToken.toString()); } else { - return new S3BasicCredentials(accessKey.toString(), secretKey.toString()); + return AwsBasicCredentials.create(accessKey.toString(), secretKey.toString()); } } else { throw new IllegalArgumentException("Missing secret key for s3 client [" + clientName + "]"); @@ -426,7 +403,6 @@ static S3ClientSettings getClientSettings(final Settings settings, final String return new S3ClientSettings( S3ClientSettings.loadCredentials(settings, clientName), getConfigValue(settings, clientName, ENDPOINT_SETTING), - getConfigValue(settings, clientName, PROTOCOL_SETTING), getConfigValue(settings, clientName, PROXY_HOST_SETTING), getConfigValue(settings, clientName, PROXY_PORT_SETTING), getConfigValue(settings, clientName, PROXY_SCHEME_SETTING), @@ -435,11 +411,9 @@ static S3ClientSettings getClientSettings(final Settings settings, final String Math.toIntExact(getConfigValue(settings, clientName, READ_TIMEOUT_SETTING).millis()), getConfigValue(settings, clientName, MAX_CONNECTIONS_SETTING), getConfigValue(settings, clientName, MAX_RETRIES_SETTING), - getConfigValue(settings, clientName, USE_THROTTLE_RETRIES_SETTING), getConfigValue(settings, clientName, USE_PATH_STYLE_ACCESS), getConfigValue(settings, clientName, 
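On the credentials side, the v1 value classes map directly onto v2 factory methods, as the loadCredentials changes above show. A sketch of the selection between basic and session credentials (null/empty handling simplified relative to the plugin's setting-based checks):

    import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
    import software.amazon.awssdk.auth.credentials.AwsCredentials;
    import software.amazon.awssdk.auth.credentials.AwsSessionCredentials;

    class CredentialsSketch {
        static AwsCredentials build(String accessKey, String secretKey, String sessionToken) {
            if (sessionToken == null || sessionToken.isEmpty()) {
                return AwsBasicCredentials.create(accessKey, secretKey);
            }
            // session credentials carry the STS token alongside the key pair
            return AwsSessionCredentials.create(accessKey, secretKey, sessionToken);
        }
    }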
DISABLE_CHUNKED_ENCODING), - getConfigValue(settings, clientName, REGION), - getConfigValue(settings, clientName, SIGNER_OVERRIDE) + getConfigValue(settings, clientName, REGION) ); } } @@ -457,17 +431,14 @@ public boolean equals(final Object o) { && readTimeoutMillis == that.readTimeoutMillis && maxConnections == that.maxConnections && maxRetries == that.maxRetries - && throttleRetries == that.throttleRetries && Objects.equals(credentials, that.credentials) && Objects.equals(endpoint, that.endpoint) - && protocol == that.protocol && Objects.equals(proxyHost, that.proxyHost) && proxyScheme == that.proxyScheme && Objects.equals(proxyUsername, that.proxyUsername) && Objects.equals(proxyPassword, that.proxyPassword) && Objects.equals(disableChunkedEncoding, that.disableChunkedEncoding) - && Objects.equals(region, that.region) - && Objects.equals(signerOverride, that.signerOverride); + && Objects.equals(region, that.region); } @Override @@ -475,7 +446,6 @@ public int hashCode() { return Objects.hash( credentials, endpoint, - protocol, proxyHost, proxyPort, proxyScheme, @@ -484,10 +454,8 @@ public int hashCode() { readTimeoutMillis, maxRetries, maxConnections, - throttleRetries, disableChunkedEncoding, - region, - signerOverride + region ); } @@ -504,9 +472,8 @@ private static T getRepoSettingOrDefault(Setting.AffixSetting setting, Se } static final class Defaults { - static final TimeValue READ_TIMEOUT = TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT); - static final int MAX_CONNECTIONS = ClientConfiguration.DEFAULT_MAX_CONNECTIONS; - static final int RETRY_COUNT = ClientConfiguration.DEFAULT_RETRY_POLICY.getMaxErrorRetry(); - static final boolean THROTTLE_RETRIES = ClientConfiguration.DEFAULT_THROTTLE_RETRIES; + static final TimeValue READ_TIMEOUT = TimeValue.timeValueSeconds(50); + static final int MAX_CONNECTIONS = 50; + static final int RETRY_COUNT = 3; } } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index 37b960b33eb79..64295a7249ed6 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -9,8 +9,8 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.regions.RegionUtils; -import com.amazonaws.util.json.Jackson; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.regions.providers.DefaultAwsRegionProviderChain; import org.apache.lucene.util.SetOnce; import org.elasticsearch.SpecialPermission; @@ -21,6 +21,8 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.env.Environment; import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; @@ -30,6 +32,7 @@ import org.elasticsearch.xcontent.NamedXContentRegistry; import java.io.IOException; +import java.lang.invoke.MethodHandles; import java.security.AccessController; import java.security.PrivilegedAction; import java.util.Arrays; @@ -43,22 +46,20 @@ */ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin { + private static final Logger logger = 
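Since ClientConfiguration and its DEFAULT_* constants no longer exist in v2, the Defaults block above pins the previous values (50s read timeout, 50 connections, 3 retries) explicitly. In v2 those settings are split between the HTTP client and the retry configuration; a sketch of the HTTP-client half, assuming the Apache-based client this module depends on:

    import java.time.Duration;

    import software.amazon.awssdk.http.SdkHttpClient;
    import software.amazon.awssdk.http.apache.ApacheHttpClient;

    class HttpClientDefaultsSketch {
        static SdkHttpClient build() {
            return ApacheHttpClient.builder()
                .socketTimeout(Duration.ofSeconds(50)) // was ClientConfiguration.DEFAULT_SOCKET_TIMEOUT
                .maxConnections(50)                    // was ClientConfiguration.DEFAULT_MAX_CONNECTIONS
                .build();
        }
    }

The retry count, by contrast, is configured on the S3 client's override configuration rather than on the HTTP client.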
LogManager.getLogger(S3RepositoryPlugin.class); + static { SpecialPermission.check(); AccessController.doPrivileged((PrivilegedAction) () -> { try { - // kick jackson to do some static caching of declared members info - Jackson.jsonNodeOf("{}"); - // ClientConfiguration clinit has some classloader problems - // TODO: fix that - Class.forName("com.amazonaws.ClientConfiguration"); - // Pre-load region metadata to avoid looking them up dynamically without privileges enabled - RegionUtils.initialize(); - } catch (final ClassNotFoundException e) { - throw new RuntimeException(e); + // Eagerly load the RegionFromEndpointGuesser map from the resource file + MethodHandles.lookup().ensureInitialized(RegionFromEndpointGuesser.class); + } catch (IllegalAccessException unexpected) { + throw new AssertionError(unexpected); } return null; }); + } private final SetOnce service = new SetOnce<>(); @@ -88,11 +89,22 @@ protected S3Repository createRepository( public Collection createComponents(PluginServices services) { service.set(s3Service(services.environment(), services.clusterService().getSettings(), services.resourceWatcherService())); this.service.get().refreshAndClearCache(S3ClientSettings.load(settings)); - return List.of(service); + return List.of(service.get()); } S3Service s3Service(Environment environment, Settings nodeSettings, ResourceWatcherService resourceWatcherService) { - return new S3Service(environment, nodeSettings, resourceWatcherService); + return new S3Service(environment, nodeSettings, resourceWatcherService, S3RepositoryPlugin::getDefaultRegion); + } + + private static Region getDefaultRegion() { + return AccessController.doPrivileged((PrivilegedAction) () -> { + try { + return DefaultAwsRegionProviderChain.builder().build().getRegion(); + } catch (Exception e) { + logger.info("failed to obtain region from default provider chain", e); + return null; + } + }); } @Override @@ -119,7 +131,7 @@ public List> getSettings() { S3ClientSettings.SECRET_KEY_SETTING, S3ClientSettings.SESSION_TOKEN_SETTING, S3ClientSettings.ENDPOINT_SETTING, - S3ClientSettings.PROTOCOL_SETTING, + S3ClientSettings.UNUSED_PROTOCOL_SETTING, S3ClientSettings.PROXY_HOST_SETTING, S3ClientSettings.PROXY_PORT_SETTING, S3ClientSettings.PROXY_SCHEME_SETTING, @@ -128,9 +140,9 @@ public List> getSettings() { S3ClientSettings.READ_TIMEOUT_SETTING, S3ClientSettings.MAX_CONNECTIONS_SETTING, S3ClientSettings.MAX_RETRIES_SETTING, - S3ClientSettings.USE_THROTTLE_RETRIES_SETTING, + S3ClientSettings.UNUSED_USE_THROTTLE_RETRIES_SETTING, S3ClientSettings.USE_PATH_STYLE_ACCESS, - S3ClientSettings.SIGNER_OVERRIDE, + S3ClientSettings.UNUSED_SIGNER_OVERRIDE, S3ClientSettings.REGION, S3Service.REPOSITORY_S3_CAS_TTL_SETTING, S3Service.REPOSITORY_S3_CAS_ANTI_CONTENTION_DELAY_SETTING, diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java index da357dc09ab95..780ebe29ca7ec 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java @@ -8,12 +8,11 @@ */ package org.elasticsearch.repositories.s3; -import com.amazonaws.AmazonClientException; -import com.amazonaws.services.s3.model.AmazonS3Exception; -import com.amazonaws.services.s3.model.GetObjectRequest; -import com.amazonaws.services.s3.model.ObjectMetadata; -import 
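getDefaultRegion above defers to the SDK's standard provider chain (environment variables, system properties, profile files, IMDS). A standalone sketch of the same lookup, minus the Elasticsearch privilege wrapping and logging:

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.regions.providers.DefaultAwsRegionProviderChain;

    class DefaultRegionSketch {
        // Returns null when no region can be determined, matching the plugin's fallback behaviour.
        static Region defaultRegionOrNull() {
            try {
                return DefaultAwsRegionProviderChain.builder().build().getRegion();
            } catch (Exception e) {
                return null; // e.g. SdkClientException when no provider yields a region
            }
        }
    }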
com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectInputStream; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.exception.SdkServiceException; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; @@ -55,7 +54,7 @@ class S3RetryingInputStream extends InputStream { private final long end; private final List failures; - private S3ObjectInputStream currentStream; + private ResponseInputStream currentStream; private long currentStreamFirstOffset; private long currentStreamLastOffset; private int attempt = 1; @@ -63,6 +62,7 @@ class S3RetryingInputStream extends InputStream { private long currentOffset; private boolean closed; private boolean eof; + private boolean aborted = false; S3RetryingInputStream(OperationPurpose purpose, S3BlobStore blobStore, String blobKey) throws IOException { this(purpose, blobStore, blobKey, 0, Long.MAX_VALUE - 1); @@ -90,32 +90,33 @@ class S3RetryingInputStream extends InputStream { private void openStreamWithRetry() throws IOException { while (true) { try (AmazonS3Reference clientReference = blobStore.clientReference()) { - final GetObjectRequest getObjectRequest = new GetObjectRequest(blobStore.bucket(), blobKey); - configureRequestForMetrics(getObjectRequest, blobStore, Operation.GET_OBJECT, purpose); + final var getObjectRequestBuilder = GetObjectRequest.builder().bucket(blobStore.bucket()).key(blobKey); + configureRequestForMetrics(getObjectRequestBuilder, blobStore, Operation.GET_OBJECT, purpose); if (currentOffset > 0 || start > 0 || end < Long.MAX_VALUE - 1) { assert start + currentOffset <= end : "requesting beyond end, start = " + start + " offset=" + currentOffset + " end=" + end; - getObjectRequest.setRange(Math.addExact(start, currentOffset), end); + getObjectRequestBuilder.range("bytes=" + Math.addExact(start, currentOffset) + "-" + end); } this.currentStreamFirstOffset = Math.addExact(start, currentOffset); - final S3Object s3Object = SocketAccess.doPrivileged(() -> clientReference.client().getObject(getObjectRequest)); - this.currentStreamLastOffset = Math.addExact(currentStreamFirstOffset, getStreamLength(s3Object)); - this.currentStream = s3Object.getObjectContent(); + final var getObjectRequest = getObjectRequestBuilder.build(); + final var getObjectResponse = SocketAccess.doPrivileged(() -> clientReference.client().getObject(getObjectRequest)); + this.currentStreamLastOffset = Math.addExact(currentStreamFirstOffset, getStreamLength(getObjectResponse.response())); + this.currentStream = getObjectResponse; return; - } catch (AmazonClientException e) { - if (e instanceof AmazonS3Exception amazonS3Exception) { - if (amazonS3Exception.getStatusCode() == RestStatus.NOT_FOUND.getStatus()) { + } catch (SdkException e) { + if (e instanceof SdkServiceException sdkServiceException) { + if (sdkServiceException.statusCode() == RestStatus.NOT_FOUND.getStatus()) { throw addSuppressedExceptions( - new NoSuchFileException("Blob object [" + blobKey + "] not found: " + amazonS3Exception.getMessage()) + new NoSuchFileException("Blob object [" + blobKey + "] not found: " + sdkServiceException.getMessage()) ); } - if (amazonS3Exception.getStatusCode() == RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus()) { + if (sdkServiceException.statusCode() == 
RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus()) { throw addSuppressedExceptions( new RequestedRangeNotSatisfiedException( blobKey, currentStreamFirstOffset, (end < Long.MAX_VALUE - 1) ? end - currentStreamFirstOffset + 1 : end, - amazonS3Exception + sdkServiceException ) ); } @@ -130,25 +131,54 @@ private void openStreamWithRetry() throws IOException { } } - private long getStreamLength(final S3Object object) { - final ObjectMetadata metadata = object.getObjectMetadata(); + private long getStreamLength(final GetObjectResponse getObjectResponse) { try { - // Returns the content range of the object if response contains the Content-Range header. - final Long[] range = metadata.getContentRange(); - if (range != null) { - assert range[1] >= range[0] : range[1] + " vs " + range[0]; - assert range[0] == start + currentOffset - : "Content-Range start value [" + range[0] + "] exceeds start [" + start + "] + current offset [" + currentOffset + ']'; - assert range[1] <= end : "Content-Range end value [" + range[1] + "] exceeds end [" + end + ']'; - return range[1] - range[0] + 1L; - } - return metadata.getContentLength(); + return tryGetStreamLength(getObjectResponse); } catch (Exception e) { assert false : e; return Long.MAX_VALUE - 1L; // assume a large stream so that the underlying stream is aborted on closing, unless eof is reached } } + // exposed for testing + long tryGetStreamLength(GetObjectResponse getObjectResponse) { + // Returns the content range of the object if response contains the Content-Range header. + final var rangeString = getObjectResponse.contentRange(); + if (rangeString != null) { + if (rangeString.startsWith("bytes ") == false) { + throw new IllegalArgumentException( + "unexpected Content-range header [" + rangeString + "], should have started with [bytes ]" + ); + } + final var hyphenPos = rangeString.indexOf('-'); + if (hyphenPos == -1) { + throw new IllegalArgumentException("could not parse Content-range header [" + rangeString + "], missing hyphen"); + } + final var slashPos = rangeString.indexOf('/'); + if (slashPos == -1) { + throw new IllegalArgumentException("could not parse Content-range header [" + rangeString + "], missing slash"); + } + + final var rangeStart = Long.parseLong(rangeString, "bytes ".length(), hyphenPos, 10); + final var rangeEnd = Long.parseLong(rangeString, hyphenPos + 1, slashPos, 10); + if (rangeEnd < rangeStart) { + throw new IllegalArgumentException("invalid Content-range header [" + rangeString + "]"); + } + if (rangeStart != start + currentOffset) { + throw new IllegalArgumentException( + "unexpected Content-range header [" + rangeString + "], should have started at " + (start + currentOffset) + ); + } + if (rangeEnd > end) { + throw new IllegalArgumentException( + "unexpected Content-range header [" + rangeString + "], should have ended no later than " + end + ); + } + return rangeEnd - rangeStart + 1L; + } + return getObjectResponse.contentLength(); + } + @Override public int read() throws IOException { ensureOpen(); @@ -349,16 +379,17 @@ public void close() throws IOException { } /** - * Abort the {@link S3ObjectInputStream} if it wasn't read completely at the time this method is called, + * Abort the {@link ResponseInputStream} if it wasn't read completely at the time this method is called, * suppressing all thrown exceptions. 
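tryGetStreamLength above has to parse the raw Content-Range header itself because GetObjectResponse exposes it only as a string, whereas v1's ObjectMetadata.getContentRange() returned the parsed bounds. The header has the shape "bytes <start>-<end>/<total>" with inclusive bounds; a compact sketch of the parse, without the offset cross-checks the plugin also performs:

    class ContentRangeSketch {
        // Parses "bytes 0-99/12345" into the number of bytes in the range (100 here).
        static long rangeLength(String contentRange) {
            if (contentRange.startsWith("bytes ") == false) {
                throw new IllegalArgumentException("unexpected Content-Range [" + contentRange + "]");
            }
            final int hyphen = contentRange.indexOf('-');
            final int slash = contentRange.indexOf('/');
            if (hyphen < 0 || slash < 0) {
                throw new IllegalArgumentException("malformed Content-Range [" + contentRange + "]");
            }
            final long start = Long.parseLong(contentRange, "bytes ".length(), hyphen, 10);
            final long end = Long.parseLong(contentRange, hyphen + 1, slash, 10);
            return end - start + 1; // Content-Range bounds are inclusive
        }
    }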
*/ - private void maybeAbort(S3ObjectInputStream stream) { + private void maybeAbort(ResponseInputStream stream) { if (isEof()) { return; } try { if (start + currentOffset < currentStreamLastOffset) { stream.abort(); + aborted = true; } } catch (Exception e) { logger.warn("Failed to abort stream before closing", e); @@ -391,9 +422,7 @@ boolean isEof() { // package-private for tests boolean isAborted() { - if (currentStream == null || currentStream.getHttpRequest() == null) { - return false; - } - return currentStream.getHttpRequest().isAborted(); + // just expose whether abort() was called, we cannot tell if the stream is really aborted + return aborted; } } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index f20748bd77fae..e4b774e414b9a 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -9,24 +9,32 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.ClientConfiguration; -import com.amazonaws.SDKGlobalConfiguration; -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.auth.AWSCredentialsProviderChain; -import com.amazonaws.auth.AWSStaticCredentialsProvider; -import com.amazonaws.auth.AnonymousAWSCredentials; -import com.amazonaws.auth.EC2ContainerCredentialsProviderWrapper; -import com.amazonaws.auth.STSAssumeRoleWithWebIdentitySessionCredentialsProvider; -import com.amazonaws.client.builder.AwsClientBuilder; -import com.amazonaws.http.IdleConnectionReaper; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.s3.internal.Constants; -import com.amazonaws.services.securitytoken.AWSSecurityTokenService; -import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClient; -import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder; - +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProviderChain; +import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.auth.signer.AwsS3V4Signer; +import software.amazon.awssdk.awscore.exception.AwsServiceException; +import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption; +import software.amazon.awssdk.core.signer.Signer; +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.http.apache.ProxyConfiguration; +import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; +import software.amazon.awssdk.identity.spi.ResolveIdentityRequest; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.S3ClientBuilder; +import software.amazon.awssdk.services.sts.StsClient; +import 
software.amazon.awssdk.services.sts.auth.StsWebIdentityTokenFileCredentialsProvider; +import software.amazon.awssdk.utils.SdkAutoCloseable; + +import org.apache.http.client.utils.URIBuilder; +import org.apache.http.conn.DnsResolver; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; @@ -34,31 +42,42 @@ import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.core.IOUtils; +import org.elasticsearch.common.util.concurrent.RunOnce; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.watcher.FileChangesListener; import org.elasticsearch.watcher.FileWatcher; import org.elasticsearch.watcher.ResourceWatcherService; -import java.io.Closeable; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.nio.file.Files; import java.nio.file.Path; +import java.security.PrivilegedAction; import java.time.Clock; -import java.util.List; +import java.time.Duration; import java.util.Map; import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.function.Consumer; +import java.util.function.Supplier; -import static com.amazonaws.SDKGlobalConfiguration.AWS_ROLE_ARN_ENV_VAR; -import static com.amazonaws.SDKGlobalConfiguration.AWS_ROLE_SESSION_NAME_ENV_VAR; -import static com.amazonaws.SDKGlobalConfiguration.AWS_WEB_IDENTITY_ENV_VAR; import static java.util.Collections.emptyMap; +import static software.amazon.awssdk.core.SdkSystemSetting.AWS_ROLE_ARN; +import static software.amazon.awssdk.core.SdkSystemSetting.AWS_ROLE_SESSION_NAME; +import static software.amazon.awssdk.core.SdkSystemSetting.AWS_WEB_IDENTITY_TOKEN_FILE; -class S3Service implements Closeable { +class S3Service extends AbstractLifecycleComponent { private static final Logger LOGGER = LogManager.getLogger(S3Service.class); static final Setting REPOSITORY_S3_CAS_TTL_SETTING = Setting.timeSetting( @@ -90,13 +109,28 @@ class S3Service implements Closeable { */ private volatile Map derivedClientSettings = emptyMap(); + private final Runnable defaultRegionSetter; + private volatile Region defaultRegion; + + /** + * Use a signer that does not require to pre-read (and checksum) the body of PutObject and UploadPart requests since we can rely on + * TLS for equivalent protection. 
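A minimal sketch of how this signer gets attached to the client, mirroring the `putAdvancedOption` call in `buildConfiguration` further down (`AwsS3V4Signer` is deprecated in SDK v2 but still functional for this purpose):

    ClientOverrideConfiguration overrides = ClientOverrideConfiguration.builder()
        .putAdvancedOption(SdkAdvancedClientOption.SIGNER, AwsS3V4Signer.create()) // header-based signing, body not pre-read
        .build();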
+ */ + @SuppressWarnings("deprecation") + private static final Signer signer = AwsS3V4Signer.create(); + final CustomWebIdentityTokenCredentialsProvider webIdentityTokenCredentialsProvider; final TimeValue compareAndExchangeTimeToLive; final TimeValue compareAndExchangeAntiContentionDelay; final boolean isStateless; - S3Service(Environment environment, Settings nodeSettings, ResourceWatcherService resourceWatcherService) { + S3Service( + Environment environment, + Settings nodeSettings, + ResourceWatcherService resourceWatcherService, + Supplier defaultRegionSupplier + ) { webIdentityTokenCredentialsProvider = new CustomWebIdentityTokenCredentialsProvider( environment, System::getenv, @@ -107,6 +141,7 @@ class S3Service implements Closeable { compareAndExchangeTimeToLive = REPOSITORY_S3_CAS_TTL_SETTING.get(nodeSettings); compareAndExchangeAntiContentionDelay = REPOSITORY_S3_CAS_ANTI_CONTENTION_DELAY_SETTING.get(nodeSettings); isStateless = DiscoveryNode.isStateless(nodeSettings); + defaultRegionSetter = new RunOnce(() -> defaultRegion = defaultRegionSupplier.get()); } /** @@ -142,10 +177,17 @@ public AmazonS3Reference client(RepositoryMetadata repositoryMetadata) { if (existing != null && existing.tryIncRef()) { return existing; } - final AmazonS3Reference clientReference = new AmazonS3Reference(buildClient(clientSettings)); - clientReference.mustIncRef(); - clientsCache = Maps.copyMapWithAddedEntry(clientsCache, clientSettings, clientReference); - return clientReference; + final SdkHttpClient httpClient = buildHttpClient(clientSettings, getCustomDnsResolver()); + Releasable toRelease = httpClient::close; + try { + final AmazonS3Reference clientReference = new AmazonS3Reference(buildClient(clientSettings, httpClient), httpClient); + clientReference.mustIncRef(); + clientsCache = Maps.copyMapWithAddedEntry(clientsCache, clientSettings, clientReference); + toRelease = null; + return clientReference; + } finally { + Releasables.close(toRelease); + } } } @@ -185,97 +227,201 @@ S3ClientSettings settings(RepositoryMetadata repositoryMetadata) { } // proxy for testing - AmazonS3 buildClient(final S3ClientSettings clientSettings) { - final AmazonS3ClientBuilder builder = buildClientBuilder(clientSettings); - return SocketAccess.doPrivileged(builder::build); + S3Client buildClient(final S3ClientSettings clientSettings, SdkHttpClient httpClient) { + final S3ClientBuilder s3clientBuilder = buildClientBuilder(clientSettings, httpClient); + return SocketAccess.doPrivileged(s3clientBuilder::build); } - protected AmazonS3ClientBuilder buildClientBuilder(S3ClientSettings clientSettings) { - final AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard(); - builder.withCredentials(buildCredentials(LOGGER, clientSettings, webIdentityTokenCredentialsProvider)); - builder.withClientConfiguration(buildConfiguration(clientSettings)); - - String endpoint = Strings.hasLength(clientSettings.endpoint) ? clientSettings.endpoint : Constants.S3_HOSTNAME; - if ((endpoint.startsWith("http://") || endpoint.startsWith("https://")) == false) { - // Manually add the schema to the endpoint to work around https://github.com/aws/aws-sdk-java/issues/2274 - // TODO: Remove this once fixed in the AWS SDK - endpoint = clientSettings.protocol.toString() + "://" + endpoint; - } - final String region = Strings.hasLength(clientSettings.region) ? 
clientSettings.region : null; - LOGGER.debug("using endpoint [{}] and region [{}]", endpoint, region); - - // If the endpoint configuration isn't set on the builder then the default behaviour is to try - // and work out what region we are in and use an appropriate endpoint - see AwsClientBuilder#setRegion. - // In contrast, directly-constructed clients use s3.amazonaws.com unless otherwise instructed. We currently - // use a directly-constructed client, and need to keep the existing behaviour to avoid a breaking change, - // so to move to using the builder we must set it explicitly to keep the existing behaviour. - // - // We do this because directly constructing the client is deprecated (was already deprecated in 1.1.223 too) - // so this change removes that usage of a deprecated API. - builder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, region)); + protected S3ClientBuilder buildClientBuilder(S3ClientSettings clientSettings, SdkHttpClient httpClient) { + var s3clientBuilder = S3Client.builder(); + s3clientBuilder.httpClient(httpClient); + s3clientBuilder.overrideConfiguration(buildConfiguration(clientSettings, isStateless)); + s3clientBuilder.serviceConfiguration(b -> b.chunkedEncodingEnabled(clientSettings.disableChunkedEncoding == false)); + + s3clientBuilder.credentialsProvider(buildCredentials(LOGGER, clientSettings, webIdentityTokenCredentialsProvider)); + if (clientSettings.pathStyleAccess) { - builder.enablePathStyleAccess(); + s3clientBuilder.forcePathStyle(true); + } + + final var clientRegion = getClientRegion(clientSettings); + if (clientRegion == null) { + // If no region or endpoint is specified then (for BwC with SDKv1) default to us-east-1 and enable cross-region access: + s3clientBuilder.region(Region.US_EAST_1); + s3clientBuilder.crossRegionAccessEnabled(true); + } else { + s3clientBuilder.region(clientRegion); } - if (clientSettings.disableChunkedEncoding) { - builder.disableChunkedEncoding(); + + if (Strings.hasLength(clientSettings.endpoint)) { + s3clientBuilder.endpointOverride(URI.create(clientSettings.endpoint)); } - return builder; + + return s3clientBuilder; } - // pkg private for tests - static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings) { - final ClientConfiguration clientConfiguration = new ClientConfiguration(); - // the response metadata cache is only there for diagnostics purposes, - // but can force objects from every response to the old generation. 
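Under the new scheme, a client with neither region nor endpoint configured keeps the SDK v1 behaviour through a builder along these lines (a minimal sketch assuming default credentials and HTTP client, not the exact production wiring):

    S3Client fallbackClient = S3Client.builder()
        .region(Region.US_EAST_1)        // sign against us-east-1 by default, as SDK v1 did
        .crossRegionAccessEnabled(true)  // but let the SDK follow the bucket to its actual region
        .build();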
- clientConfiguration.setResponseMetadataCacheSize(0); - clientConfiguration.setProtocol(clientSettings.protocol); + @Nullable // if the region is wholly unknown (falls back to us-east-1 and enables cross-region access) + Region getClientRegion(S3ClientSettings clientSettings) { + if (Strings.hasLength(clientSettings.region)) { + return Region.of(clientSettings.region); + } + final String endpointDescription; + final var hasEndpoint = Strings.hasLength(clientSettings.endpoint); + if (hasEndpoint) { + final var guessedRegion = RegionFromEndpointGuesser.guessRegion(clientSettings.endpoint); + if (guessedRegion != null) { + LOGGER.warn( + """ + found S3 client with endpoint [{}] but no configured region, guessing it should use [{}]; \ + to suppress this warning, configure the [{}] setting on this node""", + clientSettings.endpoint, + guessedRegion, + S3ClientSettings.REGION.getConcreteSettingForNamespace("CLIENT_NAME").getKey() + ); + return Region.of(guessedRegion); + } + endpointDescription = "configured endpoint [" + clientSettings.endpoint + "]"; + } else { + endpointDescription = "no configured endpoint"; + } + final var defaultRegion = this.defaultRegion; + if (defaultRegion != null) { + LOGGER.debug(""" + found S3 client with no configured region and {}, using region [{}] from SDK""", endpointDescription, defaultRegion); + return defaultRegion; + } - if (Strings.hasText(clientSettings.proxyHost)) { - // TODO: remove this leniency, these settings should exist together and be validated - clientConfiguration.setProxyHost(clientSettings.proxyHost); - clientConfiguration.setProxyPort(clientSettings.proxyPort); - clientConfiguration.setProxyProtocol(clientSettings.proxyScheme); - clientConfiguration.setProxyUsername(clientSettings.proxyUsername); - clientConfiguration.setProxyPassword(clientSettings.proxyPassword); + LOGGER.warn( + """ + found S3 client with no configured region and {}, falling back to [{}]{}; \ + to suppress this warning, configure the [{}] setting on this node""", + endpointDescription, + Region.US_EAST_1, + hasEndpoint ? "" : " and enabling cross-region access", + S3ClientSettings.REGION.getConcreteSettingForNamespace("CLIENT_NAME").getKey() + ); + + return hasEndpoint ? Region.US_EAST_1 : null; + } + + @Nullable // in production, but exposed for tests to override + DnsResolver getCustomDnsResolver() { + return null; + } + + /** + * An override for testing purposes. + */ + Optional getConnectionAcquisitionTimeout() { + return Optional.empty(); + } + + private SdkHttpClient buildHttpClient( + S3ClientSettings clientSettings, + @Nullable /* to use default resolver */ DnsResolver dnsResolver + ) { + ApacheHttpClient.Builder httpClientBuilder = ApacheHttpClient.builder(); + + var optConnectionAcquisitionTimout = getConnectionAcquisitionTimeout(); + if (optConnectionAcquisitionTimout.isPresent()) { + // Only tests set this. 
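    // For orientation, getClientRegion above resolves the signing region in this order:
    //   1. an explicit s3.client.${CLIENT_NAME}.region setting, used verbatim;
    //   2. a region guessed from the configured endpoint via RegionFromEndpointGuesser (logged as a warning);
    //   3. the default region resolved from the SDK/environment when the node started;
    //   4. otherwise us-east-1, additionally enabling cross-region access when no endpoint is configured.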
+ httpClientBuilder.connectionAcquisitionTimeout(optConnectionAcquisitionTimout.get()); } - if (Strings.hasLength(clientSettings.signerOverride)) { - clientConfiguration.setSignerOverride(clientSettings.signerOverride); + httpClientBuilder.maxConnections(clientSettings.maxConnections); + httpClientBuilder.socketTimeout(Duration.ofMillis(clientSettings.readTimeoutMillis)); + + Optional proxyConfiguration = buildProxyConfiguration(clientSettings); + if (proxyConfiguration.isPresent()) { + httpClientBuilder.proxyConfiguration(proxyConfiguration.get()); } - clientConfiguration.setMaxConnections(clientSettings.maxConnections); - clientConfiguration.setMaxErrorRetry(clientSettings.maxRetries); - clientConfiguration.setUseThrottleRetries(clientSettings.throttleRetries); - clientConfiguration.setSocketTimeout(clientSettings.readTimeoutMillis); + if (dnsResolver != null) { + httpClientBuilder.dnsResolver(dnsResolver); + } + + return httpClientBuilder.build(); + } - return clientConfiguration; + static boolean isInvalidAccessKeyIdException(Throwable e) { + if (e instanceof AwsServiceException ase) { + return ase.statusCode() == RestStatus.FORBIDDEN.getStatus() && "InvalidAccessKeyId".equals(ase.awsErrorDetails().errorCode()); + } + return false; } + static ClientOverrideConfiguration buildConfiguration(S3ClientSettings clientSettings, boolean isStateless) { + ClientOverrideConfiguration.Builder clientOverrideConfiguration = ClientOverrideConfiguration.builder(); + clientOverrideConfiguration.putAdvancedOption(SdkAdvancedClientOption.SIGNER, signer); + var retryStrategyBuilder = AwsRetryStrategy.standardRetryStrategy() + .toBuilder() + .maxAttempts(clientSettings.maxRetries + 1 /* first attempt is not a retry */); + if (isStateless) { + // Create a 403 error retryable policy. In serverless we sometimes get 403s because of delays in propagating updated credentials + // because IAM is not strongly consistent. + retryStrategyBuilder.retryOnException(S3Service::isInvalidAccessKeyIdException); + } + clientOverrideConfiguration.retryStrategy(retryStrategyBuilder.build()); + return clientOverrideConfiguration.build(); + } + + /** + * Populates a {@link ProxyConfiguration} with any user specified settings via {@link S3ClientSettings}, if any are set. + * Otherwise, returns empty Optional. 
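The retry arithmetic above can be sanity-checked with a small sketch: with `max_retries` set to 3, the strategy permits 4 attempts in total, i.e. one initial call plus three retries (values here are illustrative):

    var retryStrategy = AwsRetryStrategy.standardRetryStrategy()
        .toBuilder()
        .maxAttempts(3 + 1) // s3.client.default.max_retries = 3 -> 4 total attempts
        .build();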
+ */ // pkg private for tests - static AWSCredentialsProvider buildCredentials( + static Optional buildProxyConfiguration(S3ClientSettings clientSettings) { + // If proxy settings are provided + if (Strings.hasText(clientSettings.proxyHost)) { + final URIBuilder uriBuilder = new URIBuilder().setScheme(clientSettings.proxyScheme.getSchemeString()) + .setHost(clientSettings.proxyHost) + .setPort(clientSettings.proxyPort); + final URI proxyUri; + try { + proxyUri = uriBuilder.build(); + } catch (URISyntaxException e) { + throw new IllegalArgumentException(e); + } + + return Optional.of( + ProxyConfiguration.builder() + .endpoint(proxyUri) // no need to set scheme, ProxyConfiguration populates the scheme from endpoint resolution + .username(clientSettings.proxyUsername) + .password(clientSettings.proxyPassword) + .build() + ); + } + return Optional.empty(); + } + + // pkg private for tests + static AwsCredentialsProvider buildCredentials( Logger logger, S3ClientSettings clientSettings, CustomWebIdentityTokenCredentialsProvider webIdentityTokenCredentialsProvider ) { - final S3BasicCredentials credentials = clientSettings.credentials; + final AwsCredentials credentials = clientSettings.credentials; if (credentials == null) { if (webIdentityTokenCredentialsProvider.isActive()) { logger.debug("Using a custom provider chain of Web Identity Token and instance profile credentials"); - return new PrivilegedAWSCredentialsProvider( - new AWSCredentialsProviderChain( - List.of( - new ErrorLoggingCredentialsProvider(webIdentityTokenCredentialsProvider, LOGGER), - new ErrorLoggingCredentialsProvider(new EC2ContainerCredentialsProviderWrapper(), LOGGER) - ) - ) + return new PrivilegedAwsCredentialsProvider( + // Wrap the credential providers in ErrorLoggingCredentialsProvider so that we get log info if/when the STS + // (in CustomWebIdentityTokenCredentialsProvider) is unavailable to the ES server, before falling back to a standard + // credential provider. + AwsCredentialsProviderChain.builder() + // If credentials are refreshed, we want to look around for different forms of credentials again. 
+ .reuseLastProviderEnabled(false) + .addCredentialsProvider(new ErrorLoggingCredentialsProvider(webIdentityTokenCredentialsProvider, LOGGER)) + .addCredentialsProvider(new ErrorLoggingCredentialsProvider(DefaultCredentialsProvider.create(), LOGGER)) + .build() ); } else { - logger.debug("Using instance profile credentials"); - return new PrivilegedAWSCredentialsProvider(new EC2ContainerCredentialsProviderWrapper()); + logger.debug("Using DefaultCredentialsProvider for credentials"); + return new PrivilegedAwsCredentialsProvider(DefaultCredentialsProvider.create()); } } else { logger.debug("Using basic key/secret credentials"); - return new AWSStaticCredentialsProvider(credentials); + return StaticCredentialsProvider.create(credentials); } } @@ -287,9 +433,6 @@ private synchronized void releaseCachedClients() { // clear previously cached clients, they will be build lazily clientsCache = emptyMap(); derivedClientSettings = emptyMap(); - // shutdown IdleConnectionReaper background thread - // it will be restarted on new client usage - IdleConnectionReaper.shutdown(); } public void onBlobStoreClose() { @@ -297,53 +440,76 @@ public void onBlobStoreClose() { } @Override - public void close() throws IOException { + protected void doStart() { + defaultRegionSetter.run(); + } + + @Override + protected void doStop() {} + + @Override + public void doClose() throws IOException { releaseCachedClients(); - webIdentityTokenCredentialsProvider.shutdown(); + webIdentityTokenCredentialsProvider.close(); } - static class PrivilegedAWSCredentialsProvider implements AWSCredentialsProvider { - private final AWSCredentialsProvider credentialsProvider; + /** + * Wraps calls with {@link SocketAccess#doPrivileged(PrivilegedAction)} where needed. + */ + static class PrivilegedAwsCredentialsProvider implements AwsCredentialsProvider { + private final AwsCredentialsProvider delegate; + + private PrivilegedAwsCredentialsProvider(AwsCredentialsProvider delegate) { + this.delegate = delegate; + } + + AwsCredentialsProvider getCredentialsProvider() { + return delegate; + } - private PrivilegedAWSCredentialsProvider(AWSCredentialsProvider credentialsProvider) { - this.credentialsProvider = credentialsProvider; + @Override + public AwsCredentials resolveCredentials() { + return delegate.resolveCredentials(); } - AWSCredentialsProvider getCredentialsProvider() { - return credentialsProvider; + @Override + public Class identityType() { + return delegate.identityType(); } @Override - public AWSCredentials getCredentials() { - return SocketAccess.doPrivileged(credentialsProvider::getCredentials); + public CompletableFuture resolveIdentity(ResolveIdentityRequest request) { + return SocketAccess.doPrivileged(() -> delegate.resolveIdentity(request)); } @Override - public void refresh() { - SocketAccess.doPrivilegedVoid(credentialsProvider::refresh); + public CompletableFuture resolveIdentity(Consumer consumer) { + return SocketAccess.doPrivileged(() -> delegate.resolveIdentity(consumer)); + } + + @Override + public CompletableFuture resolveIdentity() { + return SocketAccess.doPrivileged(delegate::resolveIdentity); } } /** - * Customizes {@link com.amazonaws.auth.WebIdentityTokenCredentialsProvider} + * Customizes {@link StsWebIdentityTokenFileCredentialsProvider}. * *
-     * <ul>
-     * <li>Reads the the location of the web identity token not from AWS_WEB_IDENTITY_TOKEN_FILE, but from a symlink
-     * in the plugin directory, so we don't need to create a hardcoded read file permission for the plugin.</li>
+     * <ul>
+     * <li>Reads the location of the web identity token not from AWS_WEB_IDENTITY_TOKEN_FILE, but from a symlink
+     * in the S3 plugin directory, so we don't need to create a hardcoded read file permission for the plugin.</li>
      * <li>Supports customization of the STS (Security Token Service) endpoint via a system property, so we can
      * test it against a test fixture.</li>
      * <li>Supports gracefully shutting down the provider and the STS client.</li>
      * </ul>
*/ - static class CustomWebIdentityTokenCredentialsProvider implements AWSCredentialsProvider { - - private static final String STS_HOSTNAME = "https://sts.amazonaws.com"; + static class CustomWebIdentityTokenCredentialsProvider implements AwsCredentialsProvider { static final String WEB_IDENTITY_TOKEN_FILE_LOCATION = "repository-s3/aws-web-identity-token-file"; - private STSAssumeRoleWithWebIdentitySessionCredentialsProvider credentialsProvider; - private AWSSecurityTokenService stsClient; - private String stsRegion; + private StsWebIdentityTokenFileCredentialsProvider credentialsProvider; + private StsClient securityTokenServiceClient; CustomWebIdentityTokenCredentialsProvider( Environment environment, @@ -352,94 +518,116 @@ static class CustomWebIdentityTokenCredentialsProvider implements AWSCredentials Clock clock, ResourceWatcherService resourceWatcherService ) { - // Check whether the original environment variable exists. If it doesn't, - // the system doesn't support AWS web identity tokens - if (systemEnvironment.getEnv(AWS_WEB_IDENTITY_ENV_VAR) == null) { + // Check whether the original environment variable exists. If it doesn't, the system doesn't support AWS web identity tokens + final var webIdentityTokenFileEnvVar = systemEnvironment.getEnv(AWS_WEB_IDENTITY_TOKEN_FILE.name()); + if (webIdentityTokenFileEnvVar == null) { return; } - // Make sure that a readable symlink to the token file exists in the plugin config directory - // AWS_WEB_IDENTITY_TOKEN_FILE exists but we only use Web Identity Tokens if a corresponding symlink exists and is readable - Path webIdentityTokenFileSymlink = environment.configDir().resolve(WEB_IDENTITY_TOKEN_FILE_LOCATION); - if (Files.exists(webIdentityTokenFileSymlink) == false) { + + // The AWS_WEB_IDENTITY_TOKEN_FILE environment variable exists, but in EKS it will point to a file outside the config directory + // and ES therefore does not have access. Instead as per the docs we require the users to set up a symlink to a fixed location + // within ${ES_CONF_PATH} which we can access: + final var webIdentityTokenFileLocation = environment.configDir().resolve(WEB_IDENTITY_TOKEN_FILE_LOCATION); + if (Files.exists(webIdentityTokenFileLocation) == false) { LOGGER.warn( - "Cannot use AWS Web Identity Tokens: AWS_WEB_IDENTITY_TOKEN_FILE is defined but no corresponding symlink exists " - + "in the config directory" + """ + Cannot use AWS Web Identity Tokens: AWS_WEB_IDENTITY_TOKEN_FILE is defined as [{}] but Elasticsearch requires a \ + symlink to this token file at location [{}] and there is nothing at that location.""", + webIdentityTokenFileEnvVar, + webIdentityTokenFileLocation ); return; } - if (Files.isReadable(webIdentityTokenFileSymlink) == false) { - throw new IllegalStateException("Unable to read a Web Identity Token symlink in the config directory"); + if (Files.isReadable(webIdentityTokenFileLocation) == false) { + throw new IllegalStateException( + Strings.format( + """ + Cannot use AWS Web Identity Tokens: AWS_WEB_IDENTITY_TOKEN_FILE is defined as [%s] but Elasticsearch requires \ + a symlink to this token file at location [{}] and this location is not readable.""", + webIdentityTokenFileEnvVar, + webIdentityTokenFileLocation + ) + ); } - String roleArn = systemEnvironment.getEnv(AWS_ROLE_ARN_ENV_VAR); + + final var roleArn = systemEnvironment.getEnv(AWS_ROLE_ARN.name()); if (roleArn == null) { LOGGER.warn( - "Unable to use a web identity token for authentication. 
The AWS_WEB_IDENTITY_TOKEN_FILE environment " - + "variable is set, but AWS_ROLE_ARN is missing" + """ + Cannot use AWS Web Identity Tokens: AWS_WEB_IDENTITY_TOKEN_FILE is defined as [{}] but Elasticsearch requires \ + the AWS_ROLE_ARN environment variable to be set to the ARN of the role and this variable is not set.""", + webIdentityTokenFileEnvVar ); return; } - String roleSessionName = Objects.requireNonNullElseGet( - systemEnvironment.getEnv(AWS_ROLE_SESSION_NAME_ENV_VAR), + + final var roleSessionName = Objects.requireNonNullElseGet( + systemEnvironment.getEnv(AWS_ROLE_SESSION_NAME.name()), // Mimic the default behaviour of the AWS SDK in case the session name is not set // See `com.amazonaws.auth.WebIdentityTokenCredentialsProvider#45` () -> "aws-sdk-java-" + clock.millis() ); - AWSSecurityTokenServiceClientBuilder stsClientBuilder = AWSSecurityTokenServiceClient.builder(); - - // Check if we need to use regional STS endpoints - // https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html - if ("regional".equalsIgnoreCase(systemEnvironment.getEnv("AWS_STS_REGIONAL_ENDPOINTS"))) { - // AWS_REGION should be injected by the EKS pod identity webhook: - // https://github.com/aws/amazon-eks-pod-identity-webhook/pull/41 - stsRegion = systemEnvironment.getEnv(SDKGlobalConfiguration.AWS_REGION_ENV_VAR); - if (stsRegion != null) { - SocketAccess.doPrivilegedVoid(() -> stsClientBuilder.withRegion(stsRegion)); - } else { - LOGGER.warn("Unable to use regional STS endpoints because the AWS_REGION environment variable is not set"); + + { + final var securityTokenServiceClientBuilder = StsClient.builder(); + // allow an endpoint override in tests + final var endpointOverride = jvmEnvironment.getProperty("org.elasticsearch.repositories.s3.stsEndpointOverride", null); + if (endpointOverride != null) { + securityTokenServiceClientBuilder.endpointOverride(URI.create(endpointOverride)); } + securityTokenServiceClientBuilder.credentialsProvider(AnonymousCredentialsProvider.create()); + securityTokenServiceClient = securityTokenServiceClientBuilder.build(); } - if (stsRegion == null) { - // Custom system property used for specifying a mocked version of the STS for testing - String customStsEndpoint = jvmEnvironment.getProperty("com.amazonaws.sdk.stsMetadataServiceEndpointOverride", STS_HOSTNAME); - // Set the region explicitly via the endpoint URL, so the AWS SDK doesn't make any guesses internally. 
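For comparison, the essential SDK v2 wiring that replaces the removed v1 code here looks roughly like the following (the role ARN and file path are hypothetical placeholders):

    StsClient sts = StsClient.builder()
        .credentialsProvider(AnonymousCredentialsProvider.create()) // AssumeRoleWithWebIdentity needs no signed request
        .build();
    AwsCredentialsProvider provider = StsWebIdentityTokenFileCredentialsProvider.builder()
        .roleArn("arn:aws:iam::123456789012:role/example-role")     // hypothetical role ARN
        .roleSessionName("aws-sdk-java-1234567890")
        .webIdentityTokenFile(Path.of("/path/to/config/repository-s3/aws-web-identity-token-file"))
        .stsClient(sts)
        .build();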
- stsClientBuilder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(customStsEndpoint, null)); - } - stsClientBuilder.withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials())); - stsClient = SocketAccess.doPrivileged(stsClientBuilder::build); + try { - credentialsProvider = new STSAssumeRoleWithWebIdentitySessionCredentialsProvider.Builder( - roleArn, - roleSessionName, - webIdentityTokenFileSymlink.toString() - ).withStsClient(stsClient).build(); - var watcher = new FileWatcher(webIdentityTokenFileSymlink); - watcher.addListener(new FileChangesListener() { - - @Override - public void onFileCreated(Path file) { - onFileChanged(file); - } + credentialsProvider = StsWebIdentityTokenFileCredentialsProvider.builder() + .roleArn(roleArn) + .roleSessionName(roleSessionName) + .webIdentityTokenFile(webIdentityTokenFileLocation) + .stsClient(securityTokenServiceClient) + .build(); + + setupFileWatcherToRefreshCredentials(webIdentityTokenFileLocation, resourceWatcherService); + } catch (Exception e) { + securityTokenServiceClient.close(); + throw e; + } + } - @Override - public void onFileChanged(Path file) { - if (file.equals(webIdentityTokenFileSymlink)) { - LOGGER.debug("WS web identity token file [{}] changed, updating credentials", file); - SocketAccess.doPrivilegedVoid(credentialsProvider::refresh); - } + @Override + public String toString() { + return "CustomWebIdentityTokenCredentialsProvider[" + credentialsProvider + "]"; + } + + /** + * Sets up a {@link FileWatcher} that runs {@link StsWebIdentityTokenFileCredentialsProvider#resolveCredentials()} whenever the + * file to which {@code webIdentityTokenFileSymlink} refers gets updated. + */ + private void setupFileWatcherToRefreshCredentials(Path webIdentityTokenFileSymlink, ResourceWatcherService resourceWatcherService) { + var watcher = new FileWatcher(webIdentityTokenFileSymlink); + watcher.addListener(new FileChangesListener() { + + @Override + public void onFileCreated(Path file) { + onFileChanged(file); + } + + @Override + public void onFileChanged(Path file) { + if (file.equals(webIdentityTokenFileSymlink)) { + LOGGER.debug("WS web identity token file [{}] changed, updating credentials", file); + SocketAccess.doPrivilegedVoid(credentialsProvider::resolveCredentials); } - }); - try { - resourceWatcherService.add(watcher, ResourceWatcherService.Frequency.LOW); - } catch (IOException e) { - throw new ElasticsearchException( - "failed to start watching AWS web identity token file [{}]", - e, - webIdentityTokenFileSymlink - ); } - } catch (Exception e) { - stsClient.shutdown(); - throw e; + }); + try { + resourceWatcherService.add(watcher, ResourceWatcherService.Frequency.LOW); + } catch (IOException e) { + throw new ElasticsearchException( + "failed to start watching AWS web identity token file [{}]", + e, + webIdentityTokenFileSymlink + ); } } @@ -447,44 +635,63 @@ boolean isActive() { return credentialsProvider != null; } - String getStsRegion() { - return stsRegion; + public void close() throws IOException { + Releasables.close(releasableFromSdkCloseable(credentialsProvider), releasableFromSdkCloseable(securityTokenServiceClient)); + } + + private static Releasable releasableFromSdkCloseable(SdkAutoCloseable sdkAutoCloseable) { + return sdkAutoCloseable == null ? 
null : sdkAutoCloseable::close; } @Override - public AWSCredentials getCredentials() { + public AwsCredentials resolveCredentials() { Objects.requireNonNull(credentialsProvider, "credentialsProvider is not set"); - return credentialsProvider.getCredentials(); + return credentialsProvider.resolveCredentials(); } @Override - public void refresh() { - if (credentialsProvider != null) { - credentialsProvider.refresh(); - } + public Class identityType() { + Objects.requireNonNull(credentialsProvider, "credentialsProvider is not set"); + return credentialsProvider.identityType(); } - public void shutdown() throws IOException { - if (credentialsProvider != null) { - IOUtils.close(credentialsProvider, () -> stsClient.shutdown()); - } + @Override + public CompletableFuture resolveIdentity(ResolveIdentityRequest request) { + Objects.requireNonNull(credentialsProvider, "credentialsProvider is not set"); + return SocketAccess.doPrivileged(() -> credentialsProvider.resolveIdentity(request)); + } + + @Override + public CompletableFuture resolveIdentity(Consumer consumer) { + Objects.requireNonNull(credentialsProvider, "credentialsProvider is not set"); + return SocketAccess.doPrivileged(() -> credentialsProvider.resolveIdentity(consumer)); + } + + @Override + public CompletableFuture resolveIdentity() { + Objects.requireNonNull(credentialsProvider, "credentialsProvider is not set"); + return SocketAccess.doPrivileged(credentialsProvider::resolveIdentity); } } - static class ErrorLoggingCredentialsProvider implements AWSCredentialsProvider { + /** + * Wraps a {@link AwsCredentialsProvider} implementation and only adds error logging for any {@link #resolveCredentials()} calls that + * throw. + */ + static class ErrorLoggingCredentialsProvider implements AwsCredentialsProvider { - private final AWSCredentialsProvider delegate; + private final AwsCredentialsProvider delegate; private final Logger logger; - ErrorLoggingCredentialsProvider(AWSCredentialsProvider delegate, Logger logger) { + ErrorLoggingCredentialsProvider(AwsCredentialsProvider delegate, Logger logger) { this.delegate = Objects.requireNonNull(delegate); this.logger = Objects.requireNonNull(logger); } @Override - public AWSCredentials getCredentials() { + public AwsCredentials resolveCredentials() { try { - return delegate.getCredentials(); + return delegate.resolveCredentials(); } catch (Exception e) { logger.error(() -> "Unable to load credentials from " + delegate, e); throw e; @@ -492,13 +699,42 @@ public AWSCredentials getCredentials() { } @Override - public void refresh() { - try { - delegate.refresh(); - } catch (Exception e) { - logger.error(() -> "Unable to refresh " + delegate, e); - throw e; + public Class identityType() { + return delegate.identityType(); + } + + private T resultHandler(T result, Throwable exception) { + if (exception != null) { + logger.error(() -> "Unable to resolve identity from " + delegate, exception); + if (exception instanceof Error error) { + throw error; + } else if (exception instanceof RuntimeException runtimeException) { + throw runtimeException; + } else { + throw new RuntimeException(exception); + } } + return result; + } + + @Override + public CompletableFuture resolveIdentity(ResolveIdentityRequest request) { + return SocketAccess.doPrivileged(() -> delegate.resolveIdentity(request).handle(this::resultHandler)); + } + + @Override + public CompletableFuture resolveIdentity(Consumer consumer) { + return SocketAccess.doPrivileged(() -> delegate.resolveIdentity(consumer).handle(this::resultHandler)); + 
} + + @Override + public CompletableFuture resolveIdentity() { + return SocketAccess.doPrivileged(() -> delegate.resolveIdentity().handle(this::resultHandler)); + } + + @Override + public String toString() { + return "ErrorLogging[" + delegate + "]"; } } diff --git a/modules/repository-s3/src/main/resources/org/elasticsearch/repositories/s3/regions_by_endpoint.txt b/modules/repository-s3/src/main/resources/org/elasticsearch/repositories/s3/regions_by_endpoint.txt new file mode 100644 index 0000000000000..3fae5c314c10b --- /dev/null +++ b/modules/repository-s3/src/main/resources/org/elasticsearch/repositories/s3/regions_by_endpoint.txt @@ -0,0 +1,179 @@ +af-south-1 s3-fips.af-south-1.amazonaws.com +af-south-1 s3-fips.dualstack.af-south-1.amazonaws.com +af-south-1 s3.af-south-1.amazonaws.com +af-south-1 s3.dualstack.af-south-1.amazonaws.com +ap-east-1 s3-fips.ap-east-1.amazonaws.com +ap-east-1 s3-fips.dualstack.ap-east-1.amazonaws.com +ap-east-1 s3.ap-east-1.amazonaws.com +ap-east-1 s3.dualstack.ap-east-1.amazonaws.com +ap-northeast-1 s3-fips.ap-northeast-1.amazonaws.com +ap-northeast-1 s3-fips.dualstack.ap-northeast-1.amazonaws.com +ap-northeast-1 s3.ap-northeast-1.amazonaws.com +ap-northeast-1 s3.dualstack.ap-northeast-1.amazonaws.com +ap-northeast-2 s3-fips.ap-northeast-2.amazonaws.com +ap-northeast-2 s3-fips.dualstack.ap-northeast-2.amazonaws.com +ap-northeast-2 s3.ap-northeast-2.amazonaws.com +ap-northeast-2 s3.dualstack.ap-northeast-2.amazonaws.com +ap-northeast-3 s3-fips.ap-northeast-3.amazonaws.com +ap-northeast-3 s3-fips.dualstack.ap-northeast-3.amazonaws.com +ap-northeast-3 s3.ap-northeast-3.amazonaws.com +ap-northeast-3 s3.dualstack.ap-northeast-3.amazonaws.com +ap-south-1 s3-fips.ap-south-1.amazonaws.com +ap-south-1 s3-fips.dualstack.ap-south-1.amazonaws.com +ap-south-1 s3.ap-south-1.amazonaws.com +ap-south-1 s3.dualstack.ap-south-1.amazonaws.com +ap-south-2 s3-fips.ap-south-2.amazonaws.com +ap-south-2 s3-fips.dualstack.ap-south-2.amazonaws.com +ap-south-2 s3.ap-south-2.amazonaws.com +ap-south-2 s3.dualstack.ap-south-2.amazonaws.com +ap-southeast-1 s3-fips.ap-southeast-1.amazonaws.com +ap-southeast-1 s3-fips.dualstack.ap-southeast-1.amazonaws.com +ap-southeast-1 s3.ap-southeast-1.amazonaws.com +ap-southeast-1 s3.dualstack.ap-southeast-1.amazonaws.com +ap-southeast-2 s3-fips.ap-southeast-2.amazonaws.com +ap-southeast-2 s3-fips.dualstack.ap-southeast-2.amazonaws.com +ap-southeast-2 s3.ap-southeast-2.amazonaws.com +ap-southeast-2 s3.dualstack.ap-southeast-2.amazonaws.com +ap-southeast-3 s3-fips.ap-southeast-3.amazonaws.com +ap-southeast-3 s3-fips.dualstack.ap-southeast-3.amazonaws.com +ap-southeast-3 s3.ap-southeast-3.amazonaws.com +ap-southeast-3 s3.dualstack.ap-southeast-3.amazonaws.com +ap-southeast-4 s3-fips.ap-southeast-4.amazonaws.com +ap-southeast-4 s3-fips.dualstack.ap-southeast-4.amazonaws.com +ap-southeast-4 s3.ap-southeast-4.amazonaws.com +ap-southeast-4 s3.dualstack.ap-southeast-4.amazonaws.com +ap-southeast-5 s3-fips.ap-southeast-5.amazonaws.com +ap-southeast-5 s3-fips.dualstack.ap-southeast-5.amazonaws.com +ap-southeast-5 s3.ap-southeast-5.amazonaws.com +ap-southeast-5 s3.dualstack.ap-southeast-5.amazonaws.com +ap-southeast-7 s3-fips.ap-southeast-7.amazonaws.com +ap-southeast-7 s3-fips.dualstack.ap-southeast-7.amazonaws.com +ap-southeast-7 s3.ap-southeast-7.amazonaws.com +ap-southeast-7 s3.dualstack.ap-southeast-7.amazonaws.com +aws-cn-global s3.aws-cn-global.amazonaws.com.cn +aws-cn-global s3.dualstack.aws-cn-global.amazonaws.com.cn +aws-iso-b-global 
s3-fips.aws-iso-b-global.sc2s.sgov.gov +aws-iso-b-global s3-fips.dualstack.aws-iso-b-global.sc2s.sgov.gov +aws-iso-b-global s3.aws-iso-b-global.sc2s.sgov.gov +aws-iso-b-global s3.dualstack.aws-iso-b-global.sc2s.sgov.gov +aws-iso-global s3-fips.aws-iso-global.c2s.ic.gov +aws-iso-global s3-fips.dualstack.aws-iso-global.c2s.ic.gov +aws-iso-global s3.aws-iso-global.c2s.ic.gov +aws-iso-global s3.dualstack.aws-iso-global.c2s.ic.gov +aws-us-gov-global s3-fips.aws-us-gov-global.amazonaws.com +aws-us-gov-global s3-fips.dualstack.aws-us-gov-global.amazonaws.com +aws-us-gov-global s3.aws-us-gov-global.amazonaws.com +aws-us-gov-global s3.dualstack.aws-us-gov-global.amazonaws.com +ca-central-1 s3-fips.ca-central-1.amazonaws.com +ca-central-1 s3-fips.dualstack.ca-central-1.amazonaws.com +ca-central-1 s3.ca-central-1.amazonaws.com +ca-central-1 s3.dualstack.ca-central-1.amazonaws.com +ca-west-1 s3-fips.ca-west-1.amazonaws.com +ca-west-1 s3-fips.dualstack.ca-west-1.amazonaws.com +ca-west-1 s3.ca-west-1.amazonaws.com +ca-west-1 s3.dualstack.ca-west-1.amazonaws.com +cn-north-1 s3.cn-north-1.amazonaws.com.cn +cn-north-1 s3.dualstack.cn-north-1.amazonaws.com.cn +cn-northwest-1 s3.cn-northwest-1.amazonaws.com.cn +cn-northwest-1 s3.dualstack.cn-northwest-1.amazonaws.com.cn +eu-central-1 s3-fips.dualstack.eu-central-1.amazonaws.com +eu-central-1 s3-fips.eu-central-1.amazonaws.com +eu-central-1 s3.dualstack.eu-central-1.amazonaws.com +eu-central-1 s3.eu-central-1.amazonaws.com +eu-central-2 s3-fips.dualstack.eu-central-2.amazonaws.com +eu-central-2 s3-fips.eu-central-2.amazonaws.com +eu-central-2 s3.dualstack.eu-central-2.amazonaws.com +eu-central-2 s3.eu-central-2.amazonaws.com +eu-isoe-west-1 s3-fips.dualstack.eu-isoe-west-1.cloud.adc-e.uk +eu-isoe-west-1 s3-fips.eu-isoe-west-1.cloud.adc-e.uk +eu-isoe-west-1 s3.dualstack.eu-isoe-west-1.cloud.adc-e.uk +eu-isoe-west-1 s3.eu-isoe-west-1.cloud.adc-e.uk +eu-north-1 s3-fips.dualstack.eu-north-1.amazonaws.com +eu-north-1 s3-fips.eu-north-1.amazonaws.com +eu-north-1 s3.dualstack.eu-north-1.amazonaws.com +eu-north-1 s3.eu-north-1.amazonaws.com +eu-south-1 s3-fips.dualstack.eu-south-1.amazonaws.com +eu-south-1 s3-fips.eu-south-1.amazonaws.com +eu-south-1 s3.dualstack.eu-south-1.amazonaws.com +eu-south-1 s3.eu-south-1.amazonaws.com +eu-south-2 s3-fips.dualstack.eu-south-2.amazonaws.com +eu-south-2 s3-fips.eu-south-2.amazonaws.com +eu-south-2 s3.dualstack.eu-south-2.amazonaws.com +eu-south-2 s3.eu-south-2.amazonaws.com +eu-west-1 s3-fips.dualstack.eu-west-1.amazonaws.com +eu-west-1 s3-fips.eu-west-1.amazonaws.com +eu-west-1 s3.dualstack.eu-west-1.amazonaws.com +eu-west-1 s3.eu-west-1.amazonaws.com +eu-west-2 s3-fips.dualstack.eu-west-2.amazonaws.com +eu-west-2 s3-fips.eu-west-2.amazonaws.com +eu-west-2 s3.dualstack.eu-west-2.amazonaws.com +eu-west-2 s3.eu-west-2.amazonaws.com +eu-west-3 s3-fips.dualstack.eu-west-3.amazonaws.com +eu-west-3 s3-fips.eu-west-3.amazonaws.com +eu-west-3 s3.dualstack.eu-west-3.amazonaws.com +eu-west-3 s3.eu-west-3.amazonaws.com +il-central-1 s3-fips.dualstack.il-central-1.amazonaws.com +il-central-1 s3-fips.il-central-1.amazonaws.com +il-central-1 s3.dualstack.il-central-1.amazonaws.com +il-central-1 s3.il-central-1.amazonaws.com +me-central-1 s3-fips.dualstack.me-central-1.amazonaws.com +me-central-1 s3-fips.me-central-1.amazonaws.com +me-central-1 s3.dualstack.me-central-1.amazonaws.com +me-central-1 s3.me-central-1.amazonaws.com +me-south-1 s3-fips.dualstack.me-south-1.amazonaws.com +me-south-1 s3-fips.me-south-1.amazonaws.com +me-south-1 
s3.dualstack.me-south-1.amazonaws.com +me-south-1 s3.me-south-1.amazonaws.com +mx-central-1 s3-fips.dualstack.mx-central-1.amazonaws.com +mx-central-1 s3-fips.mx-central-1.amazonaws.com +mx-central-1 s3.dualstack.mx-central-1.amazonaws.com +mx-central-1 s3.mx-central-1.amazonaws.com +sa-east-1 s3-fips.dualstack.sa-east-1.amazonaws.com +sa-east-1 s3-fips.sa-east-1.amazonaws.com +sa-east-1 s3.dualstack.sa-east-1.amazonaws.com +sa-east-1 s3.sa-east-1.amazonaws.com +us-east-1 s3.us-east-1.amazonaws.com +us-east-1 s3-fips.dualstack.us-east-1.amazonaws.com +us-east-1 s3-fips.us-east-1.amazonaws.com +us-east-1 s3.amazonaws.com +us-east-1 s3.dualstack.us-east-1.amazonaws.com +us-east-2 s3-fips.dualstack.us-east-2.amazonaws.com +us-east-2 s3-fips.us-east-2.amazonaws.com +us-east-2 s3.dualstack.us-east-2.amazonaws.com +us-east-2 s3.us-east-2.amazonaws.com +us-gov-east-1 s3-fips.dualstack.us-gov-east-1.amazonaws.com +us-gov-east-1 s3-fips.us-gov-east-1.amazonaws.com +us-gov-east-1 s3.dualstack.us-gov-east-1.amazonaws.com +us-gov-east-1 s3.us-gov-east-1.amazonaws.com +us-gov-west-1 s3-fips.dualstack.us-gov-west-1.amazonaws.com +us-gov-west-1 s3-fips.us-gov-west-1.amazonaws.com +us-gov-west-1 s3.dualstack.us-gov-west-1.amazonaws.com +us-gov-west-1 s3.us-gov-west-1.amazonaws.com +us-iso-east-1 s3-fips.dualstack.us-iso-east-1.c2s.ic.gov +us-iso-east-1 s3-fips.us-iso-east-1.c2s.ic.gov +us-iso-east-1 s3.dualstack.us-iso-east-1.c2s.ic.gov +us-iso-east-1 s3.us-iso-east-1.c2s.ic.gov +us-iso-west-1 s3-fips.dualstack.us-iso-west-1.c2s.ic.gov +us-iso-west-1 s3-fips.us-iso-west-1.c2s.ic.gov +us-iso-west-1 s3.dualstack.us-iso-west-1.c2s.ic.gov +us-iso-west-1 s3.us-iso-west-1.c2s.ic.gov +us-isob-east-1 s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov +us-isob-east-1 s3-fips.us-isob-east-1.sc2s.sgov.gov +us-isob-east-1 s3.dualstack.us-isob-east-1.sc2s.sgov.gov +us-isob-east-1 s3.us-isob-east-1.sc2s.sgov.gov +us-isof-east-1 s3-fips.dualstack.us-isof-east-1.csp.hci.ic.gov +us-isof-east-1 s3-fips.us-isof-east-1.csp.hci.ic.gov +us-isof-east-1 s3.dualstack.us-isof-east-1.csp.hci.ic.gov +us-isof-east-1 s3.us-isof-east-1.csp.hci.ic.gov +us-isof-south-1 s3-fips.dualstack.us-isof-south-1.csp.hci.ic.gov +us-isof-south-1 s3-fips.us-isof-south-1.csp.hci.ic.gov +us-isof-south-1 s3.dualstack.us-isof-south-1.csp.hci.ic.gov +us-isof-south-1 s3.us-isof-south-1.csp.hci.ic.gov +us-west-1 s3-fips.dualstack.us-west-1.amazonaws.com +us-west-1 s3-fips.us-west-1.amazonaws.com +us-west-1 s3.dualstack.us-west-1.amazonaws.com +us-west-1 s3.us-west-1.amazonaws.com +us-west-2 s3-fips.dualstack.us-west-2.amazonaws.com +us-west-2 s3-fips.us-west-2.amazonaws.com +us-west-2 s3.dualstack.us-west-2.amazonaws.com +us-west-2 s3.us-west-2.amazonaws.com diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java index 0aac0ba898f97..5244c956df75d 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java @@ -9,31 +9,39 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.ClientConfiguration; -import com.amazonaws.Protocol; -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.auth.AWSCredentialsProviderChain; -import com.amazonaws.auth.AWSStaticCredentialsProvider; -import 
com.amazonaws.auth.BasicAWSCredentials; -import com.amazonaws.auth.EC2ContainerCredentialsProviderWrapper; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProviderChain; +import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; +import software.amazon.awssdk.regions.Region; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.watcher.ResourceWatcherService; import org.mockito.ArgumentCaptor; import org.mockito.Mockito; +import org.mockito.stubbing.Answer; +import java.io.IOException; import java.util.Locale; import java.util.Map; +import java.util.concurrent.CompletableFuture; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Mockito.mock; public class AwsS3ServiceImplTests extends ESTestCase { @@ -41,38 +49,47 @@ public class AwsS3ServiceImplTests extends ESTestCase { S3Service.CustomWebIdentityTokenCredentialsProvider.class ); - public void testAWSCredentialsDefaultToInstanceProviders() { - final String inexistentClientName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT); - final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(Settings.EMPTY, inexistentClientName); - final AWSCredentialsProvider credentialsProvider = S3Service.buildCredentials( + /** + * {@code webIdentityTokenCredentialsProvider} is not set up, so {@link S3Service#buildCredentials} should not use it and should instead + * fall through to a {@link DefaultCredentialsProvider}. 
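     * <p>For reference, SDK v2's {@link DefaultCredentialsProvider} consults, roughly in order: Java system
     * properties, environment variables, the web identity token file, the shared credentials/config profile
     * files, and finally the container and instance metadata services.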
+ */ + public void testAwsCredentialsFallsThroughToDefaultCredentialsProvider() { + final String nonExistentClientName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT); + final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(Settings.EMPTY, nonExistentClientName); + final AwsCredentialsProvider credentialsProvider = S3Service.buildCredentials( logger, clientSettings, webIdentityTokenCredentialsProvider ); - assertThat(credentialsProvider, instanceOf(S3Service.PrivilegedAWSCredentialsProvider.class)); - var privilegedAWSCredentialsProvider = (S3Service.PrivilegedAWSCredentialsProvider) credentialsProvider; - assertThat(privilegedAWSCredentialsProvider.getCredentialsProvider(), instanceOf(EC2ContainerCredentialsProviderWrapper.class)); + assertThat(credentialsProvider, instanceOf(S3Service.PrivilegedAwsCredentialsProvider.class)); + var privilegedAWSCredentialsProvider = (S3Service.PrivilegedAwsCredentialsProvider) credentialsProvider; + assertThat(privilegedAWSCredentialsProvider.getCredentialsProvider(), instanceOf(DefaultCredentialsProvider.class)); } public void testSupportsWebIdentityTokenCredentials() { - Mockito.when(webIdentityTokenCredentialsProvider.getCredentials()) - .thenReturn(new BasicAWSCredentials("sts_access_key_id", "sts_secret_key")); + AwsBasicCredentials credentials = AwsBasicCredentials.create("sts_access_key_id", "sts_secret_key"); + Mockito.when(webIdentityTokenCredentialsProvider.resolveCredentials()).thenReturn(credentials); + // Mockito has difficulty with #resolveIdentity()'s generic return type. Using #thenAnswer (instead of #thenReturn) provides a + // workaround. + Answer> answer = invocation -> CompletableFuture.completedFuture(credentials); + Mockito.when(webIdentityTokenCredentialsProvider.resolveIdentity()).thenAnswer(answer); Mockito.when(webIdentityTokenCredentialsProvider.isActive()).thenReturn(true); - AWSCredentialsProvider credentialsProvider = S3Service.buildCredentials( + AwsCredentialsProvider credentialsProvider = S3Service.buildCredentials( logger, S3ClientSettings.getClientSettings(Settings.EMPTY, randomAlphaOfLength(8).toLowerCase(Locale.ROOT)), webIdentityTokenCredentialsProvider ); - assertThat(credentialsProvider, instanceOf(S3Service.PrivilegedAWSCredentialsProvider.class)); - var privilegedAWSCredentialsProvider = (S3Service.PrivilegedAWSCredentialsProvider) credentialsProvider; - assertThat(privilegedAWSCredentialsProvider.getCredentialsProvider(), instanceOf(AWSCredentialsProviderChain.class)); - AWSCredentials credentials = privilegedAWSCredentialsProvider.getCredentials(); - assertEquals("sts_access_key_id", credentials.getAWSAccessKeyId()); - assertEquals("sts_secret_key", credentials.getAWSSecretKey()); + assertThat(credentialsProvider, instanceOf(S3Service.PrivilegedAwsCredentialsProvider.class)); + var privilegedAWSCredentialsProvider = (S3Service.PrivilegedAwsCredentialsProvider) credentialsProvider; + assertThat(privilegedAWSCredentialsProvider.getCredentialsProvider(), instanceOf(AwsCredentialsProviderChain.class)); + AwsCredentials resolvedCredentials = privilegedAWSCredentialsProvider.resolveCredentials(); + assertEquals("sts_access_key_id", resolvedCredentials.accessKeyId()); + assertEquals("sts_secret_key", resolvedCredentials.secretAccessKey()); } - public void testAWSCredentialsFromKeystore() { + public void testAwsCredentialsFromKeystore() { + /** Create a random number of clients that use basic access key + secret key credentials */ final MockSecureSettings secureSettings = new 
MockSecureSettings(); final String clientNamePrefix = "some_client_name_"; final int clientsCount = randomIntBetween(0, 4); @@ -88,28 +105,29 @@ public void testAWSCredentialsFromKeystore() { for (int i = 0; i < clientsCount; i++) { final String clientName = clientNamePrefix + i; final S3ClientSettings someClientSettings = allClientsSettings.get(clientName); - final AWSCredentialsProvider credentialsProvider = S3Service.buildCredentials( + final AwsCredentialsProvider credentialsProvider = S3Service.buildCredentials( logger, someClientSettings, webIdentityTokenCredentialsProvider ); - assertThat(credentialsProvider, instanceOf(AWSStaticCredentialsProvider.class)); - assertThat(credentialsProvider.getCredentials().getAWSAccessKeyId(), is(clientName + "_aws_access_key")); - assertThat(credentialsProvider.getCredentials().getAWSSecretKey(), is(clientName + "_aws_secret_key")); + assertThat(credentialsProvider, instanceOf(StaticCredentialsProvider.class)); + assertThat(credentialsProvider.resolveCredentials().accessKeyId(), is(clientName + "_aws_access_key")); + assertThat(credentialsProvider.resolveCredentials().secretAccessKey(), is(clientName + "_aws_secret_key")); } - // test default exists and is an Instance provider + + /** Test that the default client, without basic access + secret keys, will fall back to using the DefaultCredentialsProvider */ final S3ClientSettings defaultClientSettings = allClientsSettings.get("default"); - final AWSCredentialsProvider defaultCredentialsProvider = S3Service.buildCredentials( + final AwsCredentialsProvider defaultCredentialsProvider = S3Service.buildCredentials( logger, defaultClientSettings, webIdentityTokenCredentialsProvider ); - assertThat(defaultCredentialsProvider, instanceOf(S3Service.PrivilegedAWSCredentialsProvider.class)); - var privilegedAWSCredentialsProvider = (S3Service.PrivilegedAWSCredentialsProvider) defaultCredentialsProvider; - assertThat(privilegedAWSCredentialsProvider.getCredentialsProvider(), instanceOf(EC2ContainerCredentialsProviderWrapper.class)); + assertThat(defaultCredentialsProvider, instanceOf(S3Service.PrivilegedAwsCredentialsProvider.class)); + var privilegedAWSCredentialsProvider = (S3Service.PrivilegedAwsCredentialsProvider) defaultCredentialsProvider; + assertThat(privilegedAWSCredentialsProvider.getCredentialsProvider(), instanceOf(DefaultCredentialsProvider.class)); } - public void testSetDefaultCredential() { + public void testBasicAccessKeyAndSecretKeyCredentials() { final MockSecureSettings secureSettings = new MockSecureSettings(); final String awsAccessKey = randomAlphaOfLength(8); final String awsSecretKey = randomAlphaOfLength(8); @@ -120,14 +138,14 @@ public void testSetDefaultCredential() { assertThat(allClientsSettings.size(), is(1)); // test default exists and is an Instance provider final S3ClientSettings defaultClientSettings = allClientsSettings.get("default"); - final AWSCredentialsProvider defaultCredentialsProvider = S3Service.buildCredentials( + final AwsCredentialsProvider defaultCredentialsProvider = S3Service.buildCredentials( logger, defaultClientSettings, webIdentityTokenCredentialsProvider ); - assertThat(defaultCredentialsProvider, instanceOf(AWSStaticCredentialsProvider.class)); - assertThat(defaultCredentialsProvider.getCredentials().getAWSAccessKeyId(), is(awsAccessKey)); - assertThat(defaultCredentialsProvider.getCredentials().getAWSSecretKey(), is(awsSecretKey)); + assertThat(defaultCredentialsProvider, instanceOf(StaticCredentialsProvider.class)); + 
assertThat(defaultCredentialsProvider.resolveCredentials().accessKeyId(), is(awsAccessKey)); + assertThat(defaultCredentialsProvider.resolveCredentials().secretAccessKey(), is(awsSecretKey)); } public void testCredentialsIncomplete() { @@ -151,77 +169,60 @@ public void testCredentialsIncomplete() { public void testAWSDefaultConfiguration() { launchAWSConfigurationTest( Settings.EMPTY, - Protocol.HTTPS, null, -1, null, null, + null, 3, - ClientConfiguration.DEFAULT_THROTTLE_RETRIES, - ClientConfiguration.DEFAULT_SOCKET_TIMEOUT + Math.toIntExact(S3ClientSettings.Defaults.READ_TIMEOUT.seconds()) ); } - public void testAWSConfigurationWithAwsSettings() { + public void testAwsConfigurationWithAwsSettings() { final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("s3.client.default.proxy.username", "aws_proxy_username"); secureSettings.setString("s3.client.default.proxy.password", "aws_proxy_password"); final Settings settings = Settings.builder() .setSecureSettings(secureSettings) - .put("s3.client.default.protocol", "http") - .put("s3.client.default.proxy.host", "aws_proxy_host") + // NB: URI #getHost returns null if host string contains underscores: don't do it. Underscores are invalid in URL host strings. + .put("s3.client.default.proxy.host", "aws-proxy-host") .put("s3.client.default.proxy.port", 8080) + .put("s3.client.default.proxy.scheme", "http") .put("s3.client.default.read_timeout", "10s") .build(); - launchAWSConfigurationTest( - settings, - Protocol.HTTP, - "aws_proxy_host", - 8080, - "aws_proxy_username", - "aws_proxy_password", - 3, - ClientConfiguration.DEFAULT_THROTTLE_RETRIES, - 10000 - ); + launchAWSConfigurationTest(settings, "aws-proxy-host", 8080, "http", "aws_proxy_username", "aws_proxy_password", 3, 10000); } public void testRepositoryMaxRetries() { final Settings settings = Settings.builder().put("s3.client.default.max_retries", 5).build(); - launchAWSConfigurationTest(settings, Protocol.HTTPS, null, -1, null, null, 5, ClientConfiguration.DEFAULT_THROTTLE_RETRIES, 50000); - } - - public void testRepositoryThrottleRetries() { - final boolean throttling = randomBoolean(); - - final Settings settings = Settings.builder().put("s3.client.default.use_throttle_retries", throttling).build(); - launchAWSConfigurationTest(settings, Protocol.HTTPS, null, -1, null, null, 3, throttling, 50000); + launchAWSConfigurationTest(settings, null, -1, null, null, null, 5, 50000); } private void launchAWSConfigurationTest( Settings settings, - Protocol expectedProtocol, String expectedProxyHost, int expectedProxyPort, + String expectedHttpScheme, String expectedProxyUsername, String expectedProxyPassword, Integer expectedMaxRetries, - boolean expectedUseThrottleRetries, int expectedReadTimeout ) { - final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, "default"); - final ClientConfiguration configuration = S3Service.buildConfiguration(clientSettings); - - assertThat(configuration.getResponseMetadataCacheSize(), is(0)); - assertThat(configuration.getProtocol(), is(expectedProtocol)); - assertThat(configuration.getProxyHost(), is(expectedProxyHost)); - assertThat(configuration.getProxyPort(), is(expectedProxyPort)); - assertThat(configuration.getProxyUsername(), is(expectedProxyUsername)); - assertThat(configuration.getProxyPassword(), is(expectedProxyPassword)); - assertThat(configuration.getMaxErrorRetry(), is(expectedMaxRetries)); - assertThat(configuration.useThrottledRetries(), is(expectedUseThrottleRetries)); - 
assertThat(configuration.getSocketTimeout(), is(expectedReadTimeout)); + + final var proxyClientConfiguration = S3Service.buildProxyConfiguration(clientSettings); + if (proxyClientConfiguration.isPresent()) { + final var proxyConfig = proxyClientConfiguration.get(); + assertThat(proxyConfig.username(), is(expectedProxyUsername)); + assertThat(proxyConfig.password(), is(expectedProxyPassword)); + assertThat(proxyConfig.scheme(), is(expectedHttpScheme)); + assertThat(proxyConfig.host(), is(expectedProxyHost)); + assertThat(proxyConfig.port(), is(expectedProxyPort)); + } + + final ClientOverrideConfiguration configuration = S3Service.buildConfiguration(clientSettings, false); + assertThat(configuration.retryStrategy().get().maxAttempts(), is(expectedMaxRetries + 1)); } public void testEndpointSetting() { @@ -235,14 +236,36 @@ private void assertEndpoint(Settings repositorySettings, Settings settings, Stri assertThat(clientSettings.endpoint, is(expectedEndpoint)); } - public void testLoggingCredentialsProviderCatchesErrors() { - var mockProvider = Mockito.mock(AWSCredentialsProvider.class); + public void testEndPointAndRegionOverrides() throws IOException { + try ( + S3Service s3Service = new S3Service( + mock(Environment.class), + Settings.EMPTY, + mock(ResourceWatcherService.class), + () -> Region.of("es-test-region") + ) + ) { + s3Service.start(); + final String endpointOverride = "http://first"; + final Settings settings = Settings.builder().put("endpoint", endpointOverride).build(); + final AmazonS3Reference reference = s3Service.client(new RepositoryMetadata("first", "s3", settings)); + + assertEquals(endpointOverride, reference.client().serviceClientConfiguration().endpointOverride().get().toString()); + assertEquals("es-test-region", reference.client().serviceClientConfiguration().region().toString()); + + reference.close(); + s3Service.doClose(); + } + } + + public void testLoggingCredentialsProviderCatchesErrorsOnResolveCredentials() { + var mockProvider = Mockito.mock(AwsCredentialsProvider.class); String mockProviderErrorMessage = "mockProvider failed to generate credentials"; - Mockito.when(mockProvider.getCredentials()).thenThrow(new IllegalStateException(mockProviderErrorMessage)); + Mockito.when(mockProvider.resolveCredentials()).thenThrow(new IllegalStateException(mockProviderErrorMessage)); var mockLogger = Mockito.mock(Logger.class); var credentialsProvider = new S3Service.ErrorLoggingCredentialsProvider(mockProvider, mockLogger); - var exception = expectThrows(IllegalStateException.class, credentialsProvider::getCredentials); + var exception = expectThrows(IllegalStateException.class, credentialsProvider::resolveCredentials); assertEquals(mockProviderErrorMessage, exception.getMessage()); var messageSupplierCaptor = ArgumentCaptor.forClass(Supplier.class); @@ -253,22 +276,27 @@ public void testLoggingCredentialsProviderCatchesErrors() { assertThat(throwableCaptor.getValue().getMessage(), equalTo(mockProviderErrorMessage)); } - public void testLoggingCredentialsProviderCatchesErrorsOnRefresh() { - var mockProvider = Mockito.mock(AWSCredentialsProvider.class); - String mockProviderErrorMessage = "mockProvider failed to refresh"; - Mockito.doThrow(new IllegalStateException(mockProviderErrorMessage)).when(mockProvider).refresh(); + public void testLoggingCredentialsProviderCatchesErrorsOnResolveIdentity() { + // Set up #resolveIdentity() to return a future with an exception. 
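    // (Completing the future exceptionally, rather than throwing from the stub, models an asynchronous
    // resolution failure; resultHandler in ErrorLoggingCredentialsProvider is expected to log the error
    // and rethrow the original IllegalStateException.)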
+ var mockCredentialsProvider = Mockito.mock(AwsCredentialsProvider.class);
+ String mockProviderErrorMessage = "mockProvider failed to generate credentials";
+ Answer<CompletableFuture<AwsCredentialsIdentity>> answer = invocation -> {
+ CompletableFuture<AwsCredentialsIdentity> future = new CompletableFuture<>();
+ future.completeExceptionally(new IllegalStateException(mockProviderErrorMessage));
+ return future;
+ };
+ Mockito.when(mockCredentialsProvider.resolveIdentity()).thenAnswer(answer);
var mockLogger = Mockito.mock(Logger.class);
+ var credentialsProvider = new S3Service.ErrorLoggingCredentialsProvider(mockCredentialsProvider, mockLogger);
- var credentialsProvider = new S3Service.ErrorLoggingCredentialsProvider(mockProvider, mockLogger);
- var exception = expectThrows(IllegalStateException.class, credentialsProvider::refresh);
- assertEquals(mockProviderErrorMessage, exception.getMessage());
+ // The S3Service.ErrorLoggingCredentialsProvider should log the error.
+ credentialsProvider.resolveIdentity();
var messageSupplierCaptor = ArgumentCaptor.forClass(Supplier.class);
var throwableCaptor = ArgumentCaptor.forClass(Throwable.class);
Mockito.verify(mockLogger).error(messageSupplierCaptor.capture(), throwableCaptor.capture());
- assertThat(messageSupplierCaptor.getValue().get().toString(), startsWith("Unable to refresh"));
+ assertThat(messageSupplierCaptor.getValue().get().toString(), startsWith("Unable to resolve identity from"));
assertThat(throwableCaptor.getValue().getMessage(), equalTo(mockProviderErrorMessage));
}
-
}
diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RegionFromEndpointGuesserTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RegionFromEndpointGuesserTests.java
new file mode 100644
index 0000000000000..9fe0c40c83979
--- /dev/null
+++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RegionFromEndpointGuesserTests.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
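The new RegionFromEndpointGuesserTests below pins down the expected heuristic: well-known S3 endpoint host names map to a region, while anything else yields no guess (null). A rough sketch of a guesser with those semantics, assuming the real RegionFromEndpointGuesser may use a different table- or pattern-based approach:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    final class RegionGuessSketch {
        // Matches standard and FIPS S3 endpoints such as s3.eu-west-1.amazonaws.com
        // and s3-fips.us-gov-east-1.amazonaws.com.
        private static final Pattern S3_HOST = Pattern.compile("^s3(?:-fips)?\\.([a-z0-9-]+)\\.amazonaws\\.com$");

        static String guessRegion(String host) {
            if ("s3.amazonaws.com".equals(host)) {
                return "us-east-1"; // legacy global endpoint
            }
            Matcher matcher = S3_HOST.matcher(host);
            return matcher.matches() ? matcher.group(1) : null; // null when no guess is possible
        }
    }
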
+ */ + +package org.elasticsearch.repositories.s3; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.test.ESTestCase; + +public class RegionFromEndpointGuesserTests extends ESTestCase { + public void testRegionGuessing() { + assertRegionGuess("s3.amazonaws.com", "us-east-1"); + assertRegionGuess("s3.eu-west-1.amazonaws.com", "eu-west-1"); + assertRegionGuess("s3.us-west-2.amazonaws.com", "us-west-2"); + assertRegionGuess("s3.ap-southeast-1.amazonaws.com", "ap-southeast-1"); + assertRegionGuess("s3-fips.us-gov-east-1.amazonaws.com", "us-gov-east-1"); + assertRegionGuess("10.0.0.4", null); + assertRegionGuess("random.endpoint.internal.net", null); + } + + private static void assertRegionGuess(String endpoint, @Nullable String expectedRegion) { + assertEquals(endpoint, expectedRegion, RegionFromEndpointGuesser.guessRegion(endpoint)); + } +} diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java index c40685c26c3da..4e82d2161452f 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -9,14 +9,15 @@ package org.elasticsearch.repositories.s3; import fixture.s3.S3HttpHandler; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.exception.SdkException; -import com.amazonaws.DnsResolver; -import com.amazonaws.SdkClientException; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import org.apache.http.HttpStatus; +import org.apache.http.conn.ConnectionPoolTimeoutException; +import org.apache.http.conn.DnsResolver; import org.apache.logging.log4j.Level; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.metadata.RepositoryMetadata; @@ -62,17 +63,22 @@ import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; +import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.SocketTimeoutException; import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; import java.nio.file.NoSuchFileException; +import java.time.Duration; +import java.time.temporal.ChronoUnit; import java.util.Arrays; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.OptionalInt; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.regex.Pattern; @@ -104,6 +110,7 @@ @SuppressForbidden(reason = "use a http server") public class S3BlobContainerRetriesTests extends AbstractBlobContainerRetriesTestCase { + private static final int MAX_NUMBER_SNAPSHOT_DELETE_RETRIES = 10; private S3Service service; private volatile boolean shouldErrorOnDns; private RecordingMeterRegistry recordingMeterRegistry; @@ -111,20 +118,34 @@ public class S3BlobContainerRetriesTests extends AbstractBlobContainerRetriesTes @Before public void setUp() throws Exception { shouldErrorOnDns = false; - service = new S3Service(Mockito.mock(Environment.class), Settings.EMPTY, Mockito.mock(ResourceWatcherService.class)) { + service = new S3Service(Mockito.mock(Environment.class), Settings.EMPTY, 
Mockito.mock(ResourceWatcherService.class), () -> null) {
+ private InetAddress[] resolveHost(String host) throws UnknownHostException {
+ assertEquals("127.0.0.1", host);
+ if (shouldErrorOnDns && randomBoolean() && randomBoolean()) {
+ throw new UnknownHostException(host);
+ }
+ return new InetAddress[] { InetAddress.getLoopbackAddress() };
+ }
+
@Override
- protected AmazonS3ClientBuilder buildClientBuilder(S3ClientSettings clientSettings) {
- final AmazonS3ClientBuilder builder = super.buildClientBuilder(clientSettings);
- final DnsResolver defaultDnsResolver = builder.getClientConfiguration().getDnsResolver();
- builder.getClientConfiguration().setDnsResolver(host -> {
- if (shouldErrorOnDns && randomBoolean() && randomBoolean()) {
- throw new UnknownHostException(host);
- }
- return defaultDnsResolver.resolve(host);
- });
- return builder;
+ DnsResolver getCustomDnsResolver() {
+ return this::resolveHost;
}
+
+ /**
+ * Overrides the S3Client's HTTP Client connection acquisition timeout. Essentially, once the client's max connection number is
+ * reached ({@link S3ClientSettings#MAX_CONNECTIONS_SETTING}), new requests will fail (timeout) fast when a connection is not
+ * available.
+ */
+ @Override
+ Optional<Duration> getConnectionAcquisitionTimeout() {
+ // This override is used to make requests timeout nearly immediately if the max number of concurrent connections is reached
+ // on the HTTP client.
+ return Optional.of(Duration.of(1, ChronoUnit.MILLIS));
+ }
+ };
+ service.start();
recordingMeterRegistry = new RecordingMeterRegistry();
super.setUp();
}
@@ -147,7 +168,7 @@ protected String bytesContentType() {
@Override
protected Class<? extends Exception> unresponsiveExceptionType() {
- return SdkClientException.class;
+ return SdkException.class;
}
@Override
@@ -165,6 +186,7 @@ protected BlobContainer createBlobContainer(
final InetSocketAddress address = httpServer.getAddress();
final String endpoint = "http://" + InetAddresses.toUriString(address.getAddress()) + ":" + address.getPort();
+ logger.info("--> creating client with endpoint [{}]", endpoint);
clientSettings.put(ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint);
if (maxRetries != null) {
@@ -548,6 +570,72 @@ public void testWriteLargeBlobStreaming() throws Exception {
assertEquals(blobSize, bytesReceived.get());
}
+ public void testMaxConnections() throws InterruptedException, IOException {
+ final CountDownLatch requestReceived = new CountDownLatch(1);
+ final CountDownLatch releaseRequest = new CountDownLatch(1);
+ int maxConnections = 1;
+ final BlobContainer blobContainer = createBlobContainer(null, null, null, maxConnections, null, null, null);
+
+ // Setting up a simple request handler that returns NOT_FOUND, so as to avoid setting up a response.
+ @SuppressForbidden(reason = "use a http server")
+ class NotFoundReadHandler implements HttpHandler {
+ @Override
+ public void handle(HttpExchange exchange) throws IOException {
+ logger.info("---> First request received");
+ // Signal that the request has begun.
+ requestReceived.countDown();
+ try {
+ // Wait for a signal to stop hanging.
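This handler deliberately parks the only pooled connection; the test then relies on the tiny connection-acquisition timeout installed in setUp() to make a second request fail fast instead of queueing. With the SDK v2 Apache client, those two limits are plain builder options, roughly as sketched here (illustrative; the actual wiring lives in S3Service and S3ClientSettings):

    import java.time.Duration;

    import software.amazon.awssdk.http.SdkHttpClient;
    import software.amazon.awssdk.http.apache.ApacheHttpClient;

    public class ApacheClientLimits {
        public static SdkHttpClient build() {
            return ApacheHttpClient.builder()
                // a single pooled connection: a second concurrent request must wait for it
                .maxConnections(1)
                // give up almost immediately instead of queueing for a free connection
                .connectionAcquisitionTimeout(Duration.ofMillis(1))
                .build();
        }
    }
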
+ releaseRequest.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + logger.info("---> First request released"); + exchange.sendResponseHeaders(HttpStatus.SC_NOT_FOUND, -1); + exchange.close(); + logger.info("---> First request finished"); + } + } + httpServer.createContext(downloadStorageEndpoint(blobContainer, "max_connection_timeout"), new NotFoundReadHandler()); + + // Start up an async thread to monopolize the one http client connection (the request will hang per the above handling). + Thread thread = new Thread(() -> { + expectThrows(NoSuchFileException.class, () -> { + try (InputStream inputStream = blobContainer.readBlob(randomRetryingPurpose(), "max_connection_timeout")) { + Streams.readFully(inputStream); + } + }); + }); + thread.start(); + logger.info("---> Sending first request"); + requestReceived.await(); + logger.info("---> First request was received and is hanging"); + + // Now we'll run a second request, which should error out with a connection exception. + final var exception = expectThrows(SdkClientException.class, () -> { + try ( + var inputStream = blobContainer.readBlob( + OperationPurpose.REPOSITORY_ANALYSIS /* no retries needed */, + "read_blob_not_found" + ) + ) { + Streams.readFully(inputStream); + } finally { + releaseRequest.countDown(); + logger.info("---> First request is released"); + thread.join(); + logger.info("---> First request has finished"); + } + }); + + assertThat(exception, instanceOf(SdkClientException.class)); + assertThat(exception.getCause(), instanceOf(ConnectionPoolTimeoutException.class)); + + assertThat(exception.getMessage(), containsString("Unable to execute HTTP request: Timeout waiting for connection from pool")); + assertThat(exception.getCause().getMessage(), containsString("Timeout waiting for connection from pool")); + } + public void testReadRetriesAfterMeaningfulProgress() throws Exception { final int maxRetries = between(0, 5); final int bufferSizeBytes = scaledRandomIntBetween( @@ -830,7 +918,15 @@ public void testSuppressedDeletionErrorsAreCapped() { "deletion should not succeed", () -> blobContainer.deleteBlobsIgnoringIfNotExists(randomPurpose(), blobs.iterator()) ); - assertThat(exception.getCause().getSuppressed().length, lessThan(S3BlobStore.MAX_DELETE_EXCEPTIONS)); + + var sdkGeneratedExceptions = 0; + final var innerExceptions = exception.getCause().getSuppressed(); + for (final var innerException : innerExceptions) { + if (innerException instanceof SdkClientException && innerException.getMessage().startsWith("Request attempt ")) { + sdkGeneratedExceptions += 1; + } + } + assertThat(innerExceptions.length - sdkGeneratedExceptions, lessThan(S3BlobStore.MAX_DELETE_EXCEPTIONS)); } public void testTrimmedLogAndCappedSuppressedErrorOnMultiObjectDeletionException() { diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java index f52b3f4b53a62..4be0325697975 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java @@ -9,21 +9,28 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.AmazonClientException; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; -import 
com.amazonaws.services.s3.model.CannedAccessControlList; -import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; -import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; -import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; -import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; -import com.amazonaws.services.s3.model.ObjectMetadata; -import com.amazonaws.services.s3.model.PartETag; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.PutObjectResult; -import com.amazonaws.services.s3.model.StorageClass; -import com.amazonaws.services.s3.model.UploadPartRequest; -import com.amazonaws.services.s3.model.UploadPartResult; +import software.amazon.awssdk.awscore.exception.AwsServiceException; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.AbortMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.CompletedPart; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.ObjectCannedACL; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.model.S3Exception; +import software.amazon.awssdk.services.s3.model.SdkPartType; +import software.amazon.awssdk.services.s3.model.StorageClass; +import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import software.amazon.awssdk.services.s3.model.UploadPartResponse; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStoreException; @@ -37,6 +44,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Optional; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -44,8 +52,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -109,30 +115,42 @@ public void testExecuteSingleUpload() throws IOException { final StorageClass storageClass = randomFrom(StorageClass.values()); when(blobStore.getStorageClass()).thenReturn(storageClass); - final CannedAccessControlList cannedAccessControlList = randomBoolean() ? randomFrom(CannedAccessControlList.values()) : null; + final ObjectCannedACL cannedAccessControlList = randomBoolean() ? 
randomFrom(ObjectCannedACL.values()) : null; if (cannedAccessControlList != null) { when(blobStore.getCannedACL()).thenReturn(cannedAccessControlList); } - final AmazonS3 client = mock(AmazonS3.class); - final AmazonS3Reference clientReference = new AmazonS3Reference(client); - when(blobStore.clientReference()).thenReturn(clientReference); + final S3Client client = configureMockClient(blobStore); - final ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(PutObjectRequest.class); - when(client.putObject(argumentCaptor.capture())).thenReturn(new PutObjectResult()); + final ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(PutObjectRequest.class); + final ArgumentCaptor bodyCaptor = ArgumentCaptor.forClass(RequestBody.class); + + when(client.putObject(requestCaptor.capture(), bodyCaptor.capture())).thenReturn(PutObjectResponse.builder().build()); final ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[blobSize]); blobContainer.executeSingleUpload(randomPurpose(), blobStore, blobName, inputStream, blobSize); - final PutObjectRequest request = argumentCaptor.getValue(); - assertEquals(bucketName, request.getBucketName()); - assertEquals(blobPath.buildAsString() + blobName, request.getKey()); - assertEquals(inputStream, request.getInputStream()); - assertEquals(blobSize, request.getMetadata().getContentLength()); - assertEquals(storageClass.toString(), request.getStorageClass()); - assertEquals(cannedAccessControlList, request.getCannedAcl()); + final PutObjectRequest request = requestCaptor.getValue(); + assertEquals(bucketName, request.bucket()); + assertEquals(blobPath.buildAsString() + blobName, request.key()); + + assertEquals(Long.valueOf(blobSize), request.contentLength()); + assertEquals(storageClass, request.storageClass()); + assertEquals(cannedAccessControlList, request.acl()); if (serverSideEncryption) { - assertEquals(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION, request.getMetadata().getSSEAlgorithm()); + assertEquals( + PutObjectRequest.builder().serverSideEncryption("AES256").build().sseCustomerAlgorithm(), + request.sseCustomerAlgorithm() + ); + } + + final RequestBody requestBody = bodyCaptor.getValue(); + try (var contentStream = requestBody.contentStreamProvider().newStream()) { + assertEquals(inputStream.available(), blobSize); + // checking that reading from contentStream also reads from inputStream + final int toSkip = between(0, blobSize); + contentStream.skipNBytes(toSkip); + assertEquals(inputStream.available(), blobSize - toSkip); } } @@ -182,21 +200,24 @@ public void testExecuteMultipartUpload() throws IOException { final StorageClass storageClass = randomFrom(StorageClass.values()); when(blobStore.getStorageClass()).thenReturn(storageClass); - final CannedAccessControlList cannedAccessControlList = randomBoolean() ? randomFrom(CannedAccessControlList.values()) : null; + final ObjectCannedACL cannedAccessControlList = randomBoolean() ? 
randomFrom(ObjectCannedACL.values()) : null; if (cannedAccessControlList != null) { when(blobStore.getCannedACL()).thenReturn(cannedAccessControlList); } - final AmazonS3 client = mock(AmazonS3.class); - final AmazonS3Reference clientReference = new AmazonS3Reference(client); - when(blobStore.clientReference()).thenReturn(clientReference); + final S3Client client = configureMockClient(blobStore); - final ArgumentCaptor initArgCaptor = ArgumentCaptor.forClass(InitiateMultipartUploadRequest.class); - final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult(); - initResult.setUploadId(randomAlphaOfLength(10)); - when(client.initiateMultipartUpload(initArgCaptor.capture())).thenReturn(initResult); + final var uploadId = randomIdentifier(); + final ArgumentCaptor createMultipartUploadRequestCaptor = ArgumentCaptor.forClass( + CreateMultipartUploadRequest.class + ); + when(client.createMultipartUpload(createMultipartUploadRequestCaptor.capture())).thenReturn( + CreateMultipartUploadResponse.builder().uploadId(uploadId).build() + ); - final ArgumentCaptor uploadArgCaptor = ArgumentCaptor.forClass(UploadPartRequest.class); + final ArgumentCaptor uploadPartRequestCaptor = ArgumentCaptor.forClass(UploadPartRequest.class); + final ArgumentCaptor uploadPartCopyRequestCaptor = ArgumentCaptor.forClass(UploadPartCopyRequest.class); + final ArgumentCaptor uploadPartBodyCaptor = ArgumentCaptor.forClass(RequestBody.class); final List expectedEtags = new ArrayList<>(); final long partSize = Math.min(bufferSize, blobSize); @@ -206,66 +227,81 @@ public void testExecuteMultipartUpload() throws IOException { totalBytes += partSize; } while (totalBytes < blobSize); - when(client.uploadPart(uploadArgCaptor.capture())).thenAnswer(invocationOnMock -> { - final UploadPartRequest request = (UploadPartRequest) invocationOnMock.getArguments()[0]; - final UploadPartResult response = new UploadPartResult(); - response.setPartNumber(request.getPartNumber()); - response.setETag(expectedEtags.get(request.getPartNumber() - 1)); - return response; + when(client.uploadPart(uploadPartRequestCaptor.capture(), uploadPartBodyCaptor.capture())).thenAnswer(invocationOnMock -> { + final UploadPartRequest request = invocationOnMock.getArgument(0); + return UploadPartResponse.builder().eTag(expectedEtags.get(request.partNumber() - 1)).build(); }); - final ArgumentCaptor compArgCaptor = ArgumentCaptor.forClass(CompleteMultipartUploadRequest.class); - when(client.completeMultipartUpload(compArgCaptor.capture())).thenReturn(new CompleteMultipartUploadResult()); + final ArgumentCaptor completeMultipartUploadRequestCaptor = ArgumentCaptor.forClass( + CompleteMultipartUploadRequest.class + ); + when(client.completeMultipartUpload(completeMultipartUploadRequestCaptor.capture())).thenReturn( + CompleteMultipartUploadResponse.builder().build() + ); final ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[0]); final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); blobContainer.executeMultipartUpload(randomPurpose(), blobStore, blobName, inputStream, blobSize); - final InitiateMultipartUploadRequest initRequest = initArgCaptor.getValue(); - assertEquals(bucketName, initRequest.getBucketName()); - assertEquals(blobPath.buildAsString() + blobName, initRequest.getKey()); - assertEquals(storageClass, initRequest.getStorageClass()); - assertEquals(cannedAccessControlList, initRequest.getCannedACL()); + final CreateMultipartUploadRequest initRequest = 
createMultipartUploadRequestCaptor.getValue(); + assertEquals(bucketName, initRequest.bucket()); + assertEquals(blobPath.buildAsString() + blobName, initRequest.key()); + assertEquals(storageClass, initRequest.storageClass()); + assertEquals(cannedAccessControlList, initRequest.acl()); if (serverSideEncryption) { - assertEquals(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION, initRequest.getObjectMetadata().getSSEAlgorithm()); + assertEquals( + PutObjectRequest.builder().serverSideEncryption("AES256").build().sseCustomerAlgorithm(), + initRequest.sseCustomerAlgorithm() + ); } - final Tuple numberOfParts = S3BlobContainer.numberOfMultiparts(blobSize, bufferSize); + final Tuple numberOfParts = S3BlobContainer.numberOfMultiparts(blobSize, partSize); - final List uploadRequests = uploadArgCaptor.getAllValues(); - assertEquals(numberOfParts.v1().intValue(), uploadRequests.size()); + final List uploadPartRequests = uploadPartRequestCaptor.getAllValues(); + assertEquals(numberOfParts.v1().intValue(), uploadPartRequests.size()); - for (int i = 0; i < uploadRequests.size(); i++) { - final UploadPartRequest uploadRequest = uploadRequests.get(i); + final List uploadPartBodies = uploadPartBodyCaptor.getAllValues(); + assertEquals(numberOfParts.v1().intValue(), uploadPartBodies.size()); - assertEquals(bucketName, uploadRequest.getBucketName()); - assertEquals(blobPath.buildAsString() + blobName, uploadRequest.getKey()); - assertEquals(initResult.getUploadId(), uploadRequest.getUploadId()); - assertEquals(i + 1, uploadRequest.getPartNumber()); - assertEquals(inputStream, uploadRequest.getInputStream()); + for (int i = 0; i < uploadPartRequests.size(); i++) { + final UploadPartRequest uploadRequest = uploadPartRequests.get(i); - if (i == (uploadRequests.size() - 1)) { - assertTrue(uploadRequest.isLastPart()); - assertEquals(numberOfParts.v2().longValue(), uploadRequest.getPartSize()); - } else { - assertFalse(uploadRequest.isLastPart()); - assertEquals(bufferSize, uploadRequest.getPartSize()); - } + assertEquals(bucketName, uploadRequest.bucket()); + assertEquals(blobPath.buildAsString() + blobName, uploadRequest.key()); + assertEquals(uploadId, uploadRequest.uploadId()); + assertEquals(i + 1, uploadRequest.partNumber().intValue()); + + assertEquals( + uploadRequest.sdkPartType() + " at " + i + " of " + uploadPartRequests.size(), + uploadRequest.sdkPartType() == SdkPartType.LAST, + i == uploadPartRequests.size() - 1 + ); + + assertEquals( + "part " + i, + uploadRequest.sdkPartType() == SdkPartType.LAST ? 
Optional.of(numberOfParts.v2()) : Optional.of(bufferSize), + uploadPartBodies.get(i).optionalContentLength() + ); } - final CompleteMultipartUploadRequest compRequest = compArgCaptor.getValue(); - assertEquals(bucketName, compRequest.getBucketName()); - assertEquals(blobPath.buildAsString() + blobName, compRequest.getKey()); - assertEquals(initResult.getUploadId(), compRequest.getUploadId()); + final CompleteMultipartUploadRequest compRequest = completeMultipartUploadRequestCaptor.getValue(); + assertEquals(bucketName, compRequest.bucket()); + assertEquals(blobPath.buildAsString() + blobName, compRequest.key()); + assertEquals(uploadId, compRequest.uploadId()); - final List actualETags = compRequest.getPartETags().stream().map(PartETag::getETag).collect(Collectors.toList()); + final List actualETags = compRequest.multipartUpload() + .parts() + .stream() + .map(CompletedPart::eTag) + .collect(Collectors.toList()); assertEquals(expectedEtags, actualETags); + + closeMockClient(blobStore); } public void testExecuteMultipartUploadAborted() { final String bucketName = randomAlphaOfLengthBetween(1, 10); final String blobName = randomAlphaOfLengthBetween(1, 10); - final BlobPath blobPath = BlobPath.EMPTY; final long blobSize = ByteSizeUnit.MB.toBytes(765); final long bufferSize = ByteSizeUnit.MB.toBytes(150); @@ -275,45 +311,52 @@ public void testExecuteMultipartUploadAborted() { when(blobStore.bufferSizeInBytes()).thenReturn(bufferSize); when(blobStore.getStorageClass()).thenReturn(randomFrom(StorageClass.values())); - final AmazonS3 client = mock(AmazonS3.class); - final AmazonS3Reference clientReference = new AmazonS3Reference(client); - doAnswer(invocation -> { - clientReference.incRef(); + final S3Client client = mock(S3Client.class); + final SdkHttpClient httpClient = mock(SdkHttpClient.class); + final AmazonS3Reference clientReference = new AmazonS3Reference(client, httpClient); + when(blobStore.clientReference()).then(invocation -> { + clientReference.mustIncRef(); return clientReference; - }).when(blobStore).clientReference(); + }); + when(blobStore.getMetricPublisher(any(), any())).thenReturn(new MetricPublisher() { + @Override + public void publish(MetricCollection metricCollection) {} + + @Override + public void close() {} + }); final String uploadId = randomAlphaOfLength(25); final int stage = randomInt(2); - final List exceptions = Arrays.asList( - new AmazonClientException("Expected initialization request to fail"), - new AmazonClientException("Expected upload part request to fail"), - new AmazonClientException("Expected completion request to fail") + final List exceptions = Arrays.asList( + S3Exception.builder().message("Expected initialization request to fail").build(), + S3Exception.builder().message("Expected upload part request to fail").build(), + S3Exception.builder().message("Expected completion request to fail").build() ); if (stage == 0) { // Fail the initialization request - when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenThrow(exceptions.get(stage)); + when(client.createMultipartUpload(any(CreateMultipartUploadRequest.class))).thenThrow(exceptions.get(stage)); } else if (stage == 1) { - final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult(); - initResult.setUploadId(uploadId); - when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(initResult); + final CreateMultipartUploadResponse.Builder initResult = CreateMultipartUploadResponse.builder(); + 
initResult.uploadId(uploadId); + when(client.createMultipartUpload(any(CreateMultipartUploadRequest.class))).thenReturn(initResult.build()); // Fail the upload part request - when(client.uploadPart(any(UploadPartRequest.class))).thenThrow(exceptions.get(stage)); + when(client.uploadPart(any(UploadPartRequest.class), any(RequestBody.class))).thenThrow(exceptions.get(stage)); } else { - final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult(); - initResult.setUploadId(uploadId); - when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(initResult); + final CreateMultipartUploadResponse.Builder initResult = CreateMultipartUploadResponse.builder(); + initResult.uploadId(uploadId); + when(client.createMultipartUpload(any(CreateMultipartUploadRequest.class))).thenReturn(initResult.build()); - when(client.uploadPart(any(UploadPartRequest.class))).thenAnswer(invocationOnMock -> { + when(client.uploadPart(any(UploadPartRequest.class), any(RequestBody.class))).thenAnswer(invocationOnMock -> { final UploadPartRequest request = (UploadPartRequest) invocationOnMock.getArguments()[0]; - final UploadPartResult response = new UploadPartResult(); - response.setPartNumber(request.getPartNumber()); - response.setETag(randomAlphaOfLength(20)); - return response; + final UploadPartResponse.Builder response = UploadPartResponse.builder(); + response.eTag(randomAlphaOfLength(20)); + return response.build(); }); // Fail the completion request @@ -321,41 +364,70 @@ public void testExecuteMultipartUploadAborted() { } final ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(AbortMultipartUploadRequest.class); - doNothing().when(client).abortMultipartUpload(argumentCaptor.capture()); + when(client.abortMultipartUpload(argumentCaptor.capture())).thenReturn(AbortMultipartUploadResponse.builder().build()); final IOException e = expectThrows(IOException.class, () -> { final S3BlobContainer blobContainer = new S3BlobContainer(BlobPath.EMPTY, blobStore); blobContainer.executeMultipartUpload(randomPurpose(), blobStore, blobName, new ByteArrayInputStream(new byte[0]), blobSize); }); - assertEquals("Unable to upload object [" + blobName + "] using multipart upload", e.getMessage()); - assertThat(e.getCause(), instanceOf(AmazonClientException.class)); + assertEquals("Unable to upload or copy object [" + blobName + "] using multipart upload", e.getMessage()); + assertThat(e.getCause(), instanceOf(S3Exception.class)); assertEquals(exceptions.get(stage).getMessage(), e.getCause().getMessage()); if (stage == 0) { - verify(client, times(1)).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class)); - verify(client, times(0)).uploadPart(any(UploadPartRequest.class)); + verify(client, times(1)).createMultipartUpload(any(CreateMultipartUploadRequest.class)); + verify(client, times(0)).uploadPart(any(UploadPartRequest.class), any(RequestBody.class)); verify(client, times(0)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class)); verify(client, times(0)).abortMultipartUpload(any(AbortMultipartUploadRequest.class)); } else { - verify(client, times(1)).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class)); + verify(client, times(1)).createMultipartUpload(any(CreateMultipartUploadRequest.class)); if (stage == 1) { - verify(client, times(1)).uploadPart(any(UploadPartRequest.class)); + verify(client, times(1)).uploadPart(any(UploadPartRequest.class), any(RequestBody.class)); verify(client, 
times(0)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class)); } else { - verify(client, times(6)).uploadPart(any(UploadPartRequest.class)); + verify(client, times(6)).uploadPart(any(UploadPartRequest.class), any(RequestBody.class)); verify(client, times(1)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class)); } verify(client, times(1)).abortMultipartUpload(any(AbortMultipartUploadRequest.class)); final AbortMultipartUploadRequest abortRequest = argumentCaptor.getValue(); - assertEquals(bucketName, abortRequest.getBucketName()); - assertEquals(blobName, abortRequest.getKey()); - assertEquals(uploadId, abortRequest.getUploadId()); + assertEquals(bucketName, abortRequest.bucket()); + assertEquals(blobName, abortRequest.key()); + assertEquals(uploadId, abortRequest.uploadId()); } + + closeMockClient(blobStore); + } + + private static S3Client configureMockClient(S3BlobStore blobStore) { + final S3Client client = mock(S3Client.class); + final SdkHttpClient httpClient = mock(SdkHttpClient.class); + try (AmazonS3Reference clientReference = new AmazonS3Reference(client, httpClient)) { + clientReference.mustIncRef(); // held by the mock, ultimately released in closeMockClient + when(blobStore.clientReference()).then(invocation -> { + clientReference.mustIncRef(); + return clientReference; + }); + when(blobStore.getMetricPublisher(any(), any())).thenReturn(new MetricPublisher() { + @Override + public void publish(MetricCollection metricCollection) {} + + @Override + public void close() {} + }); + } + return client; + } + + private static void closeMockClient(S3BlobStore blobStore) { + final var finalClientReference = blobStore.clientReference(); + assertFalse(finalClientReference.decRef()); + assertTrue(finalClientReference.decRef()); + assertFalse(finalClientReference.hasReferences()); } public void testNumberOfMultipartsWithZeroPartSize() { @@ -392,23 +464,22 @@ public void testInitCannedACL() { "public-read", "public-read-write", "authenticated-read", - "log-delivery-write", "bucket-owner-read", "bucket-owner-full-control" }; // empty acl - assertThat(S3BlobStore.initCannedACL(null), equalTo(CannedAccessControlList.Private)); - assertThat(S3BlobStore.initCannedACL(""), equalTo(CannedAccessControlList.Private)); + assertThat(S3BlobStore.initCannedACL(null), equalTo(ObjectCannedACL.PRIVATE)); + assertThat(S3BlobStore.initCannedACL(""), equalTo(ObjectCannedACL.PRIVATE)); // it should init cannedACL correctly for (String aclString : aclList) { - CannedAccessControlList acl = S3BlobStore.initCannedACL(aclString); + ObjectCannedACL acl = S3BlobStore.initCannedACL(aclString); assertThat(acl.toString(), equalTo(aclString)); } // it should accept all aws cannedACLs - for (CannedAccessControlList awsList : CannedAccessControlList.values()) { - CannedAccessControlList acl = S3BlobStore.initCannedACL(awsList.toString()); + for (ObjectCannedACL awsList : ObjectCannedACL.values()) { + ObjectCannedACL acl = S3BlobStore.initCannedACL(awsList.toString()); assertThat(acl, equalTo(awsList)); } } @@ -420,28 +491,28 @@ public void testInvalidCannedACL() { public void testInitStorageClass() { // it should default to `standard` - assertThat(S3BlobStore.initStorageClass(null), equalTo(StorageClass.Standard)); - assertThat(S3BlobStore.initStorageClass(""), equalTo(StorageClass.Standard)); + assertThat(S3BlobStore.initStorageClass(null), equalTo(StorageClass.STANDARD)); + assertThat(S3BlobStore.initStorageClass(""), equalTo(StorageClass.STANDARD)); // it should accept [standard, standard_ia, 
onezone_ia, reduced_redundancy, intelligent_tiering] - assertThat(S3BlobStore.initStorageClass("standard"), equalTo(StorageClass.Standard)); - assertThat(S3BlobStore.initStorageClass("standard_ia"), equalTo(StorageClass.StandardInfrequentAccess)); - assertThat(S3BlobStore.initStorageClass("onezone_ia"), equalTo(StorageClass.OneZoneInfrequentAccess)); - assertThat(S3BlobStore.initStorageClass("reduced_redundancy"), equalTo(StorageClass.ReducedRedundancy)); - assertThat(S3BlobStore.initStorageClass("intelligent_tiering"), equalTo(StorageClass.IntelligentTiering)); + assertThat(S3BlobStore.initStorageClass("standard"), equalTo(StorageClass.STANDARD)); + assertThat(S3BlobStore.initStorageClass("standard_ia"), equalTo(StorageClass.STANDARD_IA)); + assertThat(S3BlobStore.initStorageClass("onezone_ia"), equalTo(StorageClass.ONEZONE_IA)); + assertThat(S3BlobStore.initStorageClass("reduced_redundancy"), equalTo(StorageClass.REDUCED_REDUNDANCY)); + assertThat(S3BlobStore.initStorageClass("intelligent_tiering"), equalTo(StorageClass.INTELLIGENT_TIERING)); } public void testCaseInsensitiveStorageClass() { - assertThat(S3BlobStore.initStorageClass("sTandaRd"), equalTo(StorageClass.Standard)); - assertThat(S3BlobStore.initStorageClass("sTandaRd_Ia"), equalTo(StorageClass.StandardInfrequentAccess)); - assertThat(S3BlobStore.initStorageClass("oNeZoNe_iA"), equalTo(StorageClass.OneZoneInfrequentAccess)); - assertThat(S3BlobStore.initStorageClass("reduCED_redundancy"), equalTo(StorageClass.ReducedRedundancy)); - assertThat(S3BlobStore.initStorageClass("intelLigeNt_tieriNG"), equalTo(StorageClass.IntelligentTiering)); + assertThat(S3BlobStore.initStorageClass("sTandaRd"), equalTo(StorageClass.STANDARD)); + assertThat(S3BlobStore.initStorageClass("sTandaRd_Ia"), equalTo(StorageClass.STANDARD_IA)); + assertThat(S3BlobStore.initStorageClass("oNeZoNe_iA"), equalTo(StorageClass.ONEZONE_IA)); + assertThat(S3BlobStore.initStorageClass("reduCED_redundancy"), equalTo(StorageClass.REDUCED_REDUNDANCY)); + assertThat(S3BlobStore.initStorageClass("intelLigeNt_tieriNG"), equalTo(StorageClass.INTELLIGENT_TIERING)); } public void testInvalidStorageClass() { BlobStoreException ex = expectThrows(BlobStoreException.class, () -> S3BlobStore.initStorageClass("whatever")); - assertThat(ex.getMessage(), equalTo("`whatever` is not a valid S3 Storage Class.")); + assertThat(ex.getMessage(), equalTo("`whatever` is not a known S3 Storage Class.")); } public void testRejectGlacierStorageClass() { diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java index ddc7a1851c663..3de070934bdbd 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java @@ -9,9 +9,9 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.ClientConfiguration; -import com.amazonaws.Protocol; -import com.amazonaws.services.s3.AmazonS3Client; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; +import software.amazon.awssdk.regions.Region; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; @@ -20,7 +20,6 @@ import org.elasticsearch.watcher.ResourceWatcherService; import org.mockito.Mockito; -import 
java.io.IOException; import java.util.Map; import static org.hamcrest.Matchers.contains; @@ -36,16 +35,14 @@ public void testThereIsADefaultClientByDefault() { final S3ClientSettings defaultSettings = settings.get("default"); assertThat(defaultSettings.credentials, nullValue()); assertThat(defaultSettings.endpoint, is(emptyString())); - assertThat(defaultSettings.protocol, is(Protocol.HTTPS)); assertThat(defaultSettings.proxyHost, is(emptyString())); assertThat(defaultSettings.proxyPort, is(80)); - assertThat(defaultSettings.proxyScheme, is(Protocol.HTTP)); + assertThat(defaultSettings.proxyScheme, is(HttpScheme.HTTP)); assertThat(defaultSettings.proxyUsername, is(emptyString())); assertThat(defaultSettings.proxyPassword, is(emptyString())); - assertThat(defaultSettings.readTimeoutMillis, is(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT)); - assertThat(defaultSettings.maxConnections, is(ClientConfiguration.DEFAULT_MAX_CONNECTIONS)); - assertThat(defaultSettings.maxRetries, is(ClientConfiguration.DEFAULT_RETRY_POLICY.getMaxErrorRetry())); - assertThat(defaultSettings.throttleRetries, is(ClientConfiguration.DEFAULT_THROTTLE_RETRIES)); + assertThat(defaultSettings.readTimeoutMillis, is(Math.toIntExact(S3ClientSettings.Defaults.READ_TIMEOUT.millis()))); + assertThat(defaultSettings.maxConnections, is(S3ClientSettings.Defaults.MAX_CONNECTIONS)); + assertThat(defaultSettings.maxRetries, is(S3ClientSettings.Defaults.RETRY_COUNT)); } public void testDefaultClientSettingsCanBeSet() { @@ -58,14 +55,14 @@ public void testDefaultClientSettingsCanBeSet() { assertThat(defaultSettings.maxRetries, is(10)); } - public void testNondefaultClientCreatedBySettingItsSettings() { + public void testNonDefaultClientCreatedBySettingItsSettings() { final Map settings = S3ClientSettings.load( Settings.builder().put("s3.client.another_client.max_retries", 10).build() ); assertThat(settings.keySet(), contains("default", "another_client")); final S3ClientSettings defaultSettings = settings.get("default"); - assertThat(defaultSettings.maxRetries, is(ClientConfiguration.DEFAULT_RETRY_POLICY.getMaxErrorRetry())); + assertThat(defaultSettings.maxRetries, is(S3ClientSettings.Defaults.RETRY_COUNT)); final S3ClientSettings anotherClientSettings = settings.get("another_client"); assertThat(anotherClientSettings.maxRetries, is(10)); @@ -107,9 +104,9 @@ public void testCredentialsTypeWithAccessKeyAndSecretKey() { secureSettings.setString("s3.client.default.secret_key", "secret_key"); final Map settings = S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build()); final S3ClientSettings defaultSettings = settings.get("default"); - S3BasicCredentials credentials = defaultSettings.credentials; - assertThat(credentials.getAWSAccessKeyId(), is("access_key")); - assertThat(credentials.getAWSSecretKey(), is("secret_key")); + AwsBasicCredentials credentials = (AwsBasicCredentials) defaultSettings.credentials; + assertThat(credentials.accessKeyId(), is("access_key")); + assertThat(credentials.secretAccessKey(), is("secret_key")); } public void testCredentialsTypeWithAccessKeyAndSecretKeyAndSessionToken() { @@ -119,10 +116,10 @@ public void testCredentialsTypeWithAccessKeyAndSecretKeyAndSessionToken() { secureSettings.setString("s3.client.default.session_token", "session_token"); final Map settings = S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build()); final S3ClientSettings defaultSettings = settings.get("default"); - S3BasicSessionCredentials credentials = (S3BasicSessionCredentials) 
defaultSettings.credentials; - assertThat(credentials.getAWSAccessKeyId(), is("access_key")); - assertThat(credentials.getAWSSecretKey(), is("secret_key")); - assertThat(credentials.getSessionToken(), is("session_token")); + AwsSessionCredentials credentials = (AwsSessionCredentials) defaultSettings.credentials; + assertThat(credentials.accessKeyId(), is("access_key")); + assertThat(credentials.secretAccessKey(), is("secret_key")); + assertThat(credentials.sessionToken(), is("session_token")); } public void testRefineWithRepoSettings() { @@ -142,19 +139,19 @@ public void testRefineWithRepoSettings() { final String endpoint = "some.host"; final S3ClientSettings refinedSettings = baseSettings.refine(Settings.builder().put("endpoint", endpoint).build()); assertThat(refinedSettings.endpoint, is(endpoint)); - S3BasicSessionCredentials credentials = (S3BasicSessionCredentials) refinedSettings.credentials; - assertThat(credentials.getAWSAccessKeyId(), is("access_key")); - assertThat(credentials.getAWSSecretKey(), is("secret_key")); - assertThat(credentials.getSessionToken(), is("session_token")); + AwsSessionCredentials credentials = (AwsSessionCredentials) refinedSettings.credentials; + assertThat(credentials.accessKeyId(), is("access_key")); + assertThat(credentials.secretAccessKey(), is("secret_key")); + assertThat(credentials.sessionToken(), is("session_token")); } { final S3ClientSettings refinedSettings = baseSettings.refine(Settings.builder().put("path_style_access", true).build()); assertThat(refinedSettings.pathStyleAccess, is(true)); - S3BasicSessionCredentials credentials = (S3BasicSessionCredentials) refinedSettings.credentials; - assertThat(credentials.getAWSAccessKeyId(), is("access_key")); - assertThat(credentials.getAWSSecretKey(), is("secret_key")); - assertThat(credentials.getSessionToken(), is("session_token")); + AwsSessionCredentials credentials = (AwsSessionCredentials) refinedSettings.credentials; + assertThat(credentials.accessKeyId(), is("access_key")); + assertThat(credentials.secretAccessKey(), is("secret_key")); + assertThat(credentials.sessionToken(), is("session_token")); } } @@ -174,30 +171,30 @@ public void testUseChunkedEncodingCanBeSet() { assertThat(settings.get("other").disableChunkedEncoding, is(true)); } - public void testRegionCanBeSet() throws IOException { - final String region = randomAlphaOfLength(5); + public void testRegionCanBeSet() { + final String randomRegion = randomAlphaOfLength(5); final Map settings = S3ClientSettings.load( - Settings.builder().put("s3.client.other.region", region).build() + Settings.builder().put("s3.client.other.region", randomRegion).build() ); - assertThat(settings.get("default").region, is("")); - assertThat(settings.get("other").region, is(region)); - try (var s3Service = new S3Service(Mockito.mock(Environment.class), Settings.EMPTY, Mockito.mock(ResourceWatcherService.class))) { - AmazonS3Client other = (AmazonS3Client) s3Service.buildClient(settings.get("other")); - assertThat(other.getSignerRegionOverride(), is(region)); - } - } - public void testSignerOverrideCanBeSet() { - final String signerOverride = randomAlphaOfLength(5); - final Map settings = S3ClientSettings.load( - Settings.builder().put("s3.client.other.signer_override", signerOverride).build() - ); assertThat(settings.get("default").region, is("")); - assertThat(settings.get("other").signerOverride, is(signerOverride)); - ClientConfiguration defaultConfiguration = S3Service.buildConfiguration(settings.get("default")); - 
assertThat(defaultConfiguration.getSignerOverride(), nullValue()); - ClientConfiguration configuration = S3Service.buildConfiguration(settings.get("other")); - assertThat(configuration.getSignerOverride(), is(signerOverride)); + assertThat(settings.get("other").region, is(randomRegion)); + + try ( + var s3Service = new S3Service( + Mockito.mock(Environment.class), + Settings.EMPTY, + Mockito.mock(ResourceWatcherService.class), + () -> null + ) + ) { + var otherSettings = settings.get("other"); + Region otherRegion = s3Service.getClientRegion(otherSettings); + assertEquals(randomRegion, otherRegion.toString()); + + // by default, we simply do not know the region (which S3Service maps to us-east-1 with cross-region access enabled) + assertNull(s3Service.getClientRegion(settings.get("default"))); + } } public void testMaxConnectionsCanBeSet() { @@ -205,14 +202,10 @@ public void testMaxConnectionsCanBeSet() { final Map settings = S3ClientSettings.load( Settings.builder().put("s3.client.other.max_connections", maxConnections).build() ); - assertThat(settings.get("default").maxConnections, is(ClientConfiguration.DEFAULT_MAX_CONNECTIONS)); + assertThat(settings.get("default").maxConnections, is(S3ClientSettings.Defaults.MAX_CONNECTIONS)); assertThat(settings.get("other").maxConnections, is(maxConnections)); - ClientConfiguration defaultConfiguration = S3Service.buildConfiguration(settings.get("default")); - assertThat(defaultConfiguration.getMaxConnections(), is(ClientConfiguration.DEFAULT_MAX_CONNECTIONS)); - ClientConfiguration configuration = S3Service.buildConfiguration(settings.get("other")); - assertThat(configuration.getMaxConnections(), is(maxConnections)); // the default appears in the docs so let's make sure it doesn't change: - assertEquals(50, ClientConfiguration.DEFAULT_MAX_CONNECTIONS); + assertEquals(50, S3ClientSettings.Defaults.MAX_CONNECTIONS); } } diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index 3817af4def888..6d7daea669913 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -9,7 +9,8 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.services.s3.AbstractAmazonS3; +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.services.s3.S3Client; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.common.ReferenceDocs; @@ -26,7 +27,6 @@ import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.hamcrest.Matchers; -import org.mockito.Mockito; import java.util.Map; @@ -35,33 +35,41 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; public class S3RepositoryTests extends ESTestCase { - private static class DummyS3Client extends AbstractAmazonS3 { + private static class DummyS3Client implements S3Client { @Override - public void shutdown() { + public void close() { // TODO check is closed } + + @Override + public String serviceName() { + return "DummyS3Client"; + } } private static class DummyS3Service extends S3Service { DummyS3Service(Environment environment, ResourceWatcherService resourceWatcherService) { - 
super(environment, Settings.EMPTY, resourceWatcherService); + super(environment, Settings.EMPTY, resourceWatcherService, () -> null); } @Override public AmazonS3Reference client(RepositoryMetadata repositoryMetadata) { - return new AmazonS3Reference(new DummyS3Client()); + return new AmazonS3Reference(new DummyS3Client(), mock(SdkHttpClient.class)); } @Override public void refreshAndClearCache(Map clientsSettings) {} @Override - public void close() {} + public void doClose() { + // nothing to clean up + } } public void testInvalidChunkBufferSizeSettings() { @@ -147,7 +155,7 @@ private S3Repository createS3Repo(RepositoryMetadata metadata) { return new S3Repository( metadata, NamedXContentRegistry.EMPTY, - new DummyS3Service(Mockito.mock(Environment.class), Mockito.mock(ResourceWatcherService.class)), + new DummyS3Service(mock(Environment.class), mock(ResourceWatcherService.class)), BlobStoreTestUtil.mockClusterService(), MockBigArrays.NON_RECYCLING_INSTANCE, new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RetryingInputStreamTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RetryingInputStreamTests.java index c49d6e2a689bf..e735bae25f662 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RetryingInputStreamTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RetryingInputStreamTests.java @@ -9,13 +9,15 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.AmazonS3Exception; -import com.amazonaws.services.s3.model.GetObjectRequest; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectInputStream; - -import org.apache.http.client.methods.HttpGet; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.S3Exception; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.io.Streams; import org.elasticsearch.core.Nullable; import org.elasticsearch.repositories.blobstore.RequestedRangeNotSatisfiedException; @@ -25,6 +27,8 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.Arrays; +import java.util.function.BiConsumer; +import java.util.function.ToLongFunction; import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.hamcrest.Matchers.equalTo; @@ -51,7 +55,7 @@ public void testInputStreamIsAborted() throws IOException { final byte[] actualBytes = new byte[randomIntBetween(1, Math.max(1, expectedBytes.length - 1))]; final S3RetryingInputStream stream = createInputStream(expectedBytes, null, null); - stream.read(actualBytes); + assertEquals(actualBytes.length, stream.read(actualBytes)); stream.close(); assertArrayEquals(Arrays.copyOf(expectedBytes, actualBytes.length), actualBytes); @@ -79,7 +83,7 @@ public void testRangeInputStreamIsAborted() throws IOException { final int position = randomIntBetween(0, Math.max(1, expectedBytes.length - length)); final S3RetryingInputStream 
stream = createInputStream(expectedBytes, position, length);
- stream.read(actualBytes);
+ assertEquals(actualBytes.length, stream.read(actualBytes));
stream.close();
assertArrayEquals(Arrays.copyOfRange(expectedBytes, position, position + actualBytes.length), actualBytes);
@@ -115,33 +119,85 @@ public void testReadAfterBlobLengthThrowsRequestedRangeNotSatisfiedException() t
}
}
+ public void testContentRangeValidation() throws IOException {
+ final byte[] bytes = randomByteArrayOfLength(between(100, 200));
+ final int position = between(0, 100);
+ final int length = between(1, 100);
+ try (var stream = createInputStream(bytes, position, length)) {
+
+ final ToLongFunction<String> lengthSupplier = contentRangeHeader -> stream.tryGetStreamLength(
+ GetObjectResponse.builder().contentRange(contentRangeHeader).build()
+ );
+
+ final var fakeLength = between(1, length);
+ assertEquals(fakeLength, lengthSupplier.applyAsLong("bytes " + position + "-" + (position + fakeLength - 1) + "/*"));
+ assertEquals(fakeLength, stream.tryGetStreamLength(GetObjectResponse.builder().contentLength((long) fakeLength).build()));
+
+ final BiConsumer<String, String> failureMessageAsserter = (contentRangeHeader, expectedMessage) -> assertEquals(
+ expectedMessage,
+ expectThrows(IllegalArgumentException.class, () -> lengthSupplier.applyAsLong(contentRangeHeader)).getMessage()
+ );
+
+ failureMessageAsserter.accept("invalid", "unexpected Content-range header [invalid], should have started with [bytes ]");
+ failureMessageAsserter.accept("bytes invalid", "could not parse Content-range header [bytes invalid], missing hyphen");
+ failureMessageAsserter.accept("bytes 0-1", "could not parse Content-range header [bytes 0-1], missing slash");
+
+ final var badStartPos = randomValueOtherThan(position, () -> between(0, 100));
+ final var badStartHeader = Strings.format("bytes %d-%d/*", badStartPos, between(badStartPos, 200));
+ failureMessageAsserter.accept(
+ badStartHeader,
+ "unexpected Content-range header [" + badStartHeader + "], should have started at " + position
+ );
+
+ final var badEndPos = between(position + length + 1, 201);
+ final var badEndHeader = Strings.format("bytes %d-%d/*", position, badEndPos);
+ failureMessageAsserter.accept(
+ badEndHeader,
+ "unexpected Content-range header [" + badEndHeader + "], should have ended no later than " + (position + length - 1)
+ );
+ }
+ }
+
+ /**
+ * Creates a mock BlobStore that returns a mock S3Client, configured to supply a #getObject response. The blob store is then wrapped in
+ * a {@link S3RetryingInputStream}.
+ *
+ * @param data The data to stream.
+ * @param position The position at which to start reading from the stream.
+ * @param length How much to read from the data stream starting at {@code position}
+ * @return A {@link S3RetryingInputStream} that reads from the data stream.
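The failure messages asserted above spell out the parsing rules for a Content-Range header of the form "bytes first-last/complete". A standalone sketch with the same semantics (illustrative only; the production logic is S3RetryingInputStream#tryGetStreamLength):

    public final class ContentRangeSketch {
        // Returns the number of bytes the response actually carries, i.e. last - first + 1.
        static long streamLength(String contentRange) {
            final String prefix = "bytes ";
            if (contentRange.startsWith(prefix) == false) {
                throw new IllegalArgumentException("unexpected Content-range header [" + contentRange + "]");
            }
            final String range = contentRange.substring(prefix.length());
            final int hyphen = range.indexOf('-');
            if (hyphen < 0) {
                throw new IllegalArgumentException("could not parse Content-range header [" + contentRange + "], missing hyphen");
            }
            final int slash = range.indexOf('/', hyphen);
            if (slash < 0) {
                throw new IllegalArgumentException("could not parse Content-range header [" + contentRange + "], missing slash");
            }
            final long first = Long.parseLong(range.substring(0, hyphen));
            final long last = Long.parseLong(range.substring(hyphen + 1, slash));
            return last - first + 1;
        }

        public static void main(String[] args) {
            System.out.println(streamLength("bytes 100-149/*")); // prints 50
        }
    }
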
+ */
private S3RetryingInputStream createInputStream(final byte[] data, @Nullable final Integer position, @Nullable final Integer length)
throws IOException {
- final AmazonS3 client = mock(AmazonS3.class);
+ final S3Client client = mock(S3Client.class);
final AmazonS3Reference clientReference = mock(AmazonS3Reference.class);
when(clientReference.client()).thenReturn(client);
final S3BlobStore blobStore = mock(S3BlobStore.class);
when(blobStore.clientReference()).thenReturn(clientReference);
+ final MetricPublisher metricPublisher = mock(MetricPublisher.class);
+ when(blobStore.getMetricPublisher(any(S3BlobStore.Operation.class), any(OperationPurpose.class))).thenReturn(metricPublisher);
if (position != null && length != null) {
if (data.length <= position) {
- var amazonS3Exception = new AmazonS3Exception("test");
- amazonS3Exception.setStatusCode(RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus());
- when(client.getObject(any(GetObjectRequest.class))).thenThrow(amazonS3Exception);
+ var s3Exception = S3Exception.builder().message("test");
+ s3Exception.statusCode(RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus());
+ when(client.getObject(any(GetObjectRequest.class))).thenThrow(s3Exception.build());
return new S3RetryingInputStream(randomPurpose(), blobStore, "_blob", position, Math.addExact(position, length - 1));
}
- final S3Object s3Object = new S3Object();
- s3Object.getObjectMetadata().setContentLength(length);
- s3Object.setObjectContent(new S3ObjectInputStream(new ByteArrayInputStream(data, position, length), new HttpGet()));
- when(client.getObject(any(GetObjectRequest.class))).thenReturn(s3Object);
+ ResponseInputStream<GetObjectResponse> objectResponse = new ResponseInputStream<>(
+ GetObjectResponse.builder().contentLength(length.longValue()).build(),
+ new ByteArrayInputStream(data, position, length)
+ );
+ when(client.getObject(any(GetObjectRequest.class))).thenReturn(objectResponse);
return new S3RetryingInputStream(randomPurpose(), blobStore, "_blob", position, Math.addExact(position, length - 1));
}
- final S3Object s3Object = new S3Object();
- s3Object.getObjectMetadata().setContentLength(data.length);
- s3Object.setObjectContent(new S3ObjectInputStream(new ByteArrayInputStream(data), new HttpGet()));
- when(client.getObject(any(GetObjectRequest.class))).thenReturn(s3Object);
+ ResponseInputStream<GetObjectResponse> objectResponse = new ResponseInputStream<>(
+ GetObjectResponse.builder().contentLength(Long.valueOf(data.length)).build(),
+ new ByteArrayInputStream(data)
+ );
+ when(client.getObject(any(GetObjectRequest.class))).thenReturn(objectResponse);
return new S3RetryingInputStream(randomPurpose(), blobStore, "_blob");
}
}
diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java
index 7bfaf56127fc7..2701476afa71b 100644
--- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java
+++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java
@@ -8,23 +8,35 @@
*/
package org.elasticsearch.repositories.s3;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.endpoints.S3EndpointParams;
+import software.amazon.awssdk.services.s3.endpoints.internal.DefaultS3EndpointProvider;
+
+import org.apache.logging.log4j.Level;
import org.elasticsearch.cluster.metadata.RepositoryMetadata;
+import org.elasticsearch.common.Strings;
import
org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLog; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.watcher.ResourceWatcherService; -import org.mockito.Mockito; import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.mockito.Mockito.mock; public class S3ServiceTests extends ESTestCase { public void testCachedClientsAreReleased() throws IOException { final S3Service s3Service = new S3Service( - Mockito.mock(Environment.class), + mock(Environment.class), Settings.EMPTY, - Mockito.mock(ResourceWatcherService.class) + mock(ResourceWatcherService.class), + () -> Region.of("es-test-region") ); + s3Service.start(); final Settings settings = Settings.builder().put("endpoint", "http://first").build(); final RepositoryMetadata metadata1 = new RepositoryMetadata("first", "s3", settings); final RepositoryMetadata metadata2 = new RepositoryMetadata("second", "s3", settings); @@ -33,12 +45,143 @@ public void testCachedClientsAreReleased() throws IOException { assertSame(clientSettings, otherClientSettings); final AmazonS3Reference reference = s3Service.client(metadata1); reference.close(); - s3Service.close(); + s3Service.doClose(); final AmazonS3Reference referenceReloaded = s3Service.client(metadata1); assertNotSame(referenceReloaded, reference); referenceReloaded.close(); - s3Service.close(); + s3Service.doClose(); final S3ClientSettings clientSettingsReloaded = s3Service.settings(metadata1); assertNotSame(clientSettings, clientSettingsReloaded); + s3Service.close(); + } + + @TestLogging(reason = "testing WARN log output", value = "org.elasticsearch.repositories.s3.S3Service:WARN") + public void testGetClientRegionFromSetting() { + final var regionRequested = new AtomicBoolean(); + try (var s3Service = new S3Service(mock(Environment.class), Settings.EMPTY, mock(ResourceWatcherService.class), () -> { + assertTrue(regionRequested.compareAndSet(false, true)); + return randomFrom(randomFrom(Region.regions()), Region.of(randomIdentifier()), null); + })) { + s3Service.start(); + assertTrue(regionRequested.get()); + + final var clientName = randomBoolean() ? "default" : randomIdentifier(); + + final var region = randomBoolean() ? randomFrom(Region.regions()) : Region.of(randomIdentifier()); + MockLog.assertThatLogger( + () -> assertSame( + region, + s3Service.getClientRegion( + S3ClientSettings.getClientSettings( + Settings.builder().put("s3.client." + clientName + ".region", region.id()).build(), + clientName + ) + ) + ), + S3Service.class, + new MockLog.UnseenEventExpectation("no warning", S3Service.class.getCanonicalName(), Level.WARN, "*"), + new MockLog.UnseenEventExpectation("no debug", S3Service.class.getCanonicalName(), Level.DEBUG, "*") + ); + } + } + + @TestLogging(reason = "testing WARN log output", value = "org.elasticsearch.repositories.s3.S3Service:WARN") + public void testGetClientRegionFromEndpointSettingGuess() { + final var regionRequested = new AtomicBoolean(); + try (var s3Service = new S3Service(mock(Environment.class), Settings.EMPTY, mock(ResourceWatcherService.class), () -> { + assertTrue(regionRequested.compareAndSet(false, true)); + return randomFrom(randomFrom(Region.regions()), Region.of(randomIdentifier()), null); + })) { + s3Service.start(); + assertTrue(regionRequested.get()); + + final var clientName = randomBoolean() ? 
"default" : randomIdentifier(); + + final var guessedRegion = randomValueOtherThanMany( + r -> r.isGlobalRegion() || r.id().contains("-gov-"), + () -> randomFrom(Region.regions()) + ); + final var endpointUrl = safeGet( + new DefaultS3EndpointProvider().resolveEndpoint(S3EndpointParams.builder().region(guessedRegion).build()) + ).url(); + final var endpoint = randomFrom(endpointUrl.toString(), endpointUrl.getHost()); + + MockLog.assertThatLogger( + () -> assertEquals( + endpoint, + guessedRegion, + s3Service.getClientRegion( + S3ClientSettings.getClientSettings( + Settings.builder().put("s3.client." + clientName + ".endpoint", endpoint).build(), + clientName + ) + ) + ), + S3Service.class, + new MockLog.SeenEventExpectation( + endpoint + " -> " + guessedRegion, + S3Service.class.getCanonicalName(), + Level.WARN, + Strings.format( + """ + found S3 client with endpoint [%s] but no configured region, guessing it should use [%s]; \ + to suppress this warning, configure the [s3.client.CLIENT_NAME.region] setting on this node""", + endpoint, + guessedRegion.id() + ) + ) + ); + } + } + + @TestLogging(reason = "testing DEBUG log output", value = "org.elasticsearch.repositories.s3.S3Service:DEBUG") + public void testGetClientRegionFromDefault() { + final var regionRequested = new AtomicBoolean(); + final var defaultRegion = randomBoolean() ? randomFrom(Region.regions()) : Region.of(randomIdentifier()); + try (var s3Service = new S3Service(mock(Environment.class), Settings.EMPTY, mock(ResourceWatcherService.class), () -> { + assertTrue(regionRequested.compareAndSet(false, true)); + return defaultRegion; + })) { + s3Service.start(); + assertTrue(regionRequested.get()); + + final var clientName = randomBoolean() ? "default" : randomIdentifier(); + + MockLog.assertThatLogger( + () -> assertSame(defaultRegion, s3Service.getClientRegion(S3ClientSettings.getClientSettings(Settings.EMPTY, clientName))), + S3Service.class, + new MockLog.SeenEventExpectation( + "warning", + S3Service.class.getCanonicalName(), + Level.DEBUG, + "found S3 client with no configured region and no configured endpoint, using region [" + + defaultRegion.id() + + "] from SDK" + ) + ); + } + } + + @TestLogging(reason = "testing WARN log output", value = "org.elasticsearch.repositories.s3.S3Service:WARN") + public void testGetClientRegionFallbackToUsEast1() { + final var regionRequested = new AtomicBoolean(); + try (var s3Service = new S3Service(mock(Environment.class), Settings.EMPTY, mock(ResourceWatcherService.class), () -> { + assertTrue(regionRequested.compareAndSet(false, true)); + return null; + })) { + s3Service.start(); + assertTrue(regionRequested.get()); + + final var clientName = randomBoolean() ? 
"default" : randomIdentifier(); + + MockLog.assertThatLogger( + () -> assertNull(s3Service.getClientRegion(S3ClientSettings.getClientSettings(Settings.EMPTY, clientName))), + S3Service.class, + new MockLog.SeenEventExpectation("warning", S3Service.class.getCanonicalName(), Level.WARN, """ + found S3 client with no configured region and no configured endpoint, \ + falling back to [us-east-1] and enabling cross-region access; \ + to suppress this warning, configure the [s3.client.CLIENT_NAME.region] setting on this node""") + ); + } } } diff --git a/test/fixtures/aws-fixture-utils/src/main/java/fixture/aws/AwsCredentialsUtils.java b/test/fixtures/aws-fixture-utils/src/main/java/fixture/aws/AwsCredentialsUtils.java index 06dac967dff1a..d1693a0bf0e3c 100644 --- a/test/fixtures/aws-fixture-utils/src/main/java/fixture/aws/AwsCredentialsUtils.java +++ b/test/fixtures/aws-fixture-utils/src/main/java/fixture/aws/AwsCredentialsUtils.java @@ -26,6 +26,7 @@ public enum AwsCredentialsUtils { /** * Region supplier which matches any region. */ + // TODO: replace with DynamicRegionSupplier. public static final Supplier ANY_REGION = () -> "*"; /** diff --git a/test/fixtures/aws-sts-fixture/src/main/java/fixture/aws/sts/AwsStsHttpHandler.java b/test/fixtures/aws-sts-fixture/src/main/java/fixture/aws/sts/AwsStsHttpHandler.java index 1ed9b6888df8d..b99986793141d 100644 --- a/test/fixtures/aws-sts-fixture/src/main/java/fixture/aws/sts/AwsStsHttpHandler.java +++ b/test/fixtures/aws-sts-fixture/src/main/java/fixture/aws/sts/AwsStsHttpHandler.java @@ -37,8 +37,8 @@ @SuppressForbidden(reason = "this test uses a HttpServer to emulate the AWS STS endpoint") public class AwsStsHttpHandler implements HttpHandler { - static final String ROLE_ARN = "arn:aws:iam::123456789012:role/FederatedWebIdentityRole"; - static final String ROLE_NAME = "sts-fixture-test"; + public static final String ROLE_ARN = "arn:aws:iam::123456789012:role/FederatedWebIdentityRole"; + public static final String ROLE_NAME = "sts-fixture-test"; private final BiConsumer newCredentialsConsumer; private final String webIdentityToken; @@ -56,7 +56,7 @@ public void handle(final HttpExchange exchange) throws IOException { final var requestMethod = exchange.getRequestMethod(); final var path = exchange.getRequestURI().getPath(); - if ("POST".equals(requestMethod) && "/assume-role-with-web-identity/".equals(path)) { + if ("POST".equals(requestMethod) && "/".equals(path)) { String body = new String(exchange.getRequestBody().readAllBytes(), StandardCharsets.UTF_8); Map params = Arrays.stream(body.split("&")) diff --git a/test/fixtures/aws-sts-fixture/src/test/java/fixture/aws/sts/AwsStsHttpHandlerTests.java b/test/fixtures/aws-sts-fixture/src/test/java/fixture/aws/sts/AwsStsHttpHandlerTests.java index 4094ce18e7aef..b9193f31dbd18 100644 --- a/test/fixtures/aws-sts-fixture/src/test/java/fixture/aws/sts/AwsStsHttpHandlerTests.java +++ b/test/fixtures/aws-sts-fixture/src/test/java/fixture/aws/sts/AwsStsHttpHandlerTests.java @@ -134,7 +134,7 @@ private record TestHttpResponse(RestStatus status, BytesReference body) {} private static TestHttpResponse handleRequest(AwsStsHttpHandler handler, Map body) { final var httpExchange = new TestHttpExchange( "POST", - "/assume-role-with-web-identity/", + "/", new BytesArray( body.entrySet() .stream() diff --git a/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpFixture.java b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpFixture.java index dc497a74ea135..71bfa21360470 
100644 --- a/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpFixture.java +++ b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpFixture.java @@ -26,12 +26,6 @@ public class Ec2ImdsHttpFixture extends ExternalResource { - /** - * Name of the JVM system property that allows to override the IMDS endpoint address when using the AWS v1 SDK. - * Can be removed once we only use the v2 SDK. - */ - public static final String ENDPOINT_OVERRIDE_SYSPROP_NAME = "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride"; - /** * Name of the JVM system property that allows to override the IMDS endpoint address when using the AWS v2 SDK. */ diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java index 53fa5f2942a2d..abf9158e06634 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java @@ -282,7 +282,7 @@ protected abstract static class ErroneousHttpHandler implements DelegatingHttpHa // value is the number of times the request has been seen private final Map<String, AtomicInteger> requests; - private final HttpHandler delegate; + protected final HttpHandler delegate; private final int maxErrorsPerRequest; @SuppressForbidden(reason = "this test uses a HttpServer to emulate a cloud-based storage service") diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java index 82c5b1a54c13c..3deadf49cca30 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java @@ -105,7 +105,14 @@ public T environment(String key, String value) { @Override public T environment(String key, Supplier<String> supplier) { - this.environmentProviders.add(s -> Map.of(key, supplier.get())); + this.environmentProviders.add(s -> { + final var value = supplier.get(); + if (value == null) { + return Map.of(); + } else { + return Map.of(key, value); + } + }); return cast(this); } diff --git a/x-pack/plugin/searchable-snapshots/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsCredentialsReloadIT.java b/x-pack/plugin/searchable-snapshots/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsCredentialsReloadIT.java index 67a8b2ac4b66a..647c5de71d898 100644 --- a/x-pack/plugin/searchable-snapshots/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsCredentialsReloadIT.java +++ b/x-pack/plugin/searchable-snapshots/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsCredentialsReloadIT.java @@ -271,12 +271,7 @@ void ensureSearchFailure() throws IOException { searchRequest.addParameter("size", "10000"); assertThat( expectThrows(ResponseException.class, () -> client().performRequest(searchRequest)).getMessage(), - allOf( - containsString("Access denied"), - containsString("Status Code: 403"), - containsString("Error Code: AccessDenied"), - containsString("failed to read data from
cache") - ) + allOf(containsString("Access denied"), containsString("Status Code: 403"), containsString("failed to read data from cache")) ); } } diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle index 313a11f8ce431..99547e3452fd9 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle @@ -13,6 +13,7 @@ apply plugin: 'elasticsearch.rest-resources' dependencies { javaRestTestImplementation testArtifact(project(xpackModule('snapshot-repo-test-kit'))) javaRestTestImplementation project(':test:fixtures:s3-fixture') + javaRestTestImplementation project(':test:fixtures:aws-fixture-utils') } restResources { diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/S3RepositoryAnalysisRestIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/S3RepositoryAnalysisRestIT.java index e5930e02375b7..0cd2e093bfe83 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/S3RepositoryAnalysisRestIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/S3RepositoryAnalysisRestIT.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.repositories.blobstore.testkit.analyze; +import fixture.aws.DynamicRegionSupplier; import fixture.s3.S3HttpFixture; import org.elasticsearch.common.settings.Settings; @@ -16,6 +17,9 @@ import org.junit.rules.RuleChain; import org.junit.rules.TestRule; +import java.util.function.Supplier; + +import static fixture.aws.AwsCredentialsUtils.fixedAccessKey; import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.not; @@ -23,7 +27,13 @@ public class S3RepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRestTe static final boolean USE_FIXTURE = Boolean.parseBoolean(System.getProperty("tests.use.fixture", "true")); - public static final S3HttpFixture s3Fixture = new S3HttpFixture(USE_FIXTURE); + private static final Supplier regionSupplier = new DynamicRegionSupplier(); + public static final S3HttpFixture s3Fixture = new S3HttpFixture( + USE_FIXTURE, + "bucket", + "base_path_integration_tests", + fixedAccessKey("s3_test_access_key", regionSupplier, "s3") + ); public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .distribution(DistributionType.DEFAULT) @@ -31,6 +41,7 @@ public class S3RepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRestTe .keystore("s3.client.repo_test_kit.secret_key", System.getProperty("s3SecretKey")) .setting("s3.client.repo_test_kit.protocol", () -> "http", (n) -> USE_FIXTURE) .setting("s3.client.repo_test_kit.endpoint", s3Fixture::getAddress, (n) -> USE_FIXTURE) + .setting("s3.client.repo_test_kit.region", regionSupplier, (n) -> USE_FIXTURE) .setting("xpack.security.enabled", "false") .build(); diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BlobAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BlobAnalyzeAction.java index 6007968d7cb4d..47157ea9197e4 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BlobAnalyzeAction.java +++ 
b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BlobAnalyzeAction.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; @@ -335,8 +336,12 @@ public StreamInput streamInput() throws IOException { bytesReference, failIfExists ); - } catch (BlobWriteAbortedException e) { - assert request.getAbortWrite() : "write unexpectedly aborted"; + } catch (Exception e) { + if (ExceptionsHelper.unwrap(e, BlobWriteAbortedException.class) != null) { + assert request.getAbortWrite() : "write unexpectedly aborted"; + } else { + throw e; + } } } else { blobContainer.writeBlob(OperationPurpose.REPOSITORY_ANALYSIS, request.blobName, bytesReference, failIfExists);
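The S3ServiceTests changes above pin down the region-resolution order that replaces the v1 SDK's implicit endpoint-based behaviour: an explicit s3.client.${CLIENT_NAME}.region setting wins, then a region guessed from the configured endpoint, then whatever the SDK's default provider chain reports, and finally a us-east-1 fallback with cross-region access enabled. For readers new to the v2 SDK, a minimal standalone sketch of that general pattern against the raw SDK follows; it is illustrative only, not the plugin's actual wiring, and the class name and fallback choices are assumptions made for the example.

    // Sketch only: AWS SDK v2 needs a region resolved up front; unlike v1 it will
    // not infer one from the endpoint URL at request time.
    import software.amazon.awssdk.core.exception.SdkClientException;
    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.regions.providers.DefaultAwsRegionProviderChain;
    import software.amazon.awssdk.services.s3.S3Client;

    public class RegionResolutionSketch {
        public static void main(String[] args) {
            Region region;
            try {
                // The v2 default chain consults env vars, system properties, profile files and IMDSv2.
                region = new DefaultAwsRegionProviderChain().getRegion();
            } catch (SdkClientException e) {
                // Nothing configured anywhere: fall back, and enable cross-region access below
                // so requests signed for us-east-1 can still follow bucket-region redirects.
                region = Region.US_EAST_1;
            }
            try (S3Client client = S3Client.builder().region(region).crossRegionAccessEnabled(true).build()) {
                client.listBuckets().buckets().forEach(bucket -> System.out.println(bucket.name()));
            }
        }
    }

Running this without credentials still fails at the listBuckets call, of course; the sketch only shows where the region decision now has to happen, which is why the migration adds the s3.client.${CLIENT_NAME}.region setting and the warnings exercised in the tests above.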