From be78c0c5d3d8e6172387442933c30692cdd0655d Mon Sep 17 00:00:00 2001
From: Dongie Agnir <261310+dagnir@users.noreply.github.com>
Date: Thu, 29 May 2025 13:24:40 -0700
Subject: [PATCH 01/10] Add support for validating models in generator (#6136)

* Add support for validating models in generator

This commit introduces `ModelValidator`, which allows us to do validation of
the `IntermediateModel` before generation starts; violations are emitted to a
`validation-report.json` file under `generated-sources/sdk/models`.

As part of this change, `CodeGenerator` was updated to accept
`IntermediateModel` as a parameter. This is because all validators operate on
the `IntermediateModel`, which already has any SDK-specific customizations
applied. Since some validators require referencing multiple models (such as
when we start validating shared shapes), having the ability to pass in
`IntermediateModel` directly means we only need to build these models once,
upfront.

* Changelog

* Add javadocs
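To illustrate the extension point, a validator only needs to implement
`ModelValidator`. A minimal sketch (the class name and the
deprecated-operation check are illustrative, not part of this commit):

    import java.util.List;
    import java.util.stream.Collectors;
    import software.amazon.awssdk.codegen.validation.ModelValidationContext;
    import software.amazon.awssdk.codegen.validation.ModelValidator;
    import software.amazon.awssdk.codegen.validation.ValidationEntry;
    import software.amazon.awssdk.codegen.validation.ValidationErrorSeverity;

    // Illustrative validator: emits one entry per deprecated operation.
    public class NoDeprecatedOperationsValidator implements ModelValidator {
        @Override
        public List<ValidationEntry> validateModels(ModelValidationContext context) {
            // Any returned entry lands in validation-report.json and fails generation.
            return context.intermediateModel().getOperations().values().stream()
                          .filter(op -> op.isDeprecated())
                          .map(op -> new ValidationEntry()
                              .withSeverity(ValidationErrorSeverity.DANGER)
                              .withDetailMessage("Deprecated operation: " + op.getOperationName()))
                          .collect(Collectors.toList());
        }
    }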
---
 .../feature-AWSSDKforJavav2-bd762da.json      |   6 +
 .../amazon/awssdk/codegen/CodeGenerator.java  | 106 +++++++++-
 .../poet/rules/EndpointRulesSpecUtils.java    |  37 +++-
 .../validation/ModelValidationContext.java    |  79 +++++++
 .../validation/ModelValidationReport.java     |  40 ++++
 .../codegen/validation/ModelValidator.java    |  22 ++
 .../codegen/validation/ValidationEntry.java   |  61 ++++++
 .../codegen/validation/ValidationErrorId.java |  35 +++
 .../validation/ValidationErrorSeverity.java   |  22 ++
 .../awssdk/codegen/CodeGeneratorTest.java     | 199 ++++++++++++++++++
 .../awssdk/codegen/poet/ClientTestModels.java |  11 +
 11 files changed, 610 insertions(+), 8 deletions(-)
 create mode 100644 .changes/next-release/feature-AWSSDKforJavav2-bd762da.json
 create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidationContext.java
 create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidationReport.java
 create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidator.java
 create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationEntry.java
 create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorId.java
 create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorSeverity.java
 create mode 100644 codegen/src/test/java/software/amazon/awssdk/codegen/CodeGeneratorTest.java

diff --git a/.changes/next-release/feature-AWSSDKforJavav2-bd762da.json b/.changes/next-release/feature-AWSSDKforJavav2-bd762da.json
new file mode 100644
index 000000000000..d4fc9915dd8d
--- /dev/null
+++ b/.changes/next-release/feature-AWSSDKforJavav2-bd762da.json
@@ -0,0 +1,6 @@
+{
+    "type": "feature",
+    "category": "AWS SDK for Java v2",
+    "contributor": "",
+    "description": "Add support for defining service model validators and generating validation reports during code generation."
+}
diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java b/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java
index 4c097fadb4d2..0bcdbccb4d04 100644
--- a/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java
+++ b/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java
@@ -19,6 +19,9 @@
 import java.io.File;
 import java.io.IOException;
 import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
 import java.util.concurrent.ForkJoinTask;
 import software.amazon.awssdk.codegen.emitters.GeneratorTask;
 import software.amazon.awssdk.codegen.emitters.GeneratorTaskParams;
@@ -26,13 +29,23 @@
 import software.amazon.awssdk.codegen.internal.Jackson;
 import software.amazon.awssdk.codegen.internal.Utils;
 import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel;
+import software.amazon.awssdk.codegen.validation.ModelValidationContext;
+import software.amazon.awssdk.codegen.validation.ModelValidationReport;
+import software.amazon.awssdk.codegen.validation.ModelValidator;
+import software.amazon.awssdk.codegen.validation.ValidationEntry;
 import software.amazon.awssdk.utils.Logger;
 
 public class CodeGenerator {
     private static final Logger log = Logger.loggerFor(CodeGenerator.class);
     private static final String MODEL_DIR_NAME = "models";
 
-    private final C2jModels models;
+    // TODO: add validators
+    private static final List<ModelValidator> DEFAULT_MODEL_VALIDATORS = Collections.emptyList();
+
+    private final C2jModels c2jModels;
+
+    private final IntermediateModel intermediateModel;
+    private final IntermediateModel shareModelsTarget;
     private final String sourcesDirectory;
     private final String resourcesDirectory;
     private final String testsDirectory;
@@ -42,6 +55,9 @@ public class CodeGenerator {
      */
     private final String fileNamePrefix;
 
+    private final List<ModelValidator> modelValidators;
+    private final boolean emitValidationReport;
+
     static {
         // Make sure ClassName is statically initialized before we do anything in parallel.
         // Parallel static initialization of ClassName and TypeName can result in a deadlock:
@@ -50,12 +66,21 @@ public class CodeGenerator {
     }
 
     public CodeGenerator(Builder builder) {
-        this.models = builder.models;
+        this.c2jModels = builder.models;
+        this.intermediateModel = builder.intermediateModel;
+
+        if (this.c2jModels != null && this.intermediateModel != null) {
+            throw new IllegalArgumentException("Only one of c2jModels and intermediateModel must be specified");
+        }
+
+        this.shareModelsTarget = builder.shareModelsTarget;
         this.sourcesDirectory = builder.sourcesDirectory;
         this.testsDirectory = builder.testsDirectory;
         this.resourcesDirectory = builder.resourcesDirectory != null ? builder.resourcesDirectory
                                                                      : builder.sourcesDirectory;
         this.fileNamePrefix = builder.fileNamePrefix;
+        this.modelValidators = builder.modelValidators == null ? DEFAULT_MODEL_VALIDATORS : builder.modelValidators;
+        this.emitValidationReport = builder.emitValidationReport;
     }
 
     public static File getModelDirectory(String outputDirectory) {
@@ -76,13 +101,31 @@ public static Builder builder() {
      * code.
     */
    public void execute() {
-        try {
-            IntermediateModel intermediateModel = new IntermediateModelBuilder(models).build();
+        ModelValidationReport report = new ModelValidationReport();
+
+        IntermediateModel modelToGenerate;
+        if (c2jModels != null) {
+            modelToGenerate = new IntermediateModelBuilder(c2jModels).build();
+        } else {
+            modelToGenerate = intermediateModel;
+        }
+
+        List<ValidationEntry> validatorEntries = runModelValidators(modelToGenerate);
+        report.setValidationEntries(validatorEntries);
 
+        if (emitValidationReport) {
+            writeValidationReport(report);
+        }
+
+        if (!validatorEntries.isEmpty()) {
+            throw new RuntimeException("Validation failed. See validation report for details.");
+        }
+
+        try {
             if (fileNamePrefix != null) {
-                writeIntermediateModel(intermediateModel);
+                writeIntermediateModel(modelToGenerate);
             }
 
-            emitCode(intermediateModel);
+            emitCode(modelToGenerate);
         } catch (Exception e) {
             log.error(() -> "Failed to generate code. ", e);
@@ -91,7 +134,32 @@ public void execute() {
         }
     }
 
+    private List<ValidationEntry> runModelValidators(IntermediateModel intermediateModel) {
+        ModelValidationContext ctx = ModelValidationContext.builder()
+                                                           .intermediateModel(intermediateModel)
+                                                           .shareModelsTarget(shareModelsTarget)
+                                                           .build();
+
+        List<ValidationEntry> validationEntries = new ArrayList<>();
+
+        modelValidators.forEach(v -> validationEntries.addAll(v.validateModels(ctx)));
+
+        return validationEntries;
+    }
+
+    private void writeValidationReport(ModelValidationReport report) {
+        try {
+            writeModel(report, "validation-report.json");
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
     private void writeIntermediateModel(IntermediateModel model) throws IOException {
+        writeModel(model, fileNamePrefix + "-intermediate.json");
+    }
+
+    private void writeModel(Object model, String name) throws IOException {
         File modelDir = getModelDirectory(sourcesDirectory);
         PrintWriter writer = null;
         try {
@@ -100,7 +168,7 @@ private void writeIntermediateModel(IntermediateModel model) throws IOException
                 throw new RuntimeException("Failed to create " + outDir.getAbsolutePath());
             }
 
-            File outputFile = new File(modelDir, fileNamePrefix + "-intermediate.json");
+            File outputFile = new File(modelDir, name);
 
             if (!outputFile.exists() && !outputFile.createNewFile()) {
                 throw new RuntimeException("Error creating file " + outputFile.getAbsolutePath());
@@ -134,10 +202,14 @@ private GeneratorTask createGeneratorTasks(IntermediateModel intermediateModel)
 
     public static final class Builder {
         private C2jModels models;
+        private IntermediateModel intermediateModel;
+        private IntermediateModel shareModelsTarget;
         private String sourcesDirectory;
         private String resourcesDirectory;
         private String testsDirectory;
         private String fileNamePrefix;
+        private List<ModelValidator> modelValidators;
+        private boolean emitValidationReport;
 
         private Builder() {
         }
@@ -147,6 +219,16 @@ public Builder models(C2jModels models) {
             return this;
         }
 
+        public Builder intermediateModel(IntermediateModel intermediateModel) {
+            this.intermediateModel = intermediateModel;
+            return this;
+        }
+
+        public Builder shareModelsTarget(IntermediateModel shareModelsTarget) {
+            this.shareModelsTarget = shareModelsTarget;
+            return this;
+        }
+
         public Builder sourcesDirectory(String sourcesDirectory) {
             this.sourcesDirectory = sourcesDirectory;
             return this;
@@ -167,6 +249,16 @@ public Builder intermediateModelFileNamePrefix(String fileNamePrefix) {
             return this;
         }
 
+        public Builder modelValidators(List<ModelValidator> modelValidators) {
+            this.modelValidators = modelValidators;
+            return this;
+        }
+
+        public Builder emitValidationReport(boolean emitValidationReport) {
+            this.emitValidationReport = emitValidationReport;
+            return this;
+        }
+
         /**
          * @return An immutable {@link CodeGenerator} object.
          */
diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesSpecUtils.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesSpecUtils.java
index bad36fe6594f..dfcf68b056fd 100644
--- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesSpecUtils.java
+++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesSpecUtils.java
@@ -29,7 +29,12 @@
 import com.squareup.javapoet.TypeName;
 import java.io.IOException;
 import java.io.UncheckedIOException;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.net.URL;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
 import java.util.Arrays;
 import java.util.Iterator;
 import java.util.List;
@@ -52,6 +57,7 @@
 import software.amazon.awssdk.utils.internal.CodegenNamingUtils;
 
 public class EndpointRulesSpecUtils {
+    private static final String RULES_ENGINE_RESOURCE_FILES_PREFIX = "software/amazon/awssdk/codegen/rules/";
     private final IntermediateModel intermediateModel;
 
     public EndpointRulesSpecUtils(IntermediateModel intermediateModel) {
@@ -213,16 +219,45 @@ public TypeName resolverReturnType() {
 
     public List<String> rulesEngineResourceFiles() {
         URL currentJarUrl = EndpointRulesSpecUtils.class.getProtectionDomain().getCodeSource().getLocation();
+
+        // This would happen if the classes aren't loaded from a JAR, e.g. when unit testing
+        if (!currentJarUrl.toString().endsWith(".jar")) {
+            return rulesEngineFilesFromDirectory(currentJarUrl);
+        }
+
         try (JarFile jarFile = new JarFile(currentJarUrl.getFile())) {
             return jarFile.stream()
                           .map(ZipEntry::getName)
-                          .filter(e -> e.startsWith("software/amazon/awssdk/codegen/rules/"))
+                          .filter(e -> e.startsWith(RULES_ENGINE_RESOURCE_FILES_PREFIX))
                           .collect(Collectors.toList());
         } catch (IOException e) {
             throw new UncheckedIOException(e);
        }
    }
 
+    public List<String> rulesEngineFilesFromDirectory(URL location) {
+        URI locationUri;
+        try {
+            locationUri = location.toURI();
+            if (!"file".equals(locationUri.getScheme())) {
+                throw new RuntimeException("Expected location to be a directory");
+            }
+        } catch (URISyntaxException e) {
+            throw new RuntimeException(e);
+        }
+
+        try {
+            Path directory = Paths.get(locationUri);
+            return Files.walk(directory)
+                        // Remove the root directory of the classes; paths are expected to be relative to this directory
+                        .map(f -> directory.relativize(f).toString())
+                        .filter(f -> f.startsWith(RULES_ENGINE_RESOURCE_FILES_PREFIX))
+                        .collect(Collectors.toList());
+        } catch (IOException e) {
+            throw new UncheckedIOException(e);
+        }
+    }
+
     public List<String> rulesEngineResourceFiles2() {
         URL currentJarUrl = EndpointRulesSpecUtils.class.getProtectionDomain().getCodeSource().getLocation();
         try (JarFile jarFile = new JarFile(currentJarUrl.getFile())) {
diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidationContext.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidationContext.java
new file mode 100644
index 000000000000..55c2bedcba19
--- /dev/null
+++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidationContext.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ *  http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package software.amazon.awssdk.codegen.validation;
+
+import java.util.Optional;
+import software.amazon.awssdk.codegen.model.config.customization.ShareModelConfig;
+import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel;
+
+/**
+ * Context object for {@link ModelValidator}s. This object contains all the information available to the validators in
+ * order for them to perform their tasks.
+ */
+public final class ModelValidationContext {
+    private final IntermediateModel intermediateModel;
+    private final IntermediateModel shareModelsTarget;
+
+    private ModelValidationContext(Builder builder) {
+        this.intermediateModel = builder.intermediateModel;
+        this.shareModelsTarget = builder.shareModelsTarget;
+    }
+
+    /**
+     * The service model for which code is being generated.
+     */
+    public IntermediateModel intermediateModel() {
+        return intermediateModel;
+    }
+
+    /**
+     * The model of the service that the currently generating service shares models with. In other words, this is the
+     * service model for the service defined in {@link ShareModelConfig#getShareModelWith()}.
+     */
+    public Optional<IntermediateModel> shareModelsTarget() {
+        return Optional.ofNullable(shareModelsTarget);
+    }
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static class Builder {
+        private IntermediateModel intermediateModel;
+        private IntermediateModel shareModelsTarget;
+
+        /**
+         * The service model for which code is being generated.
+         */
+        public Builder intermediateModel(IntermediateModel intermediateModel) {
+            this.intermediateModel = intermediateModel;
+            return this;
+        }
+
+        /**
+         * The model of the service that the currently generating service shares models with. In other words, this is
+         * the service model for the service defined in {@link ShareModelConfig#getShareModelWith()}.
+         */
+        public Builder shareModelsTarget(IntermediateModel shareModelsTarget) {
+            this.shareModelsTarget = shareModelsTarget;
+            return this;
+        }
+
+        public ModelValidationContext build() {
+            return new ModelValidationContext(this);
+        }
+    }
+}
diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidationReport.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidationReport.java
new file mode 100644
index 000000000000..1112dc2190d5
--- /dev/null
+++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidationReport.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ *  http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package software.amazon.awssdk.codegen.validation;
+
+import java.util.Collections;
+import java.util.List;
+
+public class ModelValidationReport {
+    private List<ValidationEntry> validationEntries = Collections.emptyList();
+
+    public List<ValidationEntry> getValidationEntries() {
+        return validationEntries;
+    }
+
+    public void setValidationEntries(List<ValidationEntry> validationEntries) {
+        if (validationEntries != null) {
+            this.validationEntries = validationEntries;
+        } else {
+            this.validationEntries = Collections.emptyList();
+        }
+    }
+
+    public ModelValidationReport withValidationEntries(List<ValidationEntry> validationEntries) {
+        setValidationEntries(validationEntries);
+        return this;
+    }
+}
diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidator.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidator.java
new file mode 100644
index 000000000000..b544a030eaf5
--- /dev/null
+++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidator.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ *  http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package software.amazon.awssdk.codegen.validation;
+
+import java.util.List;
+
+public interface ModelValidator {
+    List<ValidationEntry> validateModels(ModelValidationContext context);
+}
diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationEntry.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationEntry.java
new file mode 100644
index 000000000000..f0b57032cd8a
--- /dev/null
+++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationEntry.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ *  http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */ + +package software.amazon.awssdk.codegen.validation; + +public final class ValidationEntry { + private ValidationErrorId errorId; + private ValidationErrorSeverity severity; + private String detailMessage; + + public ValidationErrorId getErrorId() { + return errorId; + } + + public void setErrorId(ValidationErrorId errorId) { + this.errorId = errorId; + } + + public ValidationEntry withErrorId(ValidationErrorId errorId) { + setErrorId(errorId); + return this; + } + + public ValidationErrorSeverity getSeverity() { + return severity; + } + + public void setSeverity(ValidationErrorSeverity severity) { + this.severity = severity; + } + + public ValidationEntry withSeverity(ValidationErrorSeverity severity) { + setSeverity(severity); + return this; + } + + public String getDetailMessage() { + return detailMessage; + } + + public void setDetailMessage(String detailMessage) { + this.detailMessage = detailMessage; + } + + public ValidationEntry withDetailMessage(String detailMessage) { + setDetailMessage(detailMessage); + return this; + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorId.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorId.java new file mode 100644 index 000000000000..80a3190b793c --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorId.java @@ -0,0 +1,35 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.validation; + +public enum ValidationErrorId { + SHARED_MODELS_DIFFER( + "The shared models between two services differ in their definition, which causes differences in the source" + + " files generated by the code generator." + ), + MEMBER_WITH_UNKNOWN_SHAPE( + "The shape declares a member targeting an unknown shape." + ), + ; + + private final String description; + + ValidationErrorId(String description) { + this.description = description; + } + + +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorSeverity.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorSeverity.java new file mode 100644 index 000000000000..39b6b015e42a --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorSeverity.java @@ -0,0 +1,22 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.validation; + +public enum ValidationErrorSeverity { + // Denotes an error that MUST be addressed. + DANGER, + ; +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/CodeGeneratorTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/CodeGeneratorTest.java new file mode 100644 index 000000000000..a3726af351f8 --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/CodeGeneratorTest.java @@ -0,0 +1,199 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileVisitResult; +import java.nio.file.FileVisitor; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.poet.ClientTestModels; +import software.amazon.awssdk.codegen.validation.ModelValidator; + +public class CodeGeneratorTest { + private static final String VALIDATION_REPORT_NAME = "validation-report.json"; + + private Path outputDir; + + @BeforeEach + void methodSetup() throws IOException { + outputDir = Files.createTempDirectory(null); + } + + @AfterEach + void methodTeardown() throws IOException { + deleteDirectory(outputDir); + } + + @Test + void build_cj2ModelsAndIntermediateModelSet_throws() { + assertThatThrownBy(() -> CodeGenerator.builder() + .models(C2jModels.builder().build()) + .intermediateModel(new IntermediateModel()) + .build()) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("Only one of"); + } + + @Test + void execute_emitValidationReportIsFalse_doesNotEmitValidationReport() throws IOException { + generateCodeFromC2jModels(ClientTestModels.awsJsonServiceC2jModels(), outputDir); + assertThat(Files.exists(validationReportPath(outputDir))).isFalse(); + } + + @Test + void execute_emitValidationReportIsTrue_emitsValidationReport() throws IOException { + generateCodeFromC2jModels(ClientTestModels.awsJsonServiceC2jModels(), outputDir, true, null); + assertThat(Files.exists(validationReportPath(outputDir))).isTrue(); + } + + @Test + void execute_invokesModelValidators() { + ModelValidator mockValidator = mock(ModelValidator.class); + when(mockValidator.validateModels(any())).thenReturn(Collections.emptyList()); + + 
generateCodeFromC2jModels(ClientTestModels.awsJsonServiceC2jModels(), outputDir, true,
+                                 Collections.singletonList(mockValidator));
+
+        verify(mockValidator).validateModels(any());
+    }
+
+    @Test
+    void execute_c2jModelsAndIntermediateModel_generateSameCode() throws IOException {
+        Path c2jModelsOutputDir = outputDir.resolve("c2jModels");
+        generateCodeFromC2jModels(ClientTestModels.awsJsonServiceC2jModels(), c2jModelsOutputDir, false, Collections.emptyList());
+
+        Path intermediateModelOutputDir = outputDir.resolve("intermediate-model");
+        generateCodeFromIntermediateModel(ClientTestModels.awsJsonServiceModels(), intermediateModelOutputDir);
+
+        List<Path> c2jModels_generatedFiles = Files.walk(c2jModelsOutputDir)
+                                                   .sorted()
+                                                   .map(c2jModelsOutputDir::relativize)
+                                                   .collect(Collectors.toList());
+
+        List<Path> intermediateModels_generatedFiles = Files.walk(intermediateModelOutputDir)
+                                                            .sorted()
+                                                            .map(intermediateModelOutputDir::relativize)
+                                                            .collect(Collectors.toList());
+
+        assertThat(c2jModels_generatedFiles).isNotEmpty();
+
+        // Ensure same exact set of files
+        assertThat(c2jModels_generatedFiles).isEqualTo(intermediateModels_generatedFiles);
+
+        // All files should be exactly the same
+        for (Path generatedFile : c2jModels_generatedFiles) {
+            Path c2jGenerated = c2jModelsOutputDir.resolve(generatedFile);
+            Path intermediateGenerated = intermediateModelOutputDir.resolve(generatedFile);
+
+            if (Files.isDirectory(c2jGenerated)) {
+                assertThat(Files.isDirectory(intermediateGenerated)).isTrue();
+            } else {
+                assertThat(readToString(c2jGenerated)).isEqualTo(readToString(intermediateGenerated));
+            }
+        }
+    }
+
+    private void generateCodeFromC2jModels(C2jModels c2jModels, Path outputDir) {
+        generateCodeFromC2jModels(c2jModels, outputDir, false, null);
+    }
+
+    private void generateCodeFromC2jModels(C2jModels c2jModels, Path outputDir,
+                                           boolean emitValidationReport,
+                                           List<ModelValidator> modelValidators) {
+        Path sources = outputDir.resolve("generated-sources").resolve("sdk");
+        Path resources = outputDir.resolve("generated-resources").resolve("sdk-resources");
+        Path tests = outputDir.resolve("generated-test-sources").resolve("sdk-tests");
+
+        CodeGenerator.builder()
+                     .models(c2jModels)
+                     .sourcesDirectory(sources.toAbsolutePath().toString())
+                     .resourcesDirectory(resources.toAbsolutePath().toString())
+                     .testsDirectory(tests.toAbsolutePath().toString())
+                     .emitValidationReport(emitValidationReport)
+                     .modelValidators(modelValidators)
+                     .build()
+                     .execute();
+    }
+
+    private void generateCodeFromIntermediateModel(IntermediateModel intermediateModel, Path outputDir) {
+        Path sources = outputDir.resolve("generated-sources").resolve("sdk");
+        Path resources = outputDir.resolve("generated-resources").resolve("sdk-resources");
+        Path tests = outputDir.resolve("generated-test-sources").resolve("sdk-tests");
+
+        CodeGenerator.builder()
+                     .intermediateModel(intermediateModel)
+                     .sourcesDirectory(sources.toAbsolutePath().toString())
+                     .resourcesDirectory(resources.toAbsolutePath().toString())
+                     .testsDirectory(tests.toAbsolutePath().toString())
+                     .build()
+                     .execute();
+    }
+
+    private static String readToString(Path p) throws IOException {
+        ByteBuffer bb = ByteBuffer.wrap(Files.readAllBytes(p));
+        return StandardCharsets.UTF_8.decode(bb).toString();
+    }
+
+    private static Path validationReportPath(Path root) {
+        return root.resolve(Paths.get("generated-sources", "sdk", "models", VALIDATION_REPORT_NAME));
+    }
+
+    private static void deleteDirectory(Path dir) throws IOException {
+        Files.walkFileTree(dir, new FileVisitor<Path>() {
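+            // Deletes the tree depth-first: each file on visit, each directory after its contents.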
+
+            @Override
+            public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
+                return FileVisitResult.CONTINUE;
+            }
+
+            @Override
+            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+                Files.delete(file);
+                return FileVisitResult.CONTINUE;
+            }
+
+            @Override
+            public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
+                return FileVisitResult.TERMINATE;
+            }
+
+            @Override
+            public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
+                Files.delete(dir);
+                return FileVisitResult.CONTINUE;
+            }
+        });
+    }
+}
diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/ClientTestModels.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/ClientTestModels.java
index 308aa69ea487..afcc888e77d9 100644
--- a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/ClientTestModels.java
+++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/ClientTestModels.java
@@ -47,6 +47,17 @@ public static IntermediateModel awsJsonServiceModels() {
         return new IntermediateModelBuilder(models).build();
     }
 
+    public static C2jModels awsJsonServiceC2jModels() {
+        File serviceModel = new File(ClientTestModels.class.getResource("client/c2j/json/service-2.json").getFile());
+        File customizationModel = new File(ClientTestModels.class.getResource("client/c2j/json/customization.config").getFile());
+        File paginatorsModel = new File(ClientTestModels.class.getResource("client/c2j/json/paginators.json").getFile());
+        return C2jModels.builder()
+                        .serviceModel(getServiceModel(serviceModel))
+                        .customizationConfig(getCustomizationConfig(customizationModel))
+                        .paginatorsModel(getPaginatorsModel(paginatorsModel))
+                        .build();
+    }
+
     public static IntermediateModel cborServiceModels() {
         File serviceModel = new File(ClientTestModels.class.getResource("client/c2j/json/service-2.json").getFile());
         File customizationModel = new File(ClientTestModels.class.getResource("client/c2j/cbor/customization.config").getFile());

From f981caa2785d07e00ae873bd4364f8e998fb1499 Mon Sep 17 00:00:00 2001
From: Dongie Agnir <261310+dagnir@users.noreply.github.com>
Date: Fri, 30 May 2025 10:07:12 -0700
Subject: [PATCH 02/10] Update generator plugin to support validators (#6142)

This commit updates the generation mojo to support validators:

- Add flag for controlling whether the validation report is written to disk
- Build all models to IntermediateModel upfront
- If a service model targets another for sharing shapes, also pass in the
  target service IntermediateModel
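For reference, a project that wants the report written to disk could
configure the plugin like this (coordinates and paths are illustrative;
only the `writeValidationReport` flag is new in this commit):

    <plugin>
        <groupId>software.amazon.awssdk</groupId>
        <artifactId>codegen-maven-plugin</artifactId>
        <configuration>
            <codeGenResources>${basedir}/src/main/resources/codegen-resources</codeGenResources>
            <outputDirectory>${project.build.directory}</outputDirectory>
            <writeValidationReport>true</writeValidationReport>
        </configuration>
    </plugin>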
---
 ...ture-CodeGeneratorMavenPlugin-64c91e8.json |  6 ++
 codegen-maven-plugin/pom.xml                  |  5 +
 .../codegen/maven/plugin/GenerationMojo.java  | 98 ++++++++++++++++---
 3 files changed, 94 insertions(+), 15 deletions(-)
 create mode 100644 .changes/next-release/feature-CodeGeneratorMavenPlugin-64c91e8.json

diff --git a/.changes/next-release/feature-CodeGeneratorMavenPlugin-64c91e8.json b/.changes/next-release/feature-CodeGeneratorMavenPlugin-64c91e8.json
new file mode 100644
index 000000000000..91090c74555f
--- /dev/null
+++ b/.changes/next-release/feature-CodeGeneratorMavenPlugin-64c91e8.json
@@ -0,0 +1,6 @@
+{
+    "type": "feature",
+    "category": "Code Generator Maven Plugin",
+    "contributor": "",
+    "description": "Update the generator plugin to support model validation during code generation. In addition, this adds the `writeValidationReport` flag to support writing the validation report to disk."
+}
diff --git a/codegen-maven-plugin/pom.xml b/codegen-maven-plugin/pom.xml
index af92ffb4edb8..0533043d5e7f 100644
--- a/codegen-maven-plugin/pom.xml
+++ b/codegen-maven-plugin/pom.xml
@@ -57,6 +57,11 @@
             <groupId>software.amazon.awssdk</groupId>
            <version>${awsjavasdk.version}</version>
         </dependency>
+        <dependency>
+            <artifactId>utils</artifactId>
+            <groupId>software.amazon.awssdk</groupId>
+            <version>${awsjavasdk.version}</version>
+        </dependency>
         <dependency>
             <groupId>org.junit.jupiter</groupId>
             <artifactId>junit-jupiter</artifactId>
diff --git a/codegen-maven-plugin/src/main/java/software/amazon/awssdk/codegen/maven/plugin/GenerationMojo.java b/codegen-maven-plugin/src/main/java/software/amazon/awssdk/codegen/maven/plugin/GenerationMojo.java
index 4ce4e7be116b..8b088846b055 100644
--- a/codegen-maven-plugin/src/main/java/software/amazon/awssdk/codegen/maven/plugin/GenerationMojo.java
+++ b/codegen-maven-plugin/src/main/java/software/amazon/awssdk/codegen/maven/plugin/GenerationMojo.java
@@ -21,7 +21,11 @@
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.nio.file.attribute.BasicFileAttributes;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 import java.util.Optional;
+import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import org.apache.maven.plugin.AbstractMojo;
 import org.apache.maven.plugin.MojoExecutionException;
@@ -30,21 +34,23 @@
 import org.apache.maven.project.MavenProject;
 import software.amazon.awssdk.codegen.C2jModels;
 import software.amazon.awssdk.codegen.CodeGenerator;
+import software.amazon.awssdk.codegen.IntermediateModelBuilder;
 import software.amazon.awssdk.codegen.internal.Utils;
 import software.amazon.awssdk.codegen.model.config.customization.CustomizationConfig;
+import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel;
 import software.amazon.awssdk.codegen.model.rules.endpoints.EndpointTestSuiteModel;
 import software.amazon.awssdk.codegen.model.service.EndpointRuleSetModel;
 import software.amazon.awssdk.codegen.model.service.Paginators;
 import software.amazon.awssdk.codegen.model.service.ServiceModel;
 import software.amazon.awssdk.codegen.model.service.Waiters;
 import software.amazon.awssdk.codegen.utils.ModelLoaderUtils;
+import software.amazon.awssdk.utils.StringUtils;
 
 /**
  * The Maven mojo to generate Java client code using software.amazon.awssdk:codegen module.
 */
 @Mojo(name = "generate")
 public class GenerationMojo extends AbstractMojo {
-
     private static final String MODEL_FILE = "service-2.json";
     private static final String CUSTOMIZATION_CONFIG_FILE = "customization.config";
     private static final String WAITERS_FILE = "waiters-2.json";
@@ -62,6 +68,8 @@ public class GenerationMojo extends AbstractMojo {
     @Parameter(property = "writeIntermediateModel", defaultValue = "false")
     private boolean writeIntermediateModel;
 
+    @Parameter(property = "writeValidationReport", defaultValue = "false")
+    private boolean writeValidationReport;
 
     @Parameter(defaultValue = "${project}", readonly = true)
     private MavenProject project;
@@ -76,22 +84,59 @@ public void execute() throws MojoExecutionException {
         this.resourcesDirectory = Paths.get(outputDirectory).resolve("generated-resources").resolve("sdk-resources");
         this.testsDirectory = Paths.get(outputDirectory).resolve("generated-test-sources").resolve("sdk-tests");
 
-        findModelRoots().forEach(p -> {
-            Path modelRootPath = p.modelRoot;
-            getLog().info("Loading from: " + modelRootPath.toString());
-            generateCode(C2jModels.builder()
-                                  .customizationConfig(p.customizationConfig)
-                                  .serviceModel(loadServiceModel(modelRootPath))
-                                  .waitersModel(loadWaiterModel(modelRootPath))
-                                  .paginatorsModel(loadPaginatorModel(modelRootPath))
-                                  .endpointRuleSetModel(loadEndpointRuleSetModel(modelRootPath))
-                                  .endpointTestSuiteModel(loadEndpointTestSuiteModel(modelRootPath))
-                                  .build());
+        List<GenerationParams> generationParams = initGenerationParams();
+
+        Map<String, IntermediateModel> serviceNameToModelMap = new HashMap<>();
+
+        generationParams.forEach(
+            params -> {
+                IntermediateModel model = params.intermediateModel;
+                String lowercaseServiceName = StringUtils.lowerCase(model.getMetadata().getServiceName());
+                IntermediateModel previous = serviceNameToModelMap.put(lowercaseServiceName, model);
+                if (previous != null) {
+                    String warning = String.format("Multiple service models found with service name %s. Model validation " +
+                                                   "will likely be incorrect", lowercaseServiceName);
+                    getLog().warn(warning);
+                }
+            });
+
+        // Update each param with the intermediate model it shares models with, if any
+        generationParams.forEach(params -> {
+            CustomizationConfig customizationConfig = params.intermediateModel.getCustomizationConfig();
+
+            if (customizationConfig.getShareModelConfig() != null) {
+                String shareModelWithName = customizationConfig.getShareModelConfig().getShareModelWith();
+                params.withShareModelsTarget(serviceNameToModelMap.get(shareModelWithName));
+            }
         });
+
+        generationParams.forEach(this::generateCode);
+
         project.addCompileSourceRoot(sourcesDirectory.toFile().getAbsolutePath());
         project.addTestCompileSourceRoot(testsDirectory.toFile().getAbsolutePath());
     }
 
+    private List<GenerationParams> initGenerationParams() throws MojoExecutionException {
+        List<ModelRoot> modelRoots = findModelRoots().collect(Collectors.toList());
+
+        return modelRoots.stream().map(r -> {
+            Path modelRootPath = r.modelRoot;
+            getLog().info("Loading from: " + modelRootPath.toString());
+            C2jModels c2jModels = C2jModels.builder()
+                                           .customizationConfig(r.customizationConfig)
+                                           .serviceModel(loadServiceModel(modelRootPath))
+                                           .waitersModel(loadWaiterModel(modelRootPath))
+                                           .paginatorsModel(loadPaginatorModel(modelRootPath))
+                                           .endpointRuleSetModel(loadEndpointRuleSetModel(modelRootPath))
+                                           .endpointTestSuiteModel(loadEndpointTestSuiteModel(modelRootPath))
+                                           .build();
+            String intermediateModelFileNamePrefix = intermediateModelFileNamePrefix(c2jModels);
+            IntermediateModel intermediateModel = new IntermediateModelBuilder(c2jModels).build();
+            return new GenerationParams().withIntermediateModel(intermediateModel)
+                                         .withIntermediateModelFileNamePrefix(intermediateModelFileNamePrefix);
+        }).collect(Collectors.toList());
+    }
+
     private Stream<ModelRoot> findModelRoots() throws MojoExecutionException {
         try {
             return Files.find(codeGenResources.toPath(), 10, this::isModelFile)
@@ -111,13 +156,15 @@ private boolean isModelFile(Path p, BasicFileAttributes a) {
         return p.toString().endsWith(MODEL_FILE);
     }
 
-    private void generateCode(C2jModels models) {
+    private void generateCode(GenerationParams params) {
         CodeGenerator.builder()
-                     .models(models)
+                     .intermediateModel(params.intermediateModel)
+                     .shareModelsTarget(params.shareModelsTarget)
                      .sourcesDirectory(sourcesDirectory.toFile().getAbsolutePath())
                      .resourcesDirectory(resourcesDirectory.toFile().getAbsolutePath())
                      .testsDirectory(testsDirectory.toFile().getAbsolutePath())
-                     .intermediateModelFileNamePrefix(intermediateModelFileNamePrefix(models))
+                     .intermediateModelFileNamePrefix(params.intermediateModelFileNamePrefix)
+                     .emitValidationReport(writeValidationReport)
                      .build()
                      .execute();
     }
@@ -178,4 +225,25 @@ private ModelRoot(Path modelRoot, CustomizationConfig customizationConfig) {
             this.customizationConfig = customizationConfig;
         }
     }
+
+    private static class GenerationParams {
+        private IntermediateModel intermediateModel;
+        private IntermediateModel shareModelsTarget;
+        private String intermediateModelFileNamePrefix;
+
+        public GenerationParams withIntermediateModel(IntermediateModel intermediateModel) {
+            this.intermediateModel = intermediateModel;
+            return this;
+        }
+
+        public GenerationParams withShareModelsTarget(IntermediateModel shareModelsTarget) {
+            this.shareModelsTarget = shareModelsTarget;
+            return this;
+        }
+
+        public GenerationParams withIntermediateModelFileNamePrefix(String intermediateModelFileNamePrefix) {
+            this.intermediateModelFileNamePrefix = intermediateModelFileNamePrefix;
+            return this;
+        }
+    }
 }

From f4edf0b99273939e8fabd6c18a3bd417773c643a Mon Sep 17 00:00:00 2001
From: Dongie Agnir <261310+dagnir@users.noreply.github.com>
Date: Mon, 9 Jun 2025 15:07:17 -0700
Subject: [PATCH 03/10] Add SharedModelsValidator (#6146)

* Add SharedModelsValidator

This validator ensures that when two services share models, any shared model
definitions are identical.

* Fix check
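For context, a service opts into model sharing through its
customization.config, which is what this validator keys off of. A minimal
sketch (the target service name "foo" is illustrative):

    {
        "shareModelConfig": {
            "shareModelWith": "foo"
        }
    }

The validator then compares the shared shapes of the generating service
against the IntermediateModel of that target service.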
---
 .../feature-AWSSDKforJavav2-f004fae.json      |   6 +
 codegen/pom.xml                               |   5 +
 .../amazon/awssdk/codegen/CodeGenerator.java  |   6 +-
 .../model/intermediate/ArgumentModel.java     |  26 +++
 .../model/intermediate/AuthorizerModel.java   |  27 +++
 .../intermediate/DocumentationModel.java      |  20 ++
 .../model/intermediate/EndpointDiscovery.java |  18 ++
 .../codegen/model/intermediate/EnumModel.java |  21 ++
 .../model/intermediate/MemberModel.java       |  95 ++++++++
 .../model/intermediate/OperationModel.java    |  60 +++++
 .../intermediate/ParameterHttpMapping.java    |  37 +++
 .../model/intermediate/ReturnTypeModel.java   |  22 ++
 .../model/intermediate/ShapeModel.java        |  81 +++++++
 .../model/intermediate/VariableModel.java     |  28 +++
 .../ArtificialResultWrapper.java              |  23 ++
 .../customization/ShapeCustomizationInfo.java |  30 +++
 .../codegen/model/service/ContextParam.java   |  20 ++
 .../validation/SharedModelsValidator.java     | 210 ++++++++++++++++++
 .../model/intermediate/ArgumentModelTest.java |  29 +++
 .../intermediate/AuthorizerModelTest.java     |  26 +++
 .../model/intermediate/MemberModelTest.java   |  40 ++++
 .../intermediate/OperationModelTest.java      |  35 +++
 .../ParameterHttpMappingTest.java             |  29 +++
 .../intermediate/ReturnTypeModelTest.java     |  29 +++
 .../model/intermediate/ShapeModelTest.java    |  37 +++
 .../model/intermediate/VariableModelTest.java |  29 +++
 .../ArtificialResultWrapperTest.java          |  29 +++
 .../ShapeCustomizationInfoTest.java           |  29 +++
 .../model/service/ContextParamTest.java       |  29 +++
 .../validation/SharedModelsValidatorTest.java | 148 ++++++++++++
 30 files changed, 1222 insertions(+), 2 deletions(-)
 create mode 100644 .changes/next-release/feature-AWSSDKforJavav2-f004fae.json
 create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/validation/SharedModelsValidator.java
 create mode 100644 codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ArgumentModelTest.java
 create mode 100644 codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/AuthorizerModelTest.java
 create mode 100644 codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/MemberModelTest.java
 create mode 100644 codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/OperationModelTest.java
 create mode 100644 codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ParameterHttpMappingTest.java
 create mode 100644 codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ReturnTypeModelTest.java
 create mode 100644 codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModelTest.java
 create mode 100644 codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/VariableModelTest.java
 create mode 100644 codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/customization/ArtificialResultWrapperTest.java
 create mode 100644 codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/customization/ShapeCustomizationInfoTest.java
 create mode 100644 codegen/src/test/java/software/amazon/awssdk/codegen/model/service/ContextParamTest.java
 create mode 100644 codegen/src/test/java/software/amazon/awssdk/codegen/validation/SharedModelsValidatorTest.java

diff --git a/.changes/next-release/feature-AWSSDKforJavav2-f004fae.json b/.changes/next-release/feature-AWSSDKforJavav2-f004fae.json
new file mode 100644
index 000000000000..184769e3f85c
--- /dev/null
+++ b/.changes/next-release/feature-AWSSDKforJavav2-f004fae.json
@@ -0,0 +1,6 @@
+{
+    "type": "feature",
+    "category": "AWS SDK for Java v2",
+    "contributor": "",
+    "description": "Add support for validating that shared models between two services are identical."
+}
diff --git a/codegen/pom.xml b/codegen/pom.xml
index bb9c7874c6a8..d539fc07f4d4 100644
--- a/codegen/pom.xml
+++ b/codegen/pom.xml
@@ -239,5 +239,10 @@
             <artifactId>mockito-core</artifactId>
             <scope>compile</scope>
         </dependency>
+        <dependency>
+            <groupId>nl.jqno.equalsverifier</groupId>
+            <artifactId>equalsverifier</artifactId>
+            <scope>test</scope>
+        </dependency>
     </dependencies>
diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java b/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java
index 0bcdbccb4d04..b96b62436ea6 100644
--- a/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java
+++ b/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java
@@ -32,6 +32,7 @@
 import software.amazon.awssdk.codegen.validation.ModelValidationContext;
 import software.amazon.awssdk.codegen.validation.ModelValidationReport;
 import software.amazon.awssdk.codegen.validation.ModelValidator;
+import software.amazon.awssdk.codegen.validation.SharedModelsValidator;
 import software.amazon.awssdk.codegen.validation.ValidationEntry;
 import software.amazon.awssdk.utils.Logger;
 
@@ -39,8 +40,9 @@ public class CodeGenerator {
     private static final Logger log = Logger.loggerFor(CodeGenerator.class);
     private static final String MODEL_DIR_NAME = "models";
 
-    // TODO: add validators
-    private static final List<ModelValidator> DEFAULT_MODEL_VALIDATORS = Collections.emptyList();
+    private static final List<ModelValidator> DEFAULT_MODEL_VALIDATORS = Collections.singletonList(
+        new SharedModelsValidator()
+    );
 
     private final C2jModels c2jModels;
 
diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ArgumentModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ArgumentModel.java
index 5013db7d3f9e..16e848303a4f 100644
--- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ArgumentModel.java
+++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ArgumentModel.java
@@ -15,6 +15,8 @@
 
 package software.amazon.awssdk.codegen.model.intermediate;
 
+import java.util.Objects;
+
 public class ArgumentModel extends DocumentationModel {
 
     private String name;
@@ -61,4 +63,28 @@ public ArgumentModel withIsEnumArg(boolean isEnumArg) {
         this.isEnumArg = isEnumArg;
         return this;
     }
+
+    @Override
+    public boolean equals(Object o) {
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        if (!super.equals(o)) {
+            return false;
+        }
+
+        ArgumentModel that = (ArgumentModel) o;
+        return isEnumArg == that.isEnumArg
+               && Objects.equals(name, that.name)
+               && Objects.equals(type, that.type);
+    }
+
+    @Override
+    public int hashCode() {
+        int result = super.hashCode();
+        result = 31 * result + Objects.hashCode(name);
+        result = 31 * result + Objects.hashCode(type);
+        result = 31 * result + Boolean.hashCode(isEnumArg);
+        return result;
+    }
 }
diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/AuthorizerModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/AuthorizerModel.java
index ce98c0dfea8e..316f4e741139 100644
---
a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/AuthorizerModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/AuthorizerModel.java @@ -16,6 +16,7 @@ package software.amazon.awssdk.codegen.model.intermediate; import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; import software.amazon.awssdk.codegen.model.service.Location; public class AuthorizerModel extends DocumentationModel { @@ -63,4 +64,30 @@ public String getAddAuthTokenMethod() { authTokenLocation)); } } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + AuthorizerModel that = (AuthorizerModel) o; + return Objects.equals(name, that.name) + && Objects.equals(interfaceName, that.interfaceName) + && authTokenLocation == that.authTokenLocation + && Objects.equals(tokenName, that.tokenName); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + Objects.hashCode(name); + result = 31 * result + Objects.hashCode(interfaceName); + result = 31 * result + Objects.hashCode(authTokenLocation); + result = 31 * result + Objects.hashCode(tokenName); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/DocumentationModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/DocumentationModel.java index 5be891040acc..55fd39f4a7c7 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/DocumentationModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/DocumentationModel.java @@ -17,6 +17,8 @@ import static software.amazon.awssdk.codegen.internal.DocumentationUtils.escapeIllegalCharacters; +import java.util.Objects; + public class DocumentationModel { protected String documentation; @@ -28,4 +30,22 @@ public String getDocumentation() { public void setDocumentation(String documentation) { this.documentation = escapeIllegalCharacters(documentation); } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + DocumentationModel that = (DocumentationModel) o; + return Objects.equals(documentation, that.documentation); + } + + @Override + public int hashCode() { + return Objects.hashCode(documentation); + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/EndpointDiscovery.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/EndpointDiscovery.java index 91a5f3b60f25..e372079fc541 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/EndpointDiscovery.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/EndpointDiscovery.java @@ -26,4 +26,22 @@ public boolean isRequired() { public void setRequired(boolean required) { this.required = required; } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + EndpointDiscovery that = (EndpointDiscovery) o; + return required == that.required; + } + + @Override + public int hashCode() { + return Boolean.hashCode(required); + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/EnumModel.java 
b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/EnumModel.java index f469b5de99fd..652f2c2aca6e 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/EnumModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/EnumModel.java @@ -15,6 +15,8 @@ package software.amazon.awssdk.codegen.model.intermediate; +import java.util.Objects; + /** * Represents a single enum field in a enum. */ @@ -49,4 +51,23 @@ public String getValue() { return value; } + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + EnumModel enumModel = (EnumModel) o; + return Objects.equals(value, enumModel.value) && Objects.equals(name, enumModel.name); + } + + @Override + public int hashCode() { + int result = Objects.hashCode(value); + result = 31 * result + Objects.hashCode(name); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/MemberModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/MemberModel.java index fddf93d4d72d..3e905aa1ed56 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/MemberModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/MemberModel.java @@ -28,6 +28,7 @@ import com.squareup.javapoet.ClassName; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import software.amazon.awssdk.codegen.internal.TypeUtils; import software.amazon.awssdk.codegen.model.service.ContextParam; @@ -785,4 +786,98 @@ public void ignoreDataTypeConversionFailures(boolean ignoreDataTypeConversionFai public boolean ignoreDataTypeConversionFailures() { return ignoreDataTypeConversionFailures; } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + MemberModel that = (MemberModel) o; + return deprecated == that.deprecated + && required == that.required + && synthetic == that.synthetic + && idempotencyToken == that.idempotencyToken + && isJsonValue == that.isJsonValue + && eventPayload == that.eventPayload + && eventHeader == that.eventHeader + && endpointDiscoveryId == that.endpointDiscoveryId + && sensitive == that.sensitive + && xmlAttribute == that.xmlAttribute + && ignoreDataTypeConversionFailures == that.ignoreDataTypeConversionFailures + && Objects.equals(name, that.name) + && Objects.equals(c2jName, that.c2jName) + && Objects.equals(c2jShape, that.c2jShape) + && Objects.equals(variable, that.variable) + && Objects.equals(setterModel, that.setterModel) + && Objects.equals(getterModel, that.getterModel) + && Objects.equals(http, that.http) + && Objects.equals(deprecatedMessage, that.deprecatedMessage) + && Objects.equals(listModel, that.listModel) + && Objects.equals(mapModel, that.mapModel) + && Objects.equals(enumType, that.enumType) + && Objects.equals(xmlNameSpaceUri, that.xmlNameSpaceUri) + && Objects.equals(shape, that.shape) + && Objects.equals(fluentGetterMethodName, that.fluentGetterMethodName) + && Objects.equals(fluentEnumGetterMethodName, that.fluentEnumGetterMethodName) + && Objects.equals(fluentSetterMethodName, that.fluentSetterMethodName) + && Objects.equals(fluentEnumSetterMethodName, that.fluentEnumSetterMethodName) + && Objects.equals(existenceCheckMethodName, that.existenceCheckMethodName) + && 
Objects.equals(beanStyleGetterName, that.beanStyleGetterName) + && Objects.equals(beanStyleSetterName, that.beanStyleSetterName) + && Objects.equals(unionEnumTypeName, that.unionEnumTypeName) + && Objects.equals(timestampFormat, that.timestampFormat) + && Objects.equals(deprecatedName, that.deprecatedName) + && Objects.equals(fluentDeprecatedGetterMethodName, that.fluentDeprecatedGetterMethodName) + && Objects.equals(fluentDeprecatedSetterMethodName, that.fluentDeprecatedSetterMethodName) + && Objects.equals(deprecatedBeanStyleSetterMethodName, that.deprecatedBeanStyleSetterMethodName) + && Objects.equals(contextParam, that.contextParam); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + Objects.hashCode(name); + result = 31 * result + Objects.hashCode(c2jName); + result = 31 * result + Objects.hashCode(c2jShape); + result = 31 * result + Objects.hashCode(variable); + result = 31 * result + Objects.hashCode(setterModel); + result = 31 * result + Objects.hashCode(getterModel); + result = 31 * result + Objects.hashCode(http); + result = 31 * result + Boolean.hashCode(deprecated); + result = 31 * result + Objects.hashCode(deprecatedMessage); + result = 31 * result + Boolean.hashCode(required); + result = 31 * result + Boolean.hashCode(synthetic); + result = 31 * result + Objects.hashCode(listModel); + result = 31 * result + Objects.hashCode(mapModel); + result = 31 * result + Objects.hashCode(enumType); + result = 31 * result + Objects.hashCode(xmlNameSpaceUri); + result = 31 * result + Boolean.hashCode(idempotencyToken); + result = 31 * result + Objects.hashCode(shape); + result = 31 * result + Objects.hashCode(fluentGetterMethodName); + result = 31 * result + Objects.hashCode(fluentEnumGetterMethodName); + result = 31 * result + Objects.hashCode(fluentSetterMethodName); + result = 31 * result + Objects.hashCode(fluentEnumSetterMethodName); + result = 31 * result + Objects.hashCode(existenceCheckMethodName); + result = 31 * result + Objects.hashCode(beanStyleGetterName); + result = 31 * result + Objects.hashCode(beanStyleSetterName); + result = 31 * result + Objects.hashCode(unionEnumTypeName); + result = 31 * result + Boolean.hashCode(isJsonValue); + result = 31 * result + Objects.hashCode(timestampFormat); + result = 31 * result + Boolean.hashCode(eventPayload); + result = 31 * result + Boolean.hashCode(eventHeader); + result = 31 * result + Boolean.hashCode(endpointDiscoveryId); + result = 31 * result + Boolean.hashCode(sensitive); + result = 31 * result + Boolean.hashCode(xmlAttribute); + result = 31 * result + Objects.hashCode(deprecatedName); + result = 31 * result + Objects.hashCode(fluentDeprecatedGetterMethodName); + result = 31 * result + Objects.hashCode(fluentDeprecatedSetterMethodName); + result = 31 * result + Objects.hashCode(deprecatedBeanStyleSetterMethodName); + result = 31 * result + Objects.hashCode(contextParam); + result = 31 * result + Boolean.hashCode(ignoreDataTypeConversionFailures); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java index a2a060c7a915..6b192644da1d 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java @@ -19,6 +19,7 @@ import java.util.ArrayList; import java.util.List; import 
java.util.Map; +import java.util.Objects; import software.amazon.awssdk.codegen.checksum.HttpChecksum; import software.amazon.awssdk.codegen.compression.RequestCompression; import software.amazon.awssdk.codegen.docs.ClientType; @@ -379,4 +380,63 @@ public boolean isUnsignedPayload() { public void setUnsignedPayload(boolean unsignedPayload) { this.unsignedPayload = unsignedPayload; } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + OperationModel that = (OperationModel) o; + return deprecated == that.deprecated && hasBlobMemberAsPayload == that.hasBlobMemberAsPayload + && hasStringMemberAsPayload == that.hasStringMemberAsPayload && isAuthenticated == that.isAuthenticated + && isPaginated == that.isPaginated && endpointOperation == that.endpointOperation + && endpointCacheRequired == that.endpointCacheRequired && httpChecksumRequired == that.httpChecksumRequired + && unsignedPayload == that.unsignedPayload && Objects.equals(operationName, that.operationName) + && Objects.equals(serviceProtocol, that.serviceProtocol) + && Objects.equals(deprecatedMessage, that.deprecatedMessage) && Objects.equals(input, that.input) + && Objects.equals(returnType, that.returnType) && Objects.equals(exceptions, that.exceptions) + && Objects.equals(simpleMethods, that.simpleMethods) && authType == that.authType + && Objects.equals(auth, that.auth) && Objects.equals(endpointDiscovery, that.endpointDiscovery) + && Objects.equals(inputShape, that.inputShape) && Objects.equals(outputShape, that.outputShape) + && Objects.equals(endpointTrait, that.endpointTrait) && Objects.equals(httpChecksum, that.httpChecksum) + && Objects.equals(requestcompression, that.requestcompression) + && Objects.equals(staticContextParams, that.staticContextParams) + && Objects.equals(operationContextParams, that.operationContextParams); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + Objects.hashCode(operationName); + result = 31 * result + Objects.hashCode(serviceProtocol); + result = 31 * result + Boolean.hashCode(deprecated); + result = 31 * result + Objects.hashCode(deprecatedMessage); + result = 31 * result + Objects.hashCode(input); + result = 31 * result + Objects.hashCode(returnType); + result = 31 * result + Objects.hashCode(exceptions); + result = 31 * result + Objects.hashCode(simpleMethods); + result = 31 * result + Boolean.hashCode(hasBlobMemberAsPayload); + result = 31 * result + Boolean.hashCode(hasStringMemberAsPayload); + result = 31 * result + Boolean.hashCode(isAuthenticated); + result = 31 * result + Objects.hashCode(authType); + result = 31 * result + Objects.hashCode(auth); + result = 31 * result + Boolean.hashCode(isPaginated); + result = 31 * result + Boolean.hashCode(endpointOperation); + result = 31 * result + Boolean.hashCode(endpointCacheRequired); + result = 31 * result + Objects.hashCode(endpointDiscovery); + result = 31 * result + Objects.hashCode(inputShape); + result = 31 * result + Objects.hashCode(outputShape); + result = 31 * result + Objects.hashCode(endpointTrait); + result = 31 * result + Boolean.hashCode(httpChecksumRequired); + result = 31 * result + Objects.hashCode(httpChecksum); + result = 31 * result + Objects.hashCode(requestcompression); + result = 31 * result + Objects.hashCode(staticContextParams); + result = 31 * result + Objects.hashCode(operationContextParams); + result = 31 * result + 
Boolean.hashCode(unsignedPayload); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ParameterHttpMapping.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ParameterHttpMapping.java index 22ed4a8e6880..fc9a776059a7 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ParameterHttpMapping.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ParameterHttpMapping.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.codegen.model.intermediate; +import java.util.Objects; import software.amazon.awssdk.codegen.model.service.Location; import software.amazon.awssdk.core.protocol.MarshallLocation; @@ -199,4 +200,40 @@ public MarshallLocation getMarshallLocation() { } } + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ParameterHttpMapping that = (ParameterHttpMapping) o; + return isPayload == that.isPayload + && isStreaming == that.isStreaming + && flattened == that.flattened + && isGreedy == that.isGreedy + && requiresLength == that.requiresLength + && Objects.equals(unmarshallLocationName, that.unmarshallLocationName) + && Objects.equals(marshallLocationName, that.marshallLocationName) + && Objects.equals(additionalUnmarshallingPath, that.additionalUnmarshallingPath) + && Objects.equals(additionalMarshallingPath, that.additionalMarshallingPath) + && location == that.location; + } + + @Override + public int hashCode() { + int result = Objects.hashCode(unmarshallLocationName); + result = 31 * result + Objects.hashCode(marshallLocationName); + result = 31 * result + Objects.hashCode(additionalUnmarshallingPath); + result = 31 * result + Objects.hashCode(additionalMarshallingPath); + result = 31 * result + Boolean.hashCode(isPayload); + result = 31 * result + Boolean.hashCode(isStreaming); + result = 31 * result + Objects.hashCode(location); + result = 31 * result + Boolean.hashCode(flattened); + result = 31 * result + Boolean.hashCode(isGreedy); + result = 31 * result + Boolean.hashCode(requiresLength); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ReturnTypeModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ReturnTypeModel.java index 77dff4c71481..1d46c2802cda 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ReturnTypeModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ReturnTypeModel.java @@ -15,6 +15,8 @@ package software.amazon.awssdk.codegen.model.intermediate; +import java.util.Objects; + public class ReturnTypeModel { private String returnType; @@ -48,4 +50,24 @@ public ReturnTypeModel withDocumentation(String documentation) { setDocumentation(documentation); return this; } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ReturnTypeModel that = (ReturnTypeModel) o; + return Objects.equals(returnType, that.returnType) && Objects.equals(documentation, that.documentation); + } + + @Override + public int hashCode() { + int result = Objects.hashCode(returnType); + result = 31 * result + Objects.hashCode(documentation); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModel.java 
b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModel.java index 098ea46bc7e4..3c26965302d5 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModel.java @@ -26,6 +26,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.stream.Collectors; import software.amazon.awssdk.codegen.model.intermediate.customization.ShapeCustomizationInfo; import software.amazon.awssdk.codegen.model.service.XmlNamespace; @@ -669,4 +670,84 @@ public ShapeModel withIsThrottling(boolean throttling) { this.throttling = throttling; return this; } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + ShapeModel that = (ShapeModel) o; + return deprecated == that.deprecated + && hasPayloadMember == that.hasPayloadMember + && hasHeaderMember == that.hasHeaderMember + && hasStatusCodeMember == that.hasStatusCodeMember + && hasStreamingMember == that.hasStreamingMember + && hasRequiresLengthMember == that.hasRequiresLengthMember + && wrapper == that.wrapper + && simpleMethod == that.simpleMethod + && fault == that.fault + && isEventStream == that.isEventStream + && isEvent == that.isEvent + && document == that.document + && union == that.union + && retryable == that.retryable + && throttling == that.throttling + && Objects.equals(c2jName, that.c2jName) + && Objects.equals(shapeName, that.shapeName) + && Objects.equals(deprecatedMessage, that.deprecatedMessage) + && Objects.equals(type, that.type) + && Objects.equals(required, that.required) + && Objects.equals(requestSignerClassFqcn, that.requestSignerClassFqcn) + && Objects.equals(endpointDiscovery, that.endpointDiscovery) + && Objects.equals(members, that.members) + && Objects.equals(enums, that.enums) + && Objects.equals(variable, that.variable) + && Objects.equals(marshaller, that.marshaller) + && Objects.equals(unmarshaller, that.unmarshaller) + && Objects.equals(errorCode, that.errorCode) + && Objects.equals(httpStatusCode, that.httpStatusCode) + && Objects.equals(customization, that.customization) + && Objects.equals(xmlNamespace, that.xmlNamespace); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + Objects.hashCode(c2jName); + result = 31 * result + Objects.hashCode(shapeName); + result = 31 * result + Boolean.hashCode(deprecated); + result = 31 * result + Objects.hashCode(deprecatedMessage); + result = 31 * result + Objects.hashCode(type); + result = 31 * result + Objects.hashCode(required); + result = 31 * result + Boolean.hashCode(hasPayloadMember); + result = 31 * result + Boolean.hashCode(hasHeaderMember); + result = 31 * result + Boolean.hashCode(hasStatusCodeMember); + result = 31 * result + Boolean.hashCode(hasStreamingMember); + result = 31 * result + Boolean.hashCode(hasRequiresLengthMember); + result = 31 * result + Boolean.hashCode(wrapper); + result = 31 * result + Boolean.hashCode(simpleMethod); + result = 31 * result + Objects.hashCode(requestSignerClassFqcn); + result = 31 * result + Objects.hashCode(endpointDiscovery); + result = 31 * result + Objects.hashCode(members); + result = 31 * result + Objects.hashCode(enums); + result = 31 * result + Objects.hashCode(variable); + result = 31 * result + Objects.hashCode(marshaller); + result = 31 * result + 
Objects.hashCode(unmarshaller); + result = 31 * result + Objects.hashCode(errorCode); + result = 31 * result + Objects.hashCode(httpStatusCode); + result = 31 * result + Boolean.hashCode(fault); + result = 31 * result + Objects.hashCode(customization); + result = 31 * result + Boolean.hashCode(isEventStream); + result = 31 * result + Boolean.hashCode(isEvent); + result = 31 * result + Objects.hashCode(xmlNamespace); + result = 31 * result + Boolean.hashCode(document); + result = 31 * result + Boolean.hashCode(union); + result = 31 * result + Boolean.hashCode(retryable); + result = 31 * result + Boolean.hashCode(throttling); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/VariableModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/VariableModel.java index bdf0668a9d21..b9355009e748 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/VariableModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/VariableModel.java @@ -17,6 +17,7 @@ import java.util.Collection; import java.util.List; +import java.util.Objects; public class VariableModel extends DocumentationModel { @@ -98,4 +99,31 @@ public String getVariableSetterType() { public String toString() { return variableName; } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + VariableModel that = (VariableModel) o; + return Objects.equals(variableName, that.variableName) + && Objects.equals(variableType, that.variableType) + && Objects.equals(variableDeclarationType, that.variableDeclarationType); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + Objects.hashCode(variableName); + result = 31 * result + Objects.hashCode(variableType); + result = 31 * result + Objects.hashCode(variableDeclarationType); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/customization/ArtificialResultWrapper.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/customization/ArtificialResultWrapper.java index e8adab25b48c..dd0b91d86301 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/customization/ArtificialResultWrapper.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/customization/ArtificialResultWrapper.java @@ -15,6 +15,8 @@ package software.amazon.awssdk.codegen.model.intermediate.customization; +import java.util.Objects; + public class ArtificialResultWrapper { private String wrappedMemberName; private String wrappedMemberSimpleType; @@ -34,4 +36,25 @@ public String getWrappedMemberSimpleType() { public void setWrappedMemberSimpleType(String wrappedMemberSimpleType) { this.wrappedMemberSimpleType = wrappedMemberSimpleType; } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ArtificialResultWrapper that = (ArtificialResultWrapper) o; + return Objects.equals(wrappedMemberName, that.wrappedMemberName) + && Objects.equals(wrappedMemberSimpleType, that.wrappedMemberSimpleType); + } + + @Override + public int hashCode() { + int result = Objects.hashCode(wrappedMemberName); + result = 31 * result + Objects.hashCode(wrappedMemberSimpleType); + return 
result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/customization/ShapeCustomizationInfo.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/customization/ShapeCustomizationInfo.java index b6d3950985b2..2e031eabb9a4 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/customization/ShapeCustomizationInfo.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/customization/ShapeCustomizationInfo.java @@ -16,6 +16,7 @@ package software.amazon.awssdk.codegen.model.intermediate.customization; import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; public class ShapeCustomizationInfo { @@ -72,4 +73,33 @@ public void setStaxTargetDepthOffset(int staxTargetDepthOffset) { public boolean hasStaxTargetDepthOffset() { return hasStaxTargetDepthOffset; } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ShapeCustomizationInfo that = (ShapeCustomizationInfo) o; + return skipGeneratingModelClass == that.skipGeneratingModelClass + && skipGeneratingMarshaller == that.skipGeneratingMarshaller + && skipGeneratingUnmarshaller == that.skipGeneratingUnmarshaller + && staxTargetDepthOffset == that.staxTargetDepthOffset + && hasStaxTargetDepthOffset == that.hasStaxTargetDepthOffset + && Objects.equals(artificialResultWrapper, that.artificialResultWrapper); + } + + @Override + public int hashCode() { + int result = Objects.hashCode(artificialResultWrapper); + result = 31 * result + Boolean.hashCode(skipGeneratingModelClass); + result = 31 * result + Boolean.hashCode(skipGeneratingMarshaller); + result = 31 * result + Boolean.hashCode(skipGeneratingUnmarshaller); + result = 31 * result + staxTargetDepthOffset; + result = 31 * result + Boolean.hashCode(hasStaxTargetDepthOffset); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/ContextParam.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/ContextParam.java index 96f363cd84f1..8650d1145bcb 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/ContextParam.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/ContextParam.java @@ -15,6 +15,8 @@ package software.amazon.awssdk.codegen.model.service; +import java.util.Objects; + public class ContextParam { private String name; @@ -25,4 +27,22 @@ public String getName() { public void setName(String name) { this.name = name; } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ContextParam that = (ContextParam) o; + return Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hashCode(name); + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/SharedModelsValidator.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/SharedModelsValidator.java new file mode 100644 index 000000000000..6b7f8471da7c --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/SharedModelsValidator.java @@ -0,0 +1,210 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package software.amazon.awssdk.codegen.validation;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel;
+import software.amazon.awssdk.codegen.model.intermediate.ListModel;
+import software.amazon.awssdk.codegen.model.intermediate.MapModel;
+import software.amazon.awssdk.codegen.model.intermediate.MemberModel;
+import software.amazon.awssdk.codegen.model.intermediate.ShapeModel;
+import software.amazon.awssdk.utils.Logger;
+
+/**
+ * Validator that ensures any shapes shared between two services are completely identical. This validator returns a validation
+ * entry for each shape that is present in both service models but has differing definitions in each model.
+ */
+public final class SharedModelsValidator implements ModelValidator {
+ private static final Logger LOG = Logger.loggerFor(SharedModelsValidator.class);
+
+ @Override
+ public List<ValidationEntry> validateModels(ModelValidationContext context) {
+ if (!context.shareModelsTarget().isPresent()) {
+ return Collections.emptyList();
+ }
+
+ return validateSharedShapes(context.intermediateModel(), context.shareModelsTarget().get());
+ }
+
+ private List<ValidationEntry> validateSharedShapes(IntermediateModel m1, IntermediateModel m2) {
+ List<ValidationEntry> errors = new ArrayList<>();
+
+ Map<String, ShapeModel> m1Shapes = m1.getShapes();
+ Map<String, ShapeModel> m2Shapes = m2.getShapes();
+
+ m1Shapes.forEach((name, m1Shape) -> {
+ if (!m2Shapes.containsKey(name)) {
+ return;
+ }
+
+ ShapeModel m2Shape = m2Shapes.get(name);
+
+ if (!shapesAreIdentical(m1Shape, m2Shape)) {
+ String detailMsg = String.format("Services '%s' and '%s' have differing definitions of the shared model '%s'",
+ m1.getMetadata().getServiceName(),
+ m2.getMetadata().getServiceName(),
+ name);
+ LOG.warn(() -> detailMsg);
+
+ errors.add(new ValidationEntry().withErrorId(ValidationErrorId.SHARED_MODELS_DIFFER)
+ .withSeverity(ValidationErrorSeverity.DANGER)
+ .withDetailMessage(detailMsg));
+ }
+ });
+
+ return errors;
+ }
+
+ private boolean shapesAreIdentical(ShapeModel m1, ShapeModel m2) {
+ // Note: We can't simply do m1.equals(m2) because shared models can still differ slightly in the
+ // marshalling/unmarshalling info such as the exact request operation name on the wire.
+ // In particular, we leave out comparing the `unmarshaller` and `marshaller` members of ShapeModel.
+ // Additionally, the List<MemberModel> are not compared with equals() because we handle MemberModel equality specially
+ // as well.
+ return m1.isDeprecated() == m2.isDeprecated()
+ && m1.isHasPayloadMember() == m2.isHasPayloadMember()
+ && m1.isHasHeaderMember() == m2.isHasHeaderMember()
+ && m1.isHasStatusCodeMember() == m2.isHasStatusCodeMember()
+ && m1.isHasStreamingMember() == m2.isHasStreamingMember()
+ && m1.isHasRequiresLengthMember() == m2.isHasRequiresLengthMember()
+ && m1.isWrapper() == m2.isWrapper()
+ && m1.isSimpleMethod() == m2.isSimpleMethod()
+ && m1.isFault() == m2.isFault()
+ && m1.isEventStream() == m2.isEventStream()
+ && m1.isEvent() == m2.isEvent()
+ && m1.isDocument() == m2.isDocument()
+ && m1.isUnion() == m2.isUnion()
+ && m1.isRetryable() == m2.isRetryable()
+ && m1.isThrottling() == m2.isThrottling()
+ && Objects.equals(m1.getC2jName(), m2.getC2jName())
+ && Objects.equals(m1.getShapeName(), m2.getShapeName())
+ && Objects.equals(m1.getDeprecatedMessage(), m2.getDeprecatedMessage())
+ && Objects.equals(m1.getType(), m2.getType())
+ && Objects.equals(m1.getRequired(), m2.getRequired())
+ && Objects.equals(m1.getRequestSignerClassFqcn(), m2.getRequestSignerClassFqcn())
+ && Objects.equals(m1.getEndpointDiscovery(), m2.getEndpointDiscovery())
+ && memberListsAreIdentical(m1.getMembers(), m2.getMembers())
+ && Objects.equals(m1.getEnums(), m2.getEnums())
+ && Objects.equals(m1.getVariable(), m2.getVariable())
+ && Objects.equals(m1.getErrorCode(), m2.getErrorCode())
+ && Objects.equals(m1.getHttpStatusCode(), m2.getHttpStatusCode())
+ && Objects.equals(m1.getCustomization(), m2.getCustomization())
+ && Objects.equals(m1.getXmlNamespace(), m2.getXmlNamespace())
+ ;
+ }
+
+ private boolean memberListsAreIdentical(List<MemberModel> memberList1, List<MemberModel> memberList2) {
+ if (memberList1.size() != memberList2.size()) {
+ return false;
+ }
+
+ for (int i = 0; i < memberList1.size(); i++) {
+ MemberModel m1 = memberList1.get(i);
+ MemberModel m2 = memberList2.get(i);
+ if (!memberModelsAreIdentical(m1, m2)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ private boolean memberModelsAreIdentical(MemberModel m1, MemberModel m2) {
+ // Similar to ShapeModel, can't call equals() directly.
It has a ShapeModel property that is ignored, and ListModel and + // MapModel are treated similarly + return m1.isDeprecated() == m2.isDeprecated() + && m1.isRequired() == m2.isRequired() + && m1.isSynthetic() == m2.isSynthetic() + && m1.isIdempotencyToken() == m2.isIdempotencyToken() + && m1.isJsonValue() == m2.isJsonValue() + && m1.isEventPayload() == m2.isEventPayload() + && m1.isEventHeader() == m2.isEventHeader() + && m1.isEndpointDiscoveryId() == m2.isEndpointDiscoveryId() + && m1.isSensitive() == m2.isSensitive() + && m1.isXmlAttribute() == m2.isXmlAttribute() + && m1.ignoreDataTypeConversionFailures() == m2.ignoreDataTypeConversionFailures() + && Objects.equals(m1.getName(), m2.getName()) + && Objects.equals(m1.getC2jName(), m2.getC2jName()) + && Objects.equals(m1.getC2jShape(), m2.getC2jShape()) + && Objects.equals(m1.getVariable(), m2.getVariable()) + && Objects.equals(m1.getSetterModel(), m2.getSetterModel()) + && Objects.equals(m1.getGetterModel(), m2.getGetterModel()) + && Objects.equals(m1.getHttp(), m2.getHttp()) + && Objects.equals(m1.getDeprecatedMessage(), m2.getDeprecatedMessage()) + // Note: not equals() + && listModelsAreIdentical(m1.getListModel(), m2.getListModel()) + // Note: not equals() + && mapModelsAreIdentical(m1.getMapModel(), m2.getMapModel()) + && Objects.equals(m1.getEnumType(), m2.getEnumType()) + && Objects.equals(m1.getXmlNameSpaceUri(), m2.getXmlNameSpaceUri()) + && Objects.equals(m1.getFluentGetterMethodName(), m2.getFluentGetterMethodName()) + && Objects.equals(m1.getFluentEnumGetterMethodName(), m2.getFluentEnumGetterMethodName()) + && Objects.equals(m1.getFluentSetterMethodName(), m2.getFluentSetterMethodName()) + && Objects.equals(m1.getFluentEnumSetterMethodName(), m2.getFluentEnumSetterMethodName()) + && Objects.equals(m1.getExistenceCheckMethodName(), m2.getExistenceCheckMethodName()) + && Objects.equals(m1.getBeanStyleGetterMethodName(), m2.getBeanStyleGetterMethodName()) + && Objects.equals(m1.getBeanStyleSetterMethodName(), m2.getBeanStyleSetterMethodName()) + && Objects.equals(m1.getUnionEnumTypeName(), m2.getUnionEnumTypeName()) + && Objects.equals(m1.getTimestampFormat(), m2.getTimestampFormat()) + && Objects.equals(m1.getDeprecatedName(), m2.getDeprecatedName()) + && Objects.equals(m1.getDeprecatedFluentGetterMethodName(), m2.getDeprecatedFluentGetterMethodName()) + && Objects.equals(m1.getDeprecatedFluentSetterMethodName(), m2.getDeprecatedFluentSetterMethodName()) + && Objects.equals(m1.getDeprecatedBeanStyleSetterMethodName(), m2.getDeprecatedBeanStyleSetterMethodName()) + && Objects.equals(m1.getContextParam(), m2.getContextParam()); + } + + private boolean listModelsAreIdentical(ListModel m1, ListModel m2) { + if (m1 == null ^ m2 == null) { + return false; + } + + if (m1 == null) { + return true; + } + + return Objects.equals(m1.getImplType(), m2.getImplType()) + && Objects.equals(m1.getMemberType(), m2.getMemberType()) + && Objects.equals(m1.getInterfaceType(), m2.getInterfaceType()) + // Note: not equals() + && memberModelsAreIdentical(m1.getListMemberModel(), m2.getListMemberModel()) + && Objects.equals(m1.getMemberLocationName(), m2.getMemberLocationName()) + && Objects.equals(m1.getMemberAdditionalMarshallingPath(), m2.getMemberAdditionalMarshallingPath()) + && Objects.equals(m1.getMemberAdditionalUnmarshallingPath(), m2.getMemberAdditionalUnmarshallingPath()); + } + + private boolean mapModelsAreIdentical(MapModel m1, MapModel m2) { + if (m1 == null ^ m2 == null) { + return false; + } + + if (m1 == null) { + return true; + } + 
+ return Objects.equals(m1.getImplType(), m2.getImplType()) + && Objects.equals(m1.getInterfaceType(), m2.getInterfaceType()) + && Objects.equals(m1.getKeyLocationName(), m2.getKeyLocationName()) + // Note: not equals() + && memberModelsAreIdentical(m1.getKeyModel(), m2.getKeyModel()) + && Objects.equals(m1.getValueLocationName(), m2.getValueLocationName()) + // Note: not equals() + && memberModelsAreIdentical(m1.getValueModel(), m2.getValueModel()); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ArgumentModelTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ArgumentModelTest.java new file mode 100644 index 000000000000..107a6e6cdb66 --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ArgumentModelTest.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.model.intermediate; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class ArgumentModelTest { + @Test + public void equals_isCorrect() { + EqualsVerifier.simple() + .forClass(ArgumentModel.class) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/AuthorizerModelTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/AuthorizerModelTest.java new file mode 100644 index 000000000000..28d8dd845412 --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/AuthorizerModelTest.java @@ -0,0 +1,26 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.model.intermediate; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class AuthorizerModelTest { + @Test + public void equals_isCorrect() { + EqualsVerifier.simple().forClass(AuthorizerModel.class).usingGetClass().verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/MemberModelTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/MemberModelTest.java new file mode 100644 index 000000000000..bd4a0859603f --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/MemberModelTest.java @@ -0,0 +1,40 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.model.intermediate; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class MemberModelTest { + @Test + public void equals_isCorrect() { + ListModel redListModel = new ListModel(); + redListModel.setMemberLocationName("RedLocation"); + ListModel blueListModel = new ListModel(); + blueListModel.setMemberLocationName("BlueLocation"); + + MemberModel redMemberModel = new MemberModel(); + redMemberModel.setC2jName("RedC2jName"); + MemberModel blueMemberModel = new MemberModel(); + blueMemberModel.setC2jName("BlueC2jName"); + + EqualsVerifier.simple().forClass(MemberModel.class) + .withPrefabValues(ListModel.class, redListModel, blueListModel) + .withPrefabValues(MemberModel.class, redMemberModel, blueMemberModel) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/OperationModelTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/OperationModelTest.java new file mode 100644 index 000000000000..531d0b1aa55e --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/OperationModelTest.java @@ -0,0 +1,35 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.model.intermediate; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class OperationModelTest { + @Test + void equals_isCorrect() { + MemberModel blueMemberModel = new MemberModel(); + blueMemberModel.setName("blue"); + MemberModel redMemberModel = new MemberModel(); + redMemberModel.setName("red"); + + EqualsVerifier.simple() + .forClass(OperationModel.class) + .withPrefabValues(MemberModel.class, blueMemberModel, redMemberModel) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ParameterHttpMappingTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ParameterHttpMappingTest.java new file mode 100644 index 000000000000..cd142cb34c2c --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ParameterHttpMappingTest.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.model.intermediate; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class ParameterHttpMappingTest { + @Test + void equals_isCorrect() { + EqualsVerifier.simple() + .forClass(ParameterHttpMapping.class) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ReturnTypeModelTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ReturnTypeModelTest.java new file mode 100644 index 000000000000..53e99f514403 --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ReturnTypeModelTest.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.model.intermediate; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class ReturnTypeModelTest { + @Test + void equals_isCorrect() { + EqualsVerifier.simple() + .forClass(ReturnTypeModel.class) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModelTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModelTest.java new file mode 100644 index 000000000000..08fb79681e96 --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModelTest.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.model.intermediate; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class ShapeModelTest { + + + @Test + public void equals_isCorrect() { + MemberModel blueMemberModel = new MemberModel(); + blueMemberModel.setName("blue"); + MemberModel redMemberModel = new MemberModel(); + redMemberModel.setName("red"); + + EqualsVerifier.simple() + .forClass(ShapeModel.class) + .withPrefabValues(MemberModel.class, blueMemberModel, redMemberModel) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/VariableModelTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/VariableModelTest.java new file mode 100644 index 000000000000..55ea2f39123a --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/VariableModelTest.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.model.intermediate; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class VariableModelTest { + @Test + void equals_isCorrect() { + EqualsVerifier.simple() + .forClass(VariableModel.class) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/customization/ArtificialResultWrapperTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/customization/ArtificialResultWrapperTest.java new file mode 100644 index 000000000000..aa29412a5f5f --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/customization/ArtificialResultWrapperTest.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.model.intermediate.customization; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class ArtificialResultWrapperTest { + @Test + void equals_isCorrect() { + EqualsVerifier.simple() + .forClass(ArtificialResultWrapper.class) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/customization/ShapeCustomizationInfoTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/customization/ShapeCustomizationInfoTest.java new file mode 100644 index 000000000000..3126117f100d --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/customization/ShapeCustomizationInfoTest.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.model.intermediate.customization; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class ShapeCustomizationInfoTest { + @Test + void equals_isCorrect() { + EqualsVerifier.simple() + .forClass(ShapeCustomizationInfo.class) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/service/ContextParamTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/service/ContextParamTest.java new file mode 100644 index 000000000000..937688b70cb4 --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/service/ContextParamTest.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.model.service; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class ContextParamTest { + @Test + void equals_isCorrect() { + EqualsVerifier.simple() + .forClass(ContextParam.class) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/validation/SharedModelsValidatorTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/validation/SharedModelsValidatorTest.java new file mode 100644 index 000000000000..a485956d94bc --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/validation/SharedModelsValidatorTest.java @@ -0,0 +1,148 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package software.amazon.awssdk.codegen.validation;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import org.junit.jupiter.api.Test;
+import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel;
+import software.amazon.awssdk.codegen.model.intermediate.MemberModel;
+import software.amazon.awssdk.codegen.model.intermediate.Metadata;
+import software.amazon.awssdk.codegen.model.intermediate.ShapeModel;
+import software.amazon.awssdk.codegen.poet.ClientTestModels;
+
+public class SharedModelsValidatorTest {
+ private final ModelValidator validator = new SharedModelsValidator();
+
+ @Test
+ void validateModels_noTargetService_noValidationErrors() {
+ assertThat(runValidation(ClientTestModels.awsJsonServiceModels(), null)).isEmpty();
+ }
+
+ @Test
+ void validateModels_targetServiceTriviallyIdentical_noValidationErrors() {
+ assertThat(runValidation(ClientTestModels.awsJsonServiceModels(), ClientTestModels.awsJsonServiceModels())).isEmpty();
+ }
+
+ @Test
+ void validateModels_noSharedShapes_noValidationErrors() {
+ IntermediateModel target = ClientTestModels.awsJsonServiceModels();
+ Map<String, ShapeModel> renamedShapes = target.getShapes()
+ .entrySet()
+ .stream()
+ .collect(Collectors.toMap(e -> "Copy" + e.getKey(), Map.Entry::getValue));
+ target.setShapes(renamedShapes);
+
+ assertThat(runValidation(ClientTestModels.awsJsonServiceModels(), target)).isEmpty();
+ }
+
+ @Test
+ void validateModels_sharedShapesNotIdentical_emitsValidationError() {
+ IntermediateModel target = ClientTestModels.awsJsonServiceModels();
+ Map<String, ShapeModel> modifiedShapes = target.getShapes()
+ .entrySet()
+ .stream()
+ .collect(Collectors.toMap(Map.Entry::getKey,
+ e -> {
+ ShapeModel shapeModel = e.getValue();
+ shapeModel.setDeprecated(!shapeModel.isDeprecated());
+ return shapeModel;
+ }));
+
+ target.setShapes(modifiedShapes);
+
+ List<ValidationEntry> validationEntries = runValidation(ClientTestModels.awsJsonServiceModels(), target);
+
+ assertThat(validationEntries).hasSize(modifiedShapes.size());
+
+ assertThat(validationEntries).allMatch(e -> e.getErrorId() == ValidationErrorId.SHARED_MODELS_DIFFER
+ && e.getSeverity() == ValidationErrorSeverity.DANGER);
+ }
+
+ @Test
+ void validateModels_shapesDontHaveSameMemberNames_emitsValidationError() {
+ IntermediateModel fooService = new IntermediateModel();
+ fooService.setMetadata(new Metadata().withServiceName("Foo"));
+
+ IntermediateModel barService = new IntermediateModel();
+ barService.setMetadata(new Metadata().withServiceName("Bar"));
+
+ String shapeName = "TestShape";
+
+ ShapeModel shape1 = new ShapeModel();
+ MemberModel member1 = new MemberModel();
+ member1.setName("Shape1Member");
+ shape1.setMembers(Arrays.asList(member1));
+
+ ShapeModel shape2 = new ShapeModel();
+ MemberModel member2 = new MemberModel();
+ member2.setName("Shape2Member");
+ shape2.setMembers(Arrays.asList(member2));
+
+ Map<String, ShapeModel> fooServiceShapes = new
HashMap<>();
+ fooServiceShapes.put(shapeName, shape1);
+ fooService.setShapes(fooServiceShapes);
+
+ Map<String, ShapeModel> barServiceShapes = new HashMap<>();
+ barServiceShapes.put(shapeName, shape2);
+ barService.setShapes(barServiceShapes);
+
+ List<ValidationEntry> validationEntries = runValidation(fooService, barService);
+
+ assertThat(validationEntries).hasSize(1);
+ }
+
+ @Test
+ void validateModels_shapesDontHaveSameMembers_emitsValidationError() {
+ IntermediateModel fooService = new IntermediateModel();
+ fooService.setMetadata(new Metadata().withServiceName("Foo"));
+
+ IntermediateModel barService = new IntermediateModel();
+ barService.setMetadata(new Metadata().withServiceName("Bar"));
+
+ String shapeName = "TestShape";
+ ShapeModel shape1 = new ShapeModel();
+
+ ShapeModel shape2 = new ShapeModel();
+ shape2.setMembers(Arrays.asList(new MemberModel(), new MemberModel()));
+
+ Map<String, ShapeModel> fooServiceShapes = new HashMap<>();
+ fooServiceShapes.put(shapeName, shape1);
+ fooService.setShapes(fooServiceShapes);
+
+ Map<String, ShapeModel> barServiceShapes = new HashMap<>();
+ barServiceShapes.put(shapeName, shape2);
+ barService.setShapes(barServiceShapes);
+
+ List<ValidationEntry> validationEntries = runValidation(fooService, barService);
+
+ assertThat(validationEntries).hasSize(1);
+ }
+
+ private List<ValidationEntry> runValidation(IntermediateModel m1, IntermediateModel m2) {
+ ModelValidationContext ctx = ModelValidationContext.builder()
+ .intermediateModel(m1)
+ .shareModelsTarget(m2)
+ .build();
+
+ return validator.validateModels(ctx);
+ }
+}
From c66439204271c959f230c139cdca48d9e49f1f04 Mon Sep 17 00:00:00 2001
From: Dongie Agnir <261310+dagnir@users.noreply.github.com>
Date: Fri, 13 Jun 2025 14:01:06 -0700
Subject: [PATCH 04/10] Validate unknown member reference (#6177)

Add validation for the case where an endpoint test references a
non-existent operation input shape member.

As part of this change, `CodeGenerator` has been updated to catch
`ModelInvalidException` and write the validation entries in the
validation-report.json.
--- .../amazon/awssdk/codegen/CodeGenerator.java | 8 +++ .../emitters/tasks/BaseGeneratorTasks.java | 2 + .../rules/EndpointRulesClientTestSpec.java | 18 ++++++ .../validation/ModelInvalidException.java | 58 +++++++++++++++++++ .../codegen/validation/ValidationEntry.java | 11 ++++ .../codegen/validation/ValidationErrorId.java | 4 +- .../awssdk/codegen/CodeGeneratorTest.java | 42 ++++++++++++++ .../codegen/incorrect-endpoint-tests.json | 26 +++++++++ 8 files changed, 166 insertions(+), 3 deletions(-) create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelInvalidException.java create mode 100644 codegen/src/test/resources/software/amazon/awssdk/codegen/incorrect-endpoint-tests.json diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java b/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java index b96b62436ea6..0361fc6d0fdd 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java @@ -29,6 +29,7 @@ import software.amazon.awssdk.codegen.internal.Jackson; import software.amazon.awssdk.codegen.internal.Utils; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.validation.ModelInvalidException; import software.amazon.awssdk.codegen.validation.ModelValidationContext; import software.amazon.awssdk.codegen.validation.ModelValidationReport; import software.amazon.awssdk.codegen.validation.ModelValidator; @@ -131,6 +132,13 @@ public void execute() { } catch (Exception e) { log.error(() -> "Failed to generate code. ", e); + + if (e instanceof ModelInvalidException && emitValidationReport) { + ModelInvalidException invalidException = (ModelInvalidException) e; + report.setValidationEntries(invalidException.validationEntries()); + writeValidationReport(report); + } + throw new RuntimeException( "Failed to generate code. 
Exception message : " + e.getMessage(), e); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/BaseGeneratorTasks.java b/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/BaseGeneratorTasks.java index 731f70e0cba3..cdabdbf219cd 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/BaseGeneratorTasks.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/BaseGeneratorTasks.java @@ -71,6 +71,8 @@ protected void compute() { ForkJoinTask.invokeAll(createTasks()); log.info(" Completed " + taskName + "."); } + } catch (RuntimeException e) { + throw e; } catch (Exception e) { throw new RuntimeException(e); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesClientTestSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesClientTestSpec.java index ce7adb6066ee..d077473f532a 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesClientTestSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesClientTestSpec.java @@ -61,6 +61,10 @@ import software.amazon.awssdk.codegen.poet.PoetExtension; import software.amazon.awssdk.codegen.poet.PoetUtils; import software.amazon.awssdk.codegen.utils.AuthUtils; +import software.amazon.awssdk.codegen.validation.ModelInvalidException; +import software.amazon.awssdk.codegen.validation.ValidationEntry; +import software.amazon.awssdk.codegen.validation.ValidationErrorId; +import software.amazon.awssdk.codegen.validation.ValidationErrorSeverity; import software.amazon.awssdk.core.SdkSystemSetting; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.rules.testing.AsyncTestCase; @@ -445,6 +449,20 @@ private CodeBlock requestCreation(OperationModel opModel, Map if (opParams != null) { opParams.forEach((n, v) -> { MemberModel memberModel = opModel.getInputShape().getMemberByC2jName(n); + + if (memberModel == null) { + String detailMsg = String.format("Endpoint test definition references member '%s' on the input shape '%s' " + + "but no such member is defined.", n, opModel.getInputShape().getC2jName()); + ValidationEntry entry = + new ValidationEntry() + .withSeverity(ValidationErrorSeverity.DANGER) + .withErrorId(ValidationErrorId.UNKNOWN_SHAPE_MEMBER) + .withDetailMessage(detailMsg); + + throw ModelInvalidException.builder() + .validationEntries(Collections.singletonList(entry)) + .build(); + } CodeBlock memberValue = createMemberValue(memberModel, v); b.add(".$N($L)", memberModel.getFluentSetterMethodName(), memberValue); }); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelInvalidException.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelInvalidException.java new file mode 100644 index 000000000000..28f482328253 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelInvalidException.java @@ -0,0 +1,58 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package software.amazon.awssdk.codegen.validation;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Exception thrown during code generation to signal that the model is invalid.
+ */
+public class ModelInvalidException extends RuntimeException {
+ private final List<ValidationEntry> validationEntries;
+
+ private ModelInvalidException(Builder b) {
+ super("Validation failed with the following errors: " + b.validationEntries);
+ this.validationEntries = Collections.unmodifiableList(new ArrayList<>(b.validationEntries));
+ }
+
+ public List<ValidationEntry> validationEntries() {
+ return validationEntries;
+ }
+
+ public static Builder builder() {
+ return new Builder();
+ }
+
+ public static class Builder {
+ private List<ValidationEntry> validationEntries;
+
+ public Builder validationEntries(List<ValidationEntry> validationEntries) {
+ if (validationEntries == null) {
+ this.validationEntries = Collections.emptyList();
+ } else {
+ this.validationEntries = validationEntries;
+ }
+
+ return this;
+ }
+
+ public ModelInvalidException build() {
+ return new ModelInvalidException(this);
+ }
+ }
+}
diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationEntry.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationEntry.java
index f0b57032cd8a..4e84bd625185 100644
--- a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationEntry.java
+++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationEntry.java
@@ -15,6 +15,8 @@
 package software.amazon.awssdk.codegen.validation;
 
+import software.amazon.awssdk.utils.ToString;
+
 public final class ValidationEntry {
 private ValidationErrorId errorId;
 private ValidationErrorSeverity severity;
@@ -58,4 +60,13 @@ public ValidationEntry withDetailMessage(String detailMessage) {
 setDetailMessage(detailMessage);
 return this;
 }
+
+ @Override
+ public String toString() {
+ return ToString.builder("ValidationEntry")
+ .add("errorId", errorId)
+ .add("severity", severity)
+ .add("detailMessage", detailMessage)
+ .build();
+ }
 }
diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorId.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorId.java
index 80a3190b793c..81fa3adc5676 100644
--- a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorId.java
+++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorId.java
@@ -20,9 +20,7 @@ public enum ValidationErrorId {
 "The shared models between two services differ in their definition, which causes differences in the source"
 + " files generated by the code generator."
 ),
- MEMBER_WITH_UNKNOWN_SHAPE(
- "The shape declares a member targeting an unknown shape."
- ), + UNKNOWN_SHAPE_MEMBER("The model references an unknown shape member."), ; private final String description; diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/CodeGeneratorTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/CodeGeneratorTest.java index a3726af351f8..92c3ee8300e2 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/CodeGeneratorTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/CodeGeneratorTest.java @@ -22,7 +22,9 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.io.InputStream; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.nio.file.FileVisitResult; @@ -37,9 +39,13 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import software.amazon.awssdk.codegen.internal.Jackson; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.model.rules.endpoints.EndpointTestSuiteModel; import software.amazon.awssdk.codegen.poet.ClientTestModels; +import software.amazon.awssdk.codegen.validation.ModelInvalidException; import software.amazon.awssdk.codegen.validation.ModelValidator; +import software.amazon.awssdk.codegen.validation.ValidationErrorId; public class CodeGeneratorTest { private static final String VALIDATION_REPORT_NAME = "validation-report.json"; @@ -125,6 +131,30 @@ void execute_c2jModelsAndIntermediateModel_generateSameCode() throws IOException } } + @Test + void execute_endpointsTestReferencesUnknownOperationMember_throwsValidationError() throws IOException { + ModelValidator mockValidator = mock(ModelValidator.class); + when(mockValidator.validateModels(any())).thenReturn(Collections.emptyList()); + + C2jModels referenceModels = ClientTestModels.awsJsonServiceC2jModels(); + + C2jModels c2jModelsWithBadTest = + C2jModels.builder() + .endpointTestSuiteModel(getBrokenEndpointTestSuiteModel()) + .customizationConfig(referenceModels.customizationConfig()) + .serviceModel(referenceModels.serviceModel()) + .paginatorsModel(referenceModels.paginatorsModel()) + .build(); + + assertThatThrownBy(() -> generateCodeFromC2jModels(c2jModelsWithBadTest, outputDir, true, + Collections.singletonList(mockValidator))) + .hasCauseInstanceOf(ModelInvalidException.class) + .matches(e -> { + ModelInvalidException exception = (ModelInvalidException) e.getCause(); + return exception.validationEntries().get(0).getErrorId() == ValidationErrorId.UNKNOWN_SHAPE_MEMBER; + }); + } + private void generateCodeFromC2jModels(C2jModels c2jModels, Path outputDir) { generateCodeFromC2jModels(c2jModels, outputDir, false, null); } @@ -170,6 +200,18 @@ private static Path validationReportPath(Path root) { return root.resolve(Paths.get("generated-sources", "sdk", "models", VALIDATION_REPORT_NAME)); } + private EndpointTestSuiteModel getBrokenEndpointTestSuiteModel() throws IOException { + InputStream resourceAsStream = getClass().getResourceAsStream("incorrect-endpoint-tests.json"); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + byte[] buffer = new byte[1024]; + int read; + while ((read = resourceAsStream.read(buffer)) != -1) { + baos.write(buffer, 0, read); + } + String json = StandardCharsets.UTF_8.decode(ByteBuffer.wrap(baos.toByteArray())).toString(); + return Jackson.load(EndpointTestSuiteModel.class, json); + } + private static void deleteDirectory(Path dir) 
throws IOException { Files.walkFileTree(dir, new FileVisitor() { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/incorrect-endpoint-tests.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/incorrect-endpoint-tests.json new file mode 100644 index 000000000000..861ba12cf3c5 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/incorrect-endpoint-tests.json @@ -0,0 +1,26 @@ +{ + "testCases": [ + { + "documentation": "Test references undefined operation member", + "expect": { + "error": "Some error" + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1" + }, + "operationName": "APostOperation", + "operationParams": { + "Foo": "bar" + } + } + ], + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + } + ] +} \ No newline at end of file From dd8fe47d5de2c0ca305c6de1ec18b600cfcc3b85 Mon Sep 17 00:00:00 2001 From: Dongie Agnir <261310+dagnir@users.noreply.github.com> Date: Mon, 16 Jun 2025 14:27:32 -0700 Subject: [PATCH 05/10] Add validation for missing request URI (#6182) * Add validation for missing request URI Throw ModelInvalidException if we can't find the request URI for an operation when a member has URI mapping. * Avoid default encoding --- .../feature-AWSSDKforJavav2-7cf1e5c.json | 6 +++ .../codegen/maven/plugin/GenerationMojo.java | 31 ++++++++++++- .../amazon/awssdk/codegen/AddShapes.java | 25 ++++++++--- .../codegen/validation/ValidationErrorId.java | 1 + .../awssdk/codegen/CodeGeneratorTest.java | 42 ++++++++++++++---- .../no-request-uri-operation-service.json | 43 +++++++++++++++++++ 6 files changed, 134 insertions(+), 14 deletions(-) create mode 100644 .changes/next-release/feature-AWSSDKforJavav2-7cf1e5c.json create mode 100644 codegen/src/test/resources/software/amazon/awssdk/codegen/no-request-uri-operation-service.json diff --git a/.changes/next-release/feature-AWSSDKforJavav2-7cf1e5c.json b/.changes/next-release/feature-AWSSDKforJavav2-7cf1e5c.json new file mode 100644 index 000000000000..3fc51c387d75 --- /dev/null +++ b/.changes/next-release/feature-AWSSDKforJavav2-7cf1e5c.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add code generation validation for missing request URI on an operation."
+} diff --git a/codegen-maven-plugin/src/main/java/software/amazon/awssdk/codegen/maven/plugin/GenerationMojo.java b/codegen-maven-plugin/src/main/java/software/amazon/awssdk/codegen/maven/plugin/GenerationMojo.java index 8b088846b055..cead2cbe24d9 100644 --- a/codegen-maven-plugin/src/main/java/software/amazon/awssdk/codegen/maven/plugin/GenerationMojo.java +++ b/codegen-maven-plugin/src/main/java/software/amazon/awssdk/codegen/maven/plugin/GenerationMojo.java @@ -17,6 +17,8 @@ import java.io.File; import java.io.IOException; +import java.io.Writer; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; @@ -35,6 +37,7 @@ import software.amazon.awssdk.codegen.C2jModels; import software.amazon.awssdk.codegen.CodeGenerator; import software.amazon.awssdk.codegen.IntermediateModelBuilder; +import software.amazon.awssdk.codegen.internal.Jackson; import software.amazon.awssdk.codegen.internal.Utils; import software.amazon.awssdk.codegen.model.config.customization.CustomizationConfig; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; @@ -44,6 +47,8 @@ import software.amazon.awssdk.codegen.model.service.ServiceModel; import software.amazon.awssdk.codegen.model.service.Waiters; import software.amazon.awssdk.codegen.utils.ModelLoaderUtils; +import software.amazon.awssdk.codegen.validation.ModelInvalidException; +import software.amazon.awssdk.codegen.validation.ModelValidationReport; import software.amazon.awssdk.utils.StringUtils; /** @@ -84,7 +89,18 @@ public void execute() throws MojoExecutionException { this.resourcesDirectory = Paths.get(outputDirectory).resolve("generated-resources").resolve("sdk-resources"); this.testsDirectory = Paths.get(outputDirectory).resolve("generated-test-sources").resolve("sdk-tests"); - List generationParams = initGenerationParams(); + List generationParams; + + try { + generationParams = initGenerationParams(); + } catch (ModelInvalidException e) { + if (writeValidationReport) { + ModelValidationReport report = new ModelValidationReport(); + report.setValidationEntries(e.validationEntries()); + emitValidationReport(report); + } + throw e; + } Map serviceNameToModelMap = new HashMap<>(); @@ -137,6 +153,8 @@ private List initGenerationParams() throws MojoExecutionExcept }).collect(Collectors.toList()); } + + private Stream findModelRoots() throws MojoExecutionException { try { return Files.find(codeGenResources.toPath(), 10, this::isModelFile) @@ -216,6 +234,17 @@ private Optional loadOptionalModel(Class clzz, Path location) { return ModelLoaderUtils.loadOptionalModel(clzz, location.toFile()); } + private void emitValidationReport(ModelValidationReport report) { + Path modelsDir = sourcesDirectory.resolve("models"); + try { + Writer writer = Files.newBufferedWriter(modelsDir.resolve("validation-report.json"), + StandardCharsets.UTF_8); + Jackson.writeWithObjectMapper(report, writer); + } catch (IOException e) { + getLog().warn("Failed to write validation report to " + modelsDir, e); + } + } + private static class ModelRoot { private final Path modelRoot; private final CustomizationConfig customizationConfig; diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java index 4e7811bebdda..f6d47aac8686 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java @@ -21,6 +21,7 @@ import static 
software.amazon.awssdk.codegen.internal.Utils.isMapShape; import static software.amazon.awssdk.codegen.internal.Utils.isScalar; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; @@ -37,9 +38,14 @@ import software.amazon.awssdk.codegen.model.intermediate.VariableModel; import software.amazon.awssdk.codegen.model.service.Location; import software.amazon.awssdk.codegen.model.service.Member; +import software.amazon.awssdk.codegen.model.service.Operation; import software.amazon.awssdk.codegen.model.service.ServiceModel; import software.amazon.awssdk.codegen.model.service.Shape; import software.amazon.awssdk.codegen.naming.NamingStrategy; +import software.amazon.awssdk.codegen.validation.ModelInvalidException; +import software.amazon.awssdk.codegen.validation.ValidationEntry; +import software.amazon.awssdk.codegen.validation.ValidationErrorId; +import software.amazon.awssdk.codegen.validation.ValidationErrorSeverity; import software.amazon.awssdk.utils.StringUtils; import software.amazon.awssdk.utils.Validate; @@ -345,11 +351,20 @@ private boolean isGreedy(Shape parentShape, Map allC2jShapes, Par * @throws RuntimeException If operation can't be found. */ private String findRequestUri(Shape parentShape, Map allC2jShapes) { - return builder.getService().getOperations().values().stream() - .filter(o -> o.getInput() != null) - .filter(o -> allC2jShapes.get(o.getInput().getShape()).equals(parentShape)) - .map(o -> o.getHttp().getRequestUri()) - .findFirst().orElseThrow(() -> new RuntimeException("Could not find request URI for input shape")); + Optional operation = builder.getService().getOperations().values().stream() + .filter(o -> o.getInput() != null) + .filter(o -> allC2jShapes.get(o.getInput().getShape()).equals(parentShape)) + .findFirst(); + + return operation.map(o -> o.getHttp().getRequestUri()) + .orElseThrow(() -> { + String detailMsg = "Could not find request URI for input shape"; + ValidationEntry entry = + new ValidationEntry().withErrorId(ValidationErrorId.REQUEST_URI_NOT_FOUND) + .withDetailMessage(detailMsg) + .withSeverity(ValidationErrorSeverity.DANGER); + return ModelInvalidException.builder().validationEntries(Collections.singletonList(entry)).build(); + }); } private String deriveUnmarshallerLocationName(Shape memberShape, String memberName, Member member) { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorId.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorId.java index 81fa3adc5676..37c488e11fc5 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorId.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorId.java @@ -21,6 +21,7 @@ public enum ValidationErrorId { + " files generated by the code generator." 
), UNKNOWN_SHAPE_MEMBER("The model references an unknown shape member."), + REQUEST_URI_NOT_FOUND("The request URI does not exist."), ; private final String description; diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/CodeGeneratorTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/CodeGeneratorTest.java index 92c3ee8300e2..c776e0295bea 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/CodeGeneratorTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/CodeGeneratorTest.java @@ -40,8 +40,10 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import software.amazon.awssdk.codegen.internal.Jackson; +import software.amazon.awssdk.codegen.model.config.customization.CustomizationConfig; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.model.rules.endpoints.EndpointTestSuiteModel; +import software.amazon.awssdk.codegen.model.service.ServiceModel; import software.amazon.awssdk.codegen.poet.ClientTestModels; import software.amazon.awssdk.codegen.validation.ModelInvalidException; import software.amazon.awssdk.codegen.validation.ModelValidator; @@ -155,6 +157,19 @@ void execute_endpointsTestReferencesUnknownOperationMember_throwsValidationError }); } + @Test + void execute_operationHasNoRequestUri_throwsValidationError() throws IOException { + C2jModels models = C2jModels.builder() + .customizationConfig(CustomizationConfig.create()) + .serviceModel(getMissingRequestUriServiceModel()) + .build(); + + assertThatThrownBy(() -> generateCodeFromC2jModels(models, outputDir, true, Collections.emptyList())) + .isInstanceOf(ModelInvalidException.class) + .matches(e -> ((ModelInvalidException) e).validationEntries().get(0).getErrorId() + == ValidationErrorId.REQUEST_URI_NOT_FOUND); + } + private void generateCodeFromC2jModels(C2jModels c2jModels, Path outputDir) { generateCodeFromC2jModels(c2jModels, outputDir, false, null); } @@ -201,17 +216,28 @@ private static Path validationReportPath(Path root) { } private EndpointTestSuiteModel getBrokenEndpointTestSuiteModel() throws IOException { - InputStream resourceAsStream = getClass().getResourceAsStream("incorrect-endpoint-tests.json"); - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - byte[] buffer = new byte[1024]; - int read; - while ((read = resourceAsStream.read(buffer)) != -1) { - baos.write(buffer, 0, read); - } - String json = StandardCharsets.UTF_8.decode(ByteBuffer.wrap(baos.toByteArray())).toString(); + String json = resourceAsString("incorrect-endpoint-tests.json"); return Jackson.load(EndpointTestSuiteModel.class, json); } + private ServiceModel getMissingRequestUriServiceModel() throws IOException { + String json = resourceAsString("no-request-uri-operation-service.json"); + return Jackson.load(ServiceModel.class, json); + } + + private String resourceAsString(String name) throws IOException { + ByteArrayOutputStream baos; + try (InputStream resourceAsStream = getClass().getResourceAsStream(name)) { + baos = new ByteArrayOutputStream(); + byte[] buffer = new byte[1024]; + int read; + while ((read = resourceAsStream.read(buffer)) != -1) { + baos.write(buffer, 0, read); + } + } + return StandardCharsets.UTF_8.decode(ByteBuffer.wrap(baos.toByteArray())).toString(); + } + private static void deleteDirectory(Path dir) throws IOException { Files.walkFileTree(dir, new FileVisitor() { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/no-request-uri-operation-service.json 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/no-request-uri-operation-service.json new file mode 100644 index 000000000000..d7caffad37e0 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/no-request-uri-operation-service.json @@ -0,0 +1,43 @@ +{ + "version": "2.0", + "metadata": { + "apiVersion": "2010-05-08", + "endpointPrefix": "json-service-endpoint", + "globalEndpoint": "json-service.amazonaws.com", + "protocol": "rest-json", + "serviceAbbreviation": "Rest Json Service", + "serviceFullName": "Some Service That Uses Rest-Json Protocol", + "serviceId": "Rest Json Service", + "signingName": "json-service", + "signatureVersion": "v4", + "uid": "json-service-2010-05-08", + "xmlNamespace": "https://json-service.amazonaws.com/doc/2010-05-08/" + }, + "operations": { + "OperationWithUriMappedParam": { + "name": "OperationWithUriMappedParam", + "http": { + "method": "GET" + }, + "input": { + "shape": "OperationWithUriMappedParamRequest" + } + } + }, + "shapes": { + "OperationWithUriMappedParamRequest": { + "type": "structure", + "members": { + "StringMember": { + "shape": "String", + "location": "uri", + "locationName": "stringMember" + } + } + }, + "String": { + "type": "string" + } + }, + "documentation": "A service that is implemented using the rest-json protocol" +} From 896557a5125bfa85f0636d2026735596358f83da Mon Sep 17 00:00:00 2001 From: Dongie Agnir <261310+dagnir@users.noreply.github.com> Date: Tue, 17 Jun 2025 12:12:50 -0700 Subject: [PATCH 06/10] Merge master (#6188) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Amazon EventBridge Update: Allow for more than 2 characters for location codes in EventBridge ARNs * Synthetics Update: Add support to change ephemeral storage. Add a new field "TestResult" under CanaryRunStatus. * Amazon Elastic Compute Cloud Update: Enable the option to automatically delete underlying Amazon EBS snapshots when deregistering Amazon Machine Images (AMIs) * AWS Network Firewall Update: You can now use VPC endpoint associations to create multiple firewall endpoints for a single firewall. * Cost Optimization Hub Update: This release allows customers to modify their preferred commitment term and payment options. * Updated endpoints.json and partitions.json. * Release 2.31.52. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.31.53-SNAPSHOT * Amazon SageMaker Service Update: Add maintenance status field to DescribeMlflowTrackingServer API response * AWS Amplify Update: Add support for customizable build instance sizes. CreateApp and UpdateApp operations now accept a new JobConfig parameter composed of BuildComputeType. * AWS CloudTrail Update: CloudTrail Feature Release: Support for Enriched Events with Configurable Context for Event Data Store * AmazonMWAA Update: Amazon MWAA now lets you choose a worker replacement strategy when updating an environment. This release adds two worker replacement strategies: FORCED (default), which stops workers immediately, and GRACEFUL, which allows workers to finish current tasks before shutting down. * Amazon FSx Update: FSx API changes to support the public launch of new Intelligent Tiering storage class on Amazon FSx for Lustre * Auto Scaling Update: Add support for "apple" CpuManufacturer in ABIS * Amazon Interactive Video Service RealTime Update: IVS Real-Time now offers customers the participant replication that allow customers to copy a participant from one stage to another. 
* AWS DataSync Update: AgentArns field is made optional for Object Storage and Azure Blob location create requests. Location credentials are now managed via Secrets Manager, and may be encrypted with service managed or customer managed keys. Authentication is now optional for Azure Blob locations. * Amazon Connect Service Update: Amazon Connect Service Feature: Email Recipient Limit Increase * AWS Billing and Cost Management Pricing Calculator Update: Add AFTER_DISCOUNTS_AND_COMMITMENTS to Workload Estimate Rate Type. Set ListWorkLoadEstimateUsage maxResults range to minimum of 0 and maximum of 300. * AWS Data Exchange Update: This release adds Tag support for Event Action resource, through which customers can create event actions with Tags and retrieve event actions with Tags. * Amazon Simple Storage Service Update: Adding checksum support for S3 PutBucketOwnershipControls API. * Release 2.31.53. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.31.54-SNAPSHOT * Amazon SageMaker Service Update: Release new parameter CapacityReservationConfig in ProductionVariant * EMR Serverless Update: This release adds the capability for users to specify an optional Execution IAM policy in the StartJobRun action. The resulting permissions assumed by the job run is the intersection of the permissions in the Execution Role and the specified Execution IAM Policy. * Updated endpoints.json and partitions.json. * Release 2.31.54. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.31.55-SNAPSHOT * Remove unneeded bundle dependency (#6138) * Pass locals as arguments in endpoint rules (#6131) * Pass locals as arguments in endpoint rules * Add change log entry * Ensure we always return from a tree method * remove pid file (#6149) * Amazon EC2 Container Service Update: Updates Amazon ECS documentation to include note for upcoming default log driver mode change. * AWS Backup Update: You can now subscribe to Amazon SNS notifications and Amazon EventBridge events for backup indexing. You can now receive notifications when a backup index is created, deleted, or fails to create, enhancing your ability to monitor and track your backup operations. * Amazon Elastic Kubernetes Service Update: Add support for filtering ListInsights API calls on MISCONFIGURATION insight category * AWS Parallel Computing Service Update: Introduces SUSPENDING and SUSPENDED states for clusters, compute node groups, and queues. * Amazon Athena Update: Add support for the managed query result in the workgroup APIs. The managed query result configuration enables users to store query results to Athena owned storage. * AWS Compute Optimizer Update: This release enables AWS Compute Optimizer to analyze Amazon Aurora database clusters and generate Aurora I/O-Optimized recommendations. * Agents for Amazon Bedrock Update: This release adds the Agent Lifecycle Paused State feature to Amazon Bedrock agents. By using an agent's alias, you can temporarily suspend agent operations during maintenance, updates, or other situations. * Cost Optimization Hub Update: Support recommendations for Aurora instance and Aurora cluster storage. * AWS EntityResolution Update: Add support for generating match IDs in near real-time. * Synthetics Update: Support for Java runtime handler pattern. * Updated endpoints.json and partitions.json. * Release 2.31.55. Updated CHANGELOG.md, README.md and all pom.xml. 
* Update to next snapshot version: 2.31.56-SNAPSHOT * Fix NPE in ProfileFileSupplier.defaultSupplier (#6150) * Fix NPE in ProfileFileSupplier.defaultSupplier * Add changelog * Cleanup builder validation * Fix S3 event parsing when eventName is present but eventTime is not (#6095) * Fix S3 event parsing when eventName is present but eventTime is not * Add test case * Update changelog description * Amazon API Gateway Update: Adds support to set the routing mode for a custom domain name. * EMR Serverless Update: AWS EMR Serverless: Adds a new option in the CancelJobRun API in EMR 7.9.0+, to cancel a job with grace period. This feature is enabled by default with a 120-second grace period for streaming jobs and is not enabled by default for batch jobs. * AmazonApiGatewayV2 Update: Adds support to create routing rules and set the routing mode for a custom domain name. * Release 2.31.56. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.31.57-SNAPSHOT * Fix DownloadFilter interface to properly return DownloadFilter type (#6148) * Fix DownloadFilter interface to properly return DownloadFilter type - Override Predicate default methods to return DownloadFilter - Add comprehensive parameterized tests * Add null checks and improve documentation for DownloadFilter * Add null pointer exception test for DownloadFilter * Improve DownloadFilter documentation and fix indentation * SonarQube issue fix - Removed the parentheses around the s3object parameter * Consistency in documentation description * additional changes * docs: add jencymaryjoseph as a contributor for code (#6155) * AWS Amplify Update: Update documentation for cacheConfig in CreateApp API * AWS MediaConnect Update: This release updates the DescribeFlow API to show peer IP addresses. You can now identify the peer IP addresses of devices connected to your sources and outputs. This helps you to verify and troubleshoot your flow's active connections. * Amazon Transcribe Streaming Service Update: AWS Healthscribe now supports new templates for the clinical note summary: BIRP, SIRP, DAP, BEHAVIORAL_SOAP, and PHYSICAL_SOAP * AWS Invoicing Update: Added new Invoicing ListInvoiceSummaries API Operation * AWS Network Firewall Update: You can now monitor flow and alert log metrics from the Network Firewall console. * Amazon Transcribe Service Update: AWS Healthscribe now supports new templates for the clinical note summary: BIRP, SIRP, DAP, BEHAVIORAL_SOAP, and PHYSICAL_SOAP * AWS Elemental MediaConvert Update: This release includes support for embedding and signing C2PA content credentials in MP4 outputs. * Amazon Elastic VMware Service Update: Amazon Elastic VMware Service (Amazon EVS) allows you to run VMware Cloud Foundation (VCF) directly within your Amazon VPC including simplified self-managed migration experience with guided workflow in AWS console or via AWS CLI, get full access to their VCF deployment and VCF license portability. * Amazon SageMaker Service Update: Added support for p6-b200 instance type in SageMaker Training Jobs and Training Plans. * Updated endpoints.json and partitions.json. * Release 2.31.57. Updated CHANGELOG.md, README.md and all pom.xml.
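To illustrate the DownloadFilter fix above (#6148): before the change, combining filters decayed to Predicate<S3Object>; with the overridden default methods the combination stays a DownloadFilter. A minimal sketch (the key names and dates are made up for illustration):

    import java.time.Instant;
    import software.amazon.awssdk.services.s3.model.S3Object;
    import software.amazon.awssdk.transfer.s3.config.DownloadFilter;

    public class DownloadFilterSketch {
        public static void main(String[] args) {
            DownloadFilter logsOnly = obj -> obj.key().endsWith(".log");
            // With the fix, and() returns DownloadFilter instead of Predicate<S3Object>,
            // so the combined filter can still be passed where a DownloadFilter is
            // required (e.g. DownloadDirectoryRequest.Builder#filter).
            DownloadFilter recentLogsOnly =
                logsOnly.and(obj -> obj.lastModified().isAfter(Instant.parse("2025-01-01T00:00:00Z")));

            S3Object sample = S3Object.builder()
                                      .key("app.log")
                                      .lastModified(Instant.parse("2025-06-01T00:00:00Z"))
                                      .build();
            System.out.println(recentLogsOnly.test(sample)); // true
        }
    }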
* Update to next snapshot version: 2.31.58-SNAPSHOT * S3 transfer manager client constructor and method recipe (#6151) * recipe added * delete upload recipe * comments addressed * Fixed DefaultDynamoDbAsyncTable::createTable() to create secondary indices that are defined on annotations of the POJO class, similar to DefaultDynamoDbTable::createTable() (#5847) * Adding functionality to config preferred authschemeProvider (#6083) * Adding functionality to config preferred authschemeProvider * adding test coverage * fix formatting checkstyle * Added changelog * Adding test coverage * Use SdkSystemSetting for both env and system * Add profiles to service pom * Fix test * Fix protocol test dependencies * Remove dependency on profiles (use option get in generated code instead) * Fix checkstyle * Move AuthSchemePreferenceProvider out of internal module * More checkstyle fixes * Fixing comments, adding fixture file, renaming provider to resolver, adding coverage * Remove unused import * Add codegen test for preferred auth scheme provider * Check for empty string * Add more documentation * Revert "Add codegen test for preferred auth scheme provider" This reverts commit 141b9d6b984fb305d14f680a545c7f182c4f10d1. * Replace authprovider builder with overridden defaultProvider method * Update core/aws-core/src/main/java/software/amazon/awssdk/awscore/auth/AuthSchemePreferenceResolver.java Co-authored-by: Olivier L Applin * Update core/aws-core/src/main/java/software/amazon/awssdk/awscore/auth/AuthSchemePreferenceResolver.java Co-authored-by: Olivier L Applin * Fix import from suggested changes * move test to aws-core module --------- Co-authored-by: Ran Vaknin Co-authored-by: Alex Woods Co-authored-by: Olivier L Applin * docs: add reifiedbeans as a contributor for code (#6159) * docs: update README.md [skip ci] * docs: update .all-contributorsrc [skip ci] --------- Co-authored-by: allcontributors[bot] <46447321+allcontributors[bot]@users.noreply.github.com> * AWS Key Management Service Update: AWS KMS announces the support for on-demand rotation of symmetric-encryption KMS keys with imported key material (EXTERNAL origin). * AWS WAFV2 Update: AWS WAF adds support for ASN-based traffic filtering and support for ASN-based rate limiting. * AWS Billing and Cost Management Pricing Calculator Update: Updating the minimum for List APIs to be 1 (instead of 0) * AWS CloudFormation Update: Add new warning type 'EXCLUDED_PROPERTIES' * Updated endpoints.json and partitions.json. * Release 2.31.58. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.31.59-SNAPSHOT * Fix expiration in past warning from ProfileFileRefresher. (#6157) * Fix expiration in past warning from ProfileFileRefresher. * Add changelog * Update stale time to 1s to match delay previously added by the CachedSupplier * Revert change to test case * Update changelog description * Add assertion to test that warning is not logged * Add support for service model protocols field (#6161) * Add support for service model protocols field * Changelog * Add support for service model protocols field * Account for Kinesis cbor customization in ProtocolUtils * Deprecate ServiceMetadata.getProtocol() * Deprecate ServiceMetadata.setProtocol() * Add test for Cbor v1 * Agents for Amazon Bedrock Runtime Update: This release introduces the `PromptCreationConfigurations` input parameter, which includes fields to control prompt population for `InvokeAgent` or `InvokeInlineAgent` requests. 
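For the DefaultDynamoDbAsyncTable::createTable() fix above (#5847), a minimal sketch of a bean whose annotation-defined secondary index the async table now also creates; the table and index names here are illustrative:

    import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean;
    import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey;
    import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbSecondaryPartitionKey;

    @DynamoDbBean
    public class Order {
        private String id;
        private String customerId;

        @DynamoDbPartitionKey
        public String getId() { return id; }
        public void setId(String id) { this.id = id; }

        // Secondary index defined only via annotation; previously the sync
        // DefaultDynamoDbTable.createTable() honored it but the async table did not.
        @DynamoDbSecondaryPartitionKey(indexNames = "customer-index")
        public String getCustomerId() { return customerId; }
        public void setCustomerId(String customerId) { this.customerId = customerId; }

        // Usage, e.g.:
        //   DynamoDbEnhancedAsyncClient client = DynamoDbEnhancedAsyncClient.create();
        //   DynamoDbAsyncTable<Order> table = client.table("orders", TableSchema.fromBean(Order.class));
        //   table.createTable().join(); // "customer-index" is now created as well
    }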
* Amazon Rekognition Update: Adds support for defining an ordered preference list of different Rekognition Face Liveness challenge types when calling CreateFaceLivenessSession. * Amazon Relational Database Service Update: Include Global Cluster Identifier in DBCluster if the DBCluster is a Global Cluster Member. * Amazon Route 53 Update: Amazon Route 53 now supports the Asia Pacific (Taipei) Region (ap-east-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. * AWS Key Management Service Update: Remove unpopulated KeyMaterialId from Encrypt Response * Amazon S3 Tables Update: S3 Tables now supports getting details about a table via its table ARN. * Updated endpoints.json and partitions.json. * Release 2.31.59. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.31.60-SNAPSHOT * Add Environment Token support (#6130) * Prototype implementation of Auth Scheme Preference * Add tracking of explicitly set token provider * Use generated PreferredAuthSchemeProvider to wrap/delegate * Add generic-service environment token provider + customization config (support for bedrock) * Use generated PreferredAuthSchemeProvider to wrap/delegate * Include tokenProvider in service config when bearer is on the model * Support sourcing token from jvm settings + env variable. * Refactor + use namingStrategy + add tests * Set business metric using an interceptor * Add ability to override token provider on request * Adding functionality to config preferred authschemeProvider * adding test coverage * fix formatting checkstyle * Added changelog * Fix checkstyle on preferred auth scheme provider * Add validation of service+customization * Refactor env token customization logic + add more tests * Testing and cleanup * Adding test coverage * Use SdkSystemSetting for both env and system * Add changelog * Add profiles to service pom * Minor cleanups * Fix test * Fix protocol test dependencies * Remove dependency on profiles (use option get in generated code instead) * Fix checkstyle * Move AuthSchemePreferenceProvider out of internal module * More checkstyle fixes * Refactor and cleanup - move anon classes to full codegen classes. * Update docs * Additional codegen tests * Update core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/AwsExecutionContextBuilder.java Co-authored-by: David Ho <70000000+davidh44@users.noreply.github.com> * Add codegen test for base client builder w/ env bearer token * Fixing comments, adding fixture file, renaming provider to resolver, adding coverage * Add async client test * Move metric interceptor logic from beforeExecute to beforeMarshall * Move metrics logic into auth scheme interceptor * Remove unused import * Add codegen test for preferred auth scheme provider * Check for empty string * Add more documentation * Revert "Add codegen test for preferred auth scheme provider" This reverts commit 141b9d6b984fb305d14f680a545c7f182c4f10d1.
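The Environment Token bullets above describe sourcing a bearer token from JVM system settings or an environment variable for services that model bearer auth (Bedrock being the motivating case). A rough sketch of that resolution order; the property and variable names below are assumptions for illustration only, since the real names come from the generated EnvironmentTokenSystemSettings classes in this PR:

    import java.util.Locale;
    import java.util.Optional;

    public final class EnvironmentTokenSketch {
        // Hypothetical setting names; the generated per-service settings class
        // defines the actual JVM property and environment variable.
        public static Optional<String> resolveBearerToken(String serviceName) {
            String property = "aws.bearerToken" + serviceName;                          // JVM system setting
            String envVar = "AWS_BEARER_TOKEN_" + serviceName.toUpperCase(Locale.ROOT); // environment variable
            String fromJvm = System.getProperty(property);
            if (fromJvm != null) {
                return Optional.of(fromJvm);
            }
            return Optional.ofNullable(System.getenv(envVar));
        }
    }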
* Replace authprovider builder with overridden defaultProvider method * Update core/aws-core/src/main/java/software/amazon/awssdk/awscore/auth/AuthSchemePreferenceResolver.java Co-authored-by: Olivier L Applin * Update core/aws-core/src/main/java/software/amazon/awssdk/awscore/auth/AuthSchemePreferenceResolver.java Co-authored-by: Olivier L Applin * Fix import from suggested changes * move test to aws-core module * Update codegen tests * Fix checkstyle * Fix checkstyle * Improve test coverage * Fix docs --------- Co-authored-by: Ran Vaknin Co-authored-by: David Ho <70000000+davidh44@users.noreply.github.com> Co-authored-by: RanVaknin <50976344+RanVaknin@users.noreply.github.com> Co-authored-by: Olivier L Applin * Use partitions.json to generate Region class (#6120) * Use partitions.json to generate Region class * Updating the test files * Fixing errors * Fixing test failures * Additional changes - Creating a new PartitionsMetadata for partitions.json * Fixing Checkstyle issues * Fixing Checkstyle issues * Fixing Checkstyle issues * Updating file names to resolve conflicts * Fixing Checkstyle issues * Updating file names * Updating file names * Amazon Elastic Compute Cloud Update: Release to support Elastic VMware Service (Amazon EVS) Subnet and Amazon EVS Network Interface Types. * Amazon WorkSpaces Thin Client Update: Add ConflictException to UpdateEnvironment API * AWS AppSync Update: Deprecate `atRestEncryptionEnabled` and `transitEncryptionEnabled` attributes in `CreateApiCache` action. Encryption is always enabled for new caches. * Amazon Elastic File System Update: Added support for Internet Protocol Version 6 (IPv6) on EFS Service APIs and mount targets. * AWS Marketplace Catalog Service Update: The ListEntities API now supports the EntityID, LastModifiedDate, ProductTitle, and Visibility filters for machine learning products. You can also sort using all of those filters. * AWS Cost Explorer Service Update: Support dual-stack endpoints for ce api * Amazon Connect Customer Profiles Update: This release introduces capability of Profile Explorer, using correct ingestion timestamp & using historical data for computing calculated attributes, and new standard objects for T&H as part of Amazon Connect Customer Profiles service. * Updated endpoints.json and partitions.json. * Release 2.31.60. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.31.61-SNAPSHOT * Removing S3 dependency in URL client (#6162) * Add IMDS dependency to regions module * Removing unused dependency in regions * Moving url-connection integration tests to s3 * Adding Changelog * Removing changelog * Deprecate DefaultCredentialsProvider.create() due to singleton issues (#6166) * Deprecate DefaultCredentialsProvider.create() due to singleton issues * Add deprecation notice to create() method * Update documentation to clearly explain the risks * Recommend using builder().build() instead * Update bugfix-AWSSDKforJavav2-c5cff7a.json * Amazon GameLift Streams Update: Documentation updates for Amazon GameLift Streams to address formatting errors, correct resource ID examples, and update links to other guides * Updated endpoints.json and partitions.json. * Release 2.31.61. Updated CHANGELOG.md, README.md and all pom.xml.
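On the DefaultCredentialsProvider.create() deprecation above (#6166): per the PR's stated motivation, create() hands out a shared singleton, so closing it (for example when a client that owns it is closed) can affect unrelated clients, while the recommended builder().build() produces an independent instance. A minimal sketch:

    import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.s3.S3Client;

    public class FreshDefaultCredentialsExample {
        public static void main(String[] args) {
            // builder().build() creates a provider instance that this client alone
            // owns and may safely close; create() would return the shared singleton.
            try (S3Client s3 = S3Client.builder()
                                       .region(Region.US_EAST_1)
                                       .credentialsProvider(DefaultCredentialsProvider.builder().build())
                                       .build()) {
                s3.listBuckets().buckets().forEach(b -> System.out.println(b.name()));
            }
        }
    }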
* Update to next snapshot version: 2.31.62-SNAPSHOT * Add GitHub workflow to lock PR conversation on close (#6169) * Add GitHub workflow to lock PR conversation on close * Address comments * Amazon Lex Model Building V2 Update: Add support for the Assisted NLU feature to improve bot performance * AWS Control Catalog Update: Introduced ListControlMappings API that retrieves control mappings. Added control aliases and governed resources fields in GetControl and ListControls APIs. New filtering capability in ListControls API, with implementation identifiers and implementation types. * Amazon Elastic Kubernetes Service Update: Release for EKS Pod Identity Cross Account feature and disableSessionTags flag. * AWS WAFV2 Update: WAF now provides two DDoS protection options: resource-level monitoring for Application Load Balancers and the AWSManagedRulesAntiDDoSRuleSet managed rule group for CloudFront distributions. * Amazon Relational Database Service Update: Updates Amazon RDS documentation for Amazon RDS for Db2 cross-Region replicas in standby mode. * AWS Network Manager Update: Add support for public DNS hostname resolution to private IP addresses across Cloud WAN-managed VPCs. Add support for security group referencing across Cloud WAN-managed VPCs. * Updated endpoints.json and partitions.json. * Release 2.31.62. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.31.63-SNAPSHOT * Add validation for .brazil.json (#6172) * Add validation for .brazil.json This commit adds the `validate-brazil-config` script, which validates the .brazil.json file for correctness. It validates that all modules of the SDK are mapped to a package internally, or explicitly skipped. Additionally, it ensures that all external dependencies are also mapped to internal packages. The buildspecs/validate-brazil-config.yml is added so that this validation can run as a PR build check. * Update codebuild-ci.yml workflow * Remove unused var * Remove 'services' set * Use aws-actions/configure-aws-credentials@v4 (#6175) V1 is deprecated. V4 should be a drop-in replacement. * AmazonConnectCampaignServiceV2 Update: Added PutInstanceCommunicationLimits and GetInstanceCommunicationLimits APIs * EMR Serverless Update: This release adds support for retrieval of the optional executionIamPolicy field in the GetJobRun API response. * Amazon EC2 Container Service Update: This Amazon ECS release supports updating the capacityProviderStrategy parameter in update-service. * AWS Parallel Computing Service Update: Fixed regex patterns for ARN fields. * Amazon API Gateway Update: Documentation updates for Amazon API Gateway * AWS IoT FleetWise Update: Add new status READY_FOR_CHECKIN used for vehicle synchronisation * AWS Key Management Service Update: AWS KMS announces the support of ML-DSA key pairs that creates post-quantum safe digital signatures. * AmazonApiGatewayV2 Update: Documentation updates for Amazon API Gateway * Release 2.31.63. Updated CHANGELOG.md, README.md and all pom.xml.
* Update to next snapshot version: 2.31.64-SNAPSHOT * Invalid link in deprecated message of DefaultCredentialsProvider.create, updated it to use simple @code (#6179) * Update api-surface-area-review-verification.yml to exclude non-java c… (#6180) * Update api-surface-area-review-verification.yml to exclude non-java change and non-test change * Update changelog-verification.yml to only trigger java change * Add new module verification CI (#6173) * Add new module verification * Add new module verification * Add tracking of RequestBody/ResponseTransformer implementations used in UserAgent (#6171) * Add useragent metadata execution attributes and apply them to useragent. Add basic tracking of requestbody/responseTransformer implementations to UA metadata. * Add delegate getter to the Notifying transformer + handle empty class names w/ unknown * Add name methods to body/response transformer interfaces + add implementations * Checkstyle fixes + changelog * Update docs + AdditionalMetadata use builder + change all name methods to just name. * Use enum + single letter short form for RequestBody/ContentStreamProvider * Migrate to single character name + enum for remaining types * Fix test failures * Fix checkstyle, start adding tests * Add more tests * Try and fix arch test failure * Fix arch test + more test coverage * Improve test coverage * Improve regex for classWithInnerClassesToPattern * Fix minor pr issues * Fix unknown string to use enum * Fix changelog * Amazon Elastic Container Registry Update: The `DescribeImageScanning` API now includes `lastInUseAt` and `InUseCount` fields that can be used to prioritize vulnerability remediation for images that are actively being used. * Amazon Bedrock Update: This release of the SDK has the API and documentation for the createcustommodel API. This feature lets you copy a Amazon SageMaker trained Amazon Nova model into Amazon Bedrock for inference. * AWS Network Firewall Update: You can now create firewalls using a Transit Gateway instead of a VPC, resulting in a TGW attachment. * Amazon SageMaker Service Update: This release 1) adds a new S3DataType Converse for SageMaker training 2)adds C8g R7gd M8g C6in P6 P6e instance type for SageMaker endpoint 3) adds m7i, r7i, c7i instance type for SageMaker Training and Processing. * Release 2.31.64. Updated CHANGELOG.md, README.md and all pom.xml.
* Update to next snapshot version: 2.31.65-SNAPSHOT * Transition all scripts to python3 (#6133) * Fix a bug in ConstructorCache when classes are GC'ed but not removed from cache (#6186) * Fix a bug in ConstructorCache when classes are GC'ed but not removed from cache * Added a changelog * Address PR feedback --------- Co-authored-by: AWS <> Co-authored-by: aws-sdk-java-automation <43143862+aws-sdk-java-automation@users.noreply.github.com> Co-authored-by: Zoe Wang Co-authored-by: Manuel Sugawara Co-authored-by: Olivier L Applin Co-authored-by: Alex Woods Co-authored-by: Drew Davis Co-authored-by: jencymaryjoseph <35571282+jencymaryjoseph@users.noreply.github.com> Co-authored-by: John Viegas <70235430+joviegas@users.noreply.github.com> Co-authored-by: Bole1155 <49867651+Fred1155@users.noreply.github.com> Co-authored-by: roamariei Co-authored-by: Ran Vaknin <50976344+RanVaknin@users.noreply.github.com> Co-authored-by: Ran Vaknin Co-authored-by: allcontributors[bot] <46447321+allcontributors[bot]@users.noreply.github.com> Co-authored-by: David Ho <70000000+davidh44@users.noreply.github.com> Co-authored-by: Saranya Somepalli --- .all-contributorsrc | 18 + .attach_pid885 | 0 .changes/2.31.52.json | 42 + .changes/2.31.53.json | 78 + .changes/2.31.54.json | 24 + .changes/2.31.55.json | 78 + .changes/2.31.56.json | 36 + .changes/2.31.57.json | 72 + .changes/2.31.58.json | 48 + .changes/2.31.59.json | 60 + .changes/2.31.60.json | 66 + .changes/2.31.61.json | 18 + .changes/2.31.62.json | 54 + .changes/2.31.63.json | 54 + .changes/2.31.64.json | 36 + .../bugfix-AWSSDKforJavaV2-a136845.json | 6 + .../api-surface-area-review-verification.yml | 6 +- .github/workflows/changelog-verification.yml | 17 +- .github/workflows/codebuild-ci.yml | 32 +- .../workflows/lock-conversation-closed-pr.yml | 35 + .github/workflows/merge-queue-metric.yml | 6 +- .github/workflows/new-module-verification.yml | 191 +++ CHANGELOG.md | 403 +++++ README.md | 12 +- archetypes/archetype-app-quickstart/pom.xml | 2 +- archetypes/archetype-lambda/pom.xml | 2 +- archetypes/archetype-tools/pom.xml | 2 +- .../resources/map-service-to-client-prefix | 7 +- archetypes/pom.xml | 2 +- aws-sdk-java/pom.xml | 7 +- bom-internal/pom.xml | 2 +- bom/pom.xml | 7 +- buildspecs/validate-brazil-config.yml | 14 + bundle-logging-bridge/pom.xml | 2 +- bundle-sdk/pom.xml | 2 +- bundle/pom.xml | 2 +- codegen-lite-maven-plugin/pom.xml | 2 +- .../maven/plugin/RegionGenerationMojo.java | 19 +- codegen-lite/pom.xml | 2 +- .../PartitionsRegionsMetadataLoader.java | 47 + .../codegen/lite/regions/RegionGenerator.java | 41 +- .../lite/regions/RegionMetadataGenerator.java | 10 +- .../RegionMetadataProviderGenerator.java | 8 +- .../codegen/lite/regions/model/Partition.java | 6 +- .../model/PartitionRegionsMetadata.java | 177 +++ .../model/PartitionsRegionsMetadata.java | 55 + .../lite/regions/RegionGenerationTest.java | 15 +- .../regions/region-metadata-provider.java | 48 +- .../awssdk/codegen/lite/regions/regions.java | 94 +- .../lite/test-partitions.json.resource | 255 ++++ codegen-maven-plugin/pom.xml | 2 +- codegen/pom.xml | 2 +- .../amazon/awssdk/codegen/AddMetadata.java | 3 +- .../amazon/awssdk/codegen/AddOperations.java | 3 +- .../amazon/awssdk/codegen/AddShapes.java | 3 +- ...itStringPayloadQueryProtocolProcessor.java | 3 +- .../MetadataModifiersProcessor.java | 3 +- .../SmithyRpcV2CborProtocolProcessor.java | 4 +- .../tasks/AuthSchemeGeneratorTasks.java | 6 + .../tasks/CommonInternalGeneratorTasks.java | 16 +- .../amazon/awssdk/codegen/internal/Utils.java | 
7 +- .../customization/CustomizationConfig.java | 15 + .../model/service/ServiceMetadata.java | 19 + .../codegen/naming/DefaultNamingStrategy.java | 16 + .../awssdk/codegen/naming/NamingStrategy.java | 15 + .../awssdk/codegen/poet/PoetExtension.java | 4 + .../scheme/AuthSchemeInterceptorSpec.java | 42 + .../auth/scheme/AuthSchemeProviderSpec.java | 14 + .../poet/auth/scheme/AuthSchemeSpecUtils.java | 9 + .../PreferredAuthSchemeProviderSpec.java | 108 ++ .../poet/builder/BaseClientBuilderClass.java | 92 +- .../EnvironmentTokenSystemSettingsClass.java | 76 + .../poet/client/specs/JsonProtocolSpec.java | 8 + .../poet/client/specs/QueryProtocolSpec.java | 8 +- .../ServiceClientConfigurationUtils.java | 26 + .../poet/rules2/CodeGeneratorVisitor.java | 110 +- .../poet/rules2/CodegenExpressionBuidler.java | 43 +- .../codegen/poet/rules2/ComputeScopeTree.java | 193 +++ .../poet/rules2/EndpointProviderSpec2.java | 124 +- .../poet/rules2/PrepareForCodegenVisitor.java | 64 +- .../poet/rules2/RenameForCodegenVisitor.java | 86 ++ .../poet/rules2/RuleSetExpression.java | 10 + .../rules2/WalkRuleExpressionVisitor.java | 2 +- .../awssdk/codegen/utils/AuthUtils.java | 3 +- .../awssdk/codegen/utils/ProtocolUtils.java | 63 + .../codegen/rules/partitions.json.resource | 3 + .../naming/DefaultNamingStrategyTest.java | 33 + .../awssdk/codegen/poet/ClientTestModels.java | 16 + .../poet/auth/scheme/AuthSchemeSpecTest.java | 13 + .../builder/BaseClientBuilderClassTest.java | 8 + ...vironmentTokenSystemSettingsClassTest.java | 31 + .../awssdk/codegen/utils/AuthUtilsTest.java | 20 +- .../codegen/utils/ProtocolUtilsTest.java | 79 + ...-bearer-token-auth-scheme-interceptor.java | 164 ++ .../query-auth-scheme-preferred-provider.java | 51 + .../scheme/query-auth-scheme-provider.java | 23 +- ...oint-auth-params-auth-scheme-provider.java | 23 +- ...test-bearer-auth-client-builder-class.java | 61 +- .../sra/test-client-builder-class.java | 125 +- ...-client-builder-endpoints-auth-params.java | 27 +- ...lient-builder-internal-defaults-class.java | 52 +- ...-composed-sync-default-client-builder.java | 81 +- ...env-bearer-token-client-builder-class.java | 227 +++ ...ulti-auth-sigv4a-client-builder-class.java | 58 +- ...test-no-auth-ops-client-builder-class.java | 73 +- ...-no-auth-service-client-builder-class.java | 56 +- .../sra/test-query-client-builder-class.java | 27 +- ...test-bearer-auth-client-builder-class.java | 51 +- .../builder/test-client-builder-class.java | 119 +- ...-client-builder-endpoints-auth-params.java | 17 +- ...lient-builder-internal-defaults-class.java | 42 +- ...-composed-sync-default-client-builder.java | 75 +- .../test-h2-service-client-builder-class.java | 44 +- ...dgeForH2-service-client-builder-class.java | 42 +- ...test-no-auth-ops-client-builder-class.java | 70 +- ...-no-auth-service-client-builder-class.java | 42 +- .../test-query-client-builder-class.java | 17 +- .../customization-env-bearer-token.config | 3 + .../poet/client/c2j/rpcv2/service-2.json | 3 +- .../sra/test-aws-json-async-client-class.java | 731 ++++----- .../sra/test-cbor-async-client-class.java | 731 ++++----- .../sra/test-json-async-client-class.java | 814 +++++----- .../client/sra/test-json-client-class.java | 598 ++++---- .../sra/test-query-async-client-class.java | 456 +++--- .../client/sra/test-query-client-class.java | 473 +++--- .../sra/test-xml-async-client-class.java | 388 ++--- .../client/sra/test-xml-client-class.java | 327 ++-- .../test-aws-json-async-client-class.java | 741 ++++----- 
.../client/test-cbor-async-client-class.java | 741 ++++----- .../poet/client/test-cbor-client-class.java | 541 +++---- ...vironment-token-system-settings-class.java | 24 + .../client/test-json-async-client-class.java | 822 +++++----- .../poet/client/test-json-client-class.java | 604 ++++---- .../client/test-query-async-client-class.java | 462 +++--- .../poet/client/test-query-client-class.java | 479 +++--- .../client/test-xml-async-client-class.java | 394 ++--- .../poet/client/test-xml-client-class.java | 333 ++-- ...entconfiguration-withchecksum-builder.java | 18 + .../poet/rules2/endpoint-provider-class.java | 318 +--- ...int-provider-know-prop-override-class.java | 318 +--- core/annotations/pom.xml | 2 +- core/arns/pom.xml | 2 +- core/auth-crt/pom.xml | 2 +- core/auth/pom.xml | 2 +- .../DefaultCredentialsProvider.java | 9 +- core/aws-core/pom.xml | 2 +- .../AwsRequestOverrideConfiguration.java | 36 +- .../auth/AuthSchemePreferenceResolver.java | 123 ++ .../internal/AwsExecutionContextBuilder.java | 100 +- .../AwsRequestOverrideConfigurationTest.java | 13 + .../AwsExecutionContextBuilderTest.java | 98 +- .../AuthSchemePreferenceResolverTest.java | 107 ++ core/checksums-spi/pom.xml | 2 +- core/checksums/pom.xml | 2 +- .../checksums/internal/ConstructorCache.java | 12 +- core/crt-core/pom.xml | 2 +- core/endpoints-spi/pom.xml | 2 +- core/http-auth-aws-crt/pom.xml | 2 +- core/http-auth-aws-eventstream/pom.xml | 2 +- core/http-auth-aws/pom.xml | 2 +- core/http-auth-spi/pom.xml | 2 +- core/http-auth/pom.xml | 2 +- core/identity-spi/pom.xml | 2 +- core/imds/pom.xml | 2 +- core/json-utils/pom.xml | 2 +- core/metrics-spi/pom.xml | 2 +- core/pom.xml | 2 +- core/profiles/pom.xml | 2 +- .../amazon/awssdk/profiles/ProfileFile.java | 11 +- .../awssdk/profiles/ProfileFileSupplier.java | 4 +- .../awssdk/profiles/ProfileProperty.java | 2 + .../internal/ProfileFileRefresher.java | 3 +- .../profiles/ProfileFileSupplierTest.java | 10 + .../awssdk/profiles/ProfileFileTest.java | 8 + .../internal/ProfileFileRefresherTest.java | 40 +- core/protocols/aws-cbor-protocol/pom.xml | 2 +- core/protocols/aws-json-protocol/pom.xml | 2 +- core/protocols/aws-query-protocol/pom.xml | 2 +- core/protocols/aws-xml-protocol/pom.xml | 2 +- core/protocols/pom.xml | 2 +- core/protocols/protocol-core/pom.xml | 2 +- core/protocols/smithy-rpcv2-protocol/pom.xml | 2 +- core/regions/pom.xml | 2 +- .../regions/internal/region/endpoints.json | 111 ++ .../amazon/awssdk/regions/RegionTest.java | 29 + core/retries-spi/pom.xml | 2 +- core/retries/pom.xml | 2 +- core/sdk-core/pom.xml | 2 +- .../amazon/awssdk/core/SdkSystemSetting.java | 10 +- .../awssdk/core/async/AsyncRequestBody.java | 50 + .../core/async/AsyncResponseTransformer.java | 45 + .../BlockingInputStreamAsyncRequestBody.java | 5 + .../BlockingOutputStreamAsyncRequestBody.java | 5 + .../AsyncResponseTransformerListener.java | 9 + .../client/handler/ClientExecutionParams.java | 23 + .../SdkInternalExecutionAttribute.java | 15 + .../ByteArrayAsyncResponseTransformer.java | 5 + .../async/ByteBuffersAsyncRequestBody.java | 5 + .../internal/async/FileAsyncRequestBody.java | 5 + .../async/FileAsyncResponseTransformer.java | 5 + .../async/InputStreamResponseTransformer.java | 5 + ...putStreamWithExecutorAsyncRequestBody.java | 5 + .../PublisherAsyncResponseTransformer.java | 5 + .../pipeline/stages/ApplyUserAgentStage.java | 8 + .../sync/BufferingContentStreamProvider.java | 5 + .../sync/FileContentStreamProvider.java | 5 + .../amazon/awssdk/core/sync/RequestBody.java | 22 +- 
.../awssdk/core/sync/ResponseTransformer.java | 155 +- .../core/useragent/AdditionalMetadata.java | 118 ++ .../useragent/BusinessMetricFeatureId.java | 1 + .../stages/ApplyUserAgentStageTest.java | 26 + .../useragent/AdditionalMetadataTest.java | 41 + http-client-spi/pom.xml | 2 +- .../awssdk/http/ContentStreamProvider.java | 82 +- http-clients/apache-client/pom.xml | 2 +- http-clients/aws-crt-client/pom.xml | 2 +- http-clients/netty-nio-client/pom.xml | 2 +- http-clients/pom.xml | 2 +- http-clients/url-connection-client/pom.xml | 20 +- .../cloudwatch-metric-publisher/pom.xml | 2 +- .../emf-metric-logging-publisher/pom.xml | 2 +- metric-publishers/pom.xml | 2 +- pom.xml | 6 +- release-scripts/pom.xml | 2 +- scripts/changelog/__init__.py | 3 +- scripts/changelog/git.py | 1 + scripts/changelog/model.py | 4 +- scripts/changelog/util.py | 13 +- scripts/changelog/writer.py | 4 +- .../generate_cross_link_data.py | 1 + scripts/finalize-release-changes | 4 +- scripts/generate-changelog | 2 +- scripts/new-change | 2 +- scripts/run-integ-test | 8 +- scripts/validate-brazil-config | 106 ++ services-custom/dynamodb-enhanced/pom.xml | 2 +- .../dynamodb/internal/TableIndices.java | 65 + .../client/DefaultDynamoDbAsyncTable.java | 9 +- .../internal/client/DefaultDynamoDbTable.java | 58 +- .../enhanced/dynamodb/TableIndicesTest.java | 105 ++ .../VersionedRecordExtensionTest.java | 2 +- .../client/DefaultDynamoDbAsyncTableTest.java | 64 + services-custom/iam-policy-builder/pom.xml | 2 +- services-custom/pom.xml | 2 +- .../s3-event-notifications/pom.xml | 2 +- .../DefaultS3EventNotificationReader.java | 2 +- .../model/S3EventNotificationReaderTest.java | 47 + services-custom/s3-transfer-manager/pom.xml | 2 +- .../transfer/s3/config/DownloadFilter.java | 44 +- .../s3/config/DownloadFilterTest.java | 167 ++ services/accessanalyzer/pom.xml | 2 +- services/account/pom.xml | 2 +- services/acm/pom.xml | 2 +- services/acmpca/pom.xml | 2 +- services/amp/pom.xml | 2 +- services/amplify/pom.xml | 2 +- .../codegen-resources/service-2.json | 39 +- services/amplifybackend/pom.xml | 2 +- services/amplifyuibuilder/pom.xml | 2 +- services/apigateway/pom.xml | 2 +- .../codegen-resources/service-2.json | 25 +- services/apigatewaymanagementapi/pom.xml | 2 +- services/apigatewayv2/pom.xml | 2 +- .../codegen-resources/paginators-1.json | 9 +- .../codegen-resources/service-2.json | 705 ++++++++- services/appconfig/pom.xml | 2 +- services/appconfigdata/pom.xml | 2 +- services/appfabric/pom.xml | 2 +- services/appflow/pom.xml | 2 +- services/appintegrations/pom.xml | 2 +- services/applicationautoscaling/pom.xml | 2 +- services/applicationcostprofiler/pom.xml | 2 +- services/applicationdiscovery/pom.xml | 2 +- services/applicationinsights/pom.xml | 2 +- services/applicationsignals/pom.xml | 2 +- services/appmesh/pom.xml | 2 +- services/apprunner/pom.xml | 2 +- services/appstream/pom.xml | 2 +- services/appsync/pom.xml | 2 +- .../codegen-resources/service-2.json | 52 +- services/apptest/pom.xml | 2 +- services/arczonalshift/pom.xml | 2 +- services/artifact/pom.xml | 2 +- services/athena/pom.xml | 2 +- .../codegen-resources/service-2.json | 69 +- services/auditmanager/pom.xml | 2 +- services/autoscaling/pom.xml | 2 +- .../codegen-resources/service-2.json | 90 +- services/autoscalingplans/pom.xml | 2 +- services/b2bi/pom.xml | 2 +- services/backup/pom.xml | 2 +- .../codegen-resources/service-2.json | 51 +- services/backupgateway/pom.xml | 2 +- services/backupsearch/pom.xml | 2 +- services/batch/pom.xml | 2 +- 
services/bcmdataexports/pom.xml | 2 +- services/bcmpricingcalculator/pom.xml | 2 +- .../codegen-resources/service-2.json | 19 +- services/bedrock/pom.xml | 2 +- .../codegen-resources/service-2.json | 132 +- services/bedrockagent/pom.xml | 2 +- .../codegen-resources/service-2.json | 34 +- services/bedrockagentruntime/pom.xml | 2 +- .../codegen-resources/service-2.json | 131 +- services/bedrockdataautomation/pom.xml | 2 +- services/bedrockdataautomationruntime/pom.xml | 2 +- services/bedrockruntime/pom.xml | 2 +- services/billing/pom.xml | 2 +- services/billingconductor/pom.xml | 2 +- services/braket/pom.xml | 2 +- services/budgets/pom.xml | 2 +- services/chatbot/pom.xml | 2 +- services/chime/pom.xml | 2 +- services/chimesdkidentity/pom.xml | 2 +- services/chimesdkmediapipelines/pom.xml | 2 +- services/chimesdkmeetings/pom.xml | 2 +- services/chimesdkmessaging/pom.xml | 2 +- services/chimesdkvoice/pom.xml | 2 +- services/cleanrooms/pom.xml | 2 +- services/cleanroomsml/pom.xml | 2 +- services/cloud9/pom.xml | 2 +- services/cloudcontrol/pom.xml | 2 +- services/clouddirectory/pom.xml | 2 +- services/cloudformation/pom.xml | 2 +- .../codegen-resources/service-2.json | 128 +- services/cloudfront/pom.xml | 2 +- services/cloudfrontkeyvaluestore/pom.xml | 2 +- services/cloudhsm/pom.xml | 2 +- services/cloudhsmv2/pom.xml | 2 +- services/cloudsearch/pom.xml | 2 +- services/cloudsearchdomain/pom.xml | 2 +- services/cloudtrail/pom.xml | 2 +- .../codegen-resources/service-2.json | 475 +++--- services/cloudtraildata/pom.xml | 2 +- services/cloudwatch/pom.xml | 2 +- services/cloudwatchevents/pom.xml | 2 +- services/cloudwatchlogs/pom.xml | 2 +- services/codeartifact/pom.xml | 2 +- services/codebuild/pom.xml | 2 +- services/codecatalyst/pom.xml | 2 +- services/codecommit/pom.xml | 2 +- services/codeconnections/pom.xml | 2 +- services/codedeploy/pom.xml | 2 +- services/codeguruprofiler/pom.xml | 2 +- services/codegurureviewer/pom.xml | 2 +- services/codegurusecurity/pom.xml | 2 +- services/codepipeline/pom.xml | 2 +- services/codestarconnections/pom.xml | 2 +- services/codestarnotifications/pom.xml | 2 +- services/cognitoidentity/pom.xml | 2 +- services/cognitoidentityprovider/pom.xml | 2 +- services/cognitosync/pom.xml | 2 +- services/comprehend/pom.xml | 2 +- services/comprehendmedical/pom.xml | 2 +- services/computeoptimizer/pom.xml | 2 +- .../codegen-resources/service-2.json | 190 ++- services/config/pom.xml | 2 +- services/connect/pom.xml | 2 +- .../codegen-resources/service-2.json | 2 +- services/connectcampaigns/pom.xml | 2 +- services/connectcampaignsv2/pom.xml | 2 +- .../codegen-resources/service-2.json | 88 +- services/connectcases/pom.xml | 2 +- services/connectcontactlens/pom.xml | 2 +- services/connectparticipant/pom.xml | 2 +- services/controlcatalog/pom.xml | 2 +- .../codegen-resources/paginators-1.json | 6 + .../codegen-resources/service-2.json | 274 +++- services/controltower/pom.xml | 2 +- services/costandusagereport/pom.xml | 2 +- services/costexplorer/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 850 ++++++----- .../codegen-resources/endpoint-tests.json | 318 ++-- services/costoptimizationhub/pom.xml | 2 +- .../codegen-resources/service-2.json | 71 +- services/customerprofiles/pom.xml | 2 +- .../codegen-resources/paginators-1.json | 6 + .../codegen-resources/service-2.json | 647 +++++++- services/databasemigration/pom.xml | 2 +- services/databrew/pom.xml | 2 +- services/dataexchange/pom.xml | 2 +- .../codegen-resources/service-2.json | 12 + services/datapipeline/pom.xml | 
2 +- services/datasync/pom.xml | 2 +- .../codegen-resources/service-2.json | 148 +- services/datazone/pom.xml | 2 +- services/dax/pom.xml | 2 +- services/deadline/pom.xml | 2 +- services/detective/pom.xml | 2 +- services/devicefarm/pom.xml | 2 +- services/devopsguru/pom.xml | 2 +- services/directconnect/pom.xml | 2 +- services/directory/pom.xml | 2 +- services/directoryservicedata/pom.xml | 2 +- services/dlm/pom.xml | 2 +- services/docdb/pom.xml | 2 +- services/docdbelastic/pom.xml | 2 +- services/drs/pom.xml | 2 +- services/dsql/pom.xml | 2 +- services/dynamodb/pom.xml | 2 +- services/ebs/pom.xml | 2 +- services/ec2/pom.xml | 2 +- .../codegen-resources/service-2.json | 149 +- services/ec2instanceconnect/pom.xml | 2 +- services/ecr/pom.xml | 2 +- .../codegen-resources/service-2.json | 42 +- services/ecrpublic/pom.xml | 2 +- services/ecs/pom.xml | 2 +- .../codegen-resources/service-2.json | 20 +- services/efs/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 566 +++++-- .../codegen-resources/endpoint-tests.json | 736 ++------- .../codegen-resources/service-2.json | 51 +- services/eks/pom.xml | 2 +- .../codegen-resources/service-2.json | 123 +- services/eksauth/pom.xml | 2 +- services/elasticache/pom.xml | 2 +- services/elasticbeanstalk/pom.xml | 2 +- services/elasticloadbalancing/pom.xml | 2 +- services/elasticloadbalancingv2/pom.xml | 2 +- services/elasticsearch/pom.xml | 2 +- services/elastictranscoder/pom.xml | 2 +- services/emr/pom.xml | 2 +- services/emrcontainers/pom.xml | 2 +- services/emrserverless/pom.xml | 2 +- .../codegen-resources/service-2.json | 96 +- services/entityresolution/pom.xml | 2 +- .../codegen-resources/service-2.json | 215 ++- services/eventbridge/pom.xml | 2 +- .../codegen-resources/paginators-1.json | 3 +- .../codegen-resources/service-2.json | 60 +- services/evidently/pom.xml | 2 +- services/evs/pom.xml | 60 + .../codegen-resources/endpoint-rule-set.json | 350 +++++ .../codegen-resources/endpoint-tests.json | 314 ++++ .../codegen-resources/paginators-1.json | 22 + .../codegen-resources/service-2.json | 1356 +++++++++++++++++ .../codegen-resources/waiters-2.json | 5 + services/finspace/pom.xml | 2 +- services/finspacedata/pom.xml | 2 +- services/firehose/pom.xml | 2 +- services/fis/pom.xml | 2 +- services/fms/pom.xml | 2 +- services/forecast/pom.xml | 2 +- services/forecastquery/pom.xml | 2 +- services/frauddetector/pom.xml | 2 +- services/freetier/pom.xml | 2 +- services/fsx/pom.xml | 2 +- .../codegen-resources/service-2.json | 104 +- services/gamelift/pom.xml | 2 +- services/gameliftstreams/pom.xml | 2 +- .../codegen-resources/service-2.json | 162 +- services/geomaps/pom.xml | 2 +- services/geoplaces/pom.xml | 2 +- services/georoutes/pom.xml | 2 +- services/glacier/pom.xml | 2 +- services/globalaccelerator/pom.xml | 2 +- services/glue/pom.xml | 2 +- services/grafana/pom.xml | 2 +- services/greengrass/pom.xml | 2 +- services/greengrassv2/pom.xml | 2 +- services/groundstation/pom.xml | 2 +- services/guardduty/pom.xml | 2 +- services/health/pom.xml | 2 +- services/healthlake/pom.xml | 2 +- services/iam/pom.xml | 2 +- services/identitystore/pom.xml | 2 +- services/imagebuilder/pom.xml | 2 +- services/inspector/pom.xml | 2 +- services/inspector2/pom.xml | 2 +- services/inspectorscan/pom.xml | 2 +- services/internetmonitor/pom.xml | 2 +- services/invoicing/pom.xml | 2 +- .../codegen-resources/paginators-1.json | 6 + .../codegen-resources/service-2.json | 498 +++++- services/iot/pom.xml | 2 +- services/iotanalytics/pom.xml | 2 +- 
services/iotdataplane/pom.xml | 2 +- services/iotdeviceadvisor/pom.xml | 2 +- services/iotevents/pom.xml | 2 +- services/ioteventsdata/pom.xml | 2 +- services/iotfleethub/pom.xml | 2 +- services/iotfleetwise/pom.xml | 2 +- .../codegen-resources/service-2.json | 7 +- services/iotjobsdataplane/pom.xml | 2 +- services/iotmanagedintegrations/pom.xml | 2 +- services/iotsecuretunneling/pom.xml | 2 +- services/iotsitewise/pom.xml | 2 +- services/iotthingsgraph/pom.xml | 2 +- services/iottwinmaker/pom.xml | 2 +- services/iotwireless/pom.xml | 2 +- services/ivs/pom.xml | 2 +- services/ivschat/pom.xml | 2 +- services/ivsrealtime/pom.xml | 2 +- .../codegen-resources/paginators-1.json | 6 + .../codegen-resources/service-2.json | 373 ++++- services/kafka/pom.xml | 2 +- services/kafkaconnect/pom.xml | 2 +- services/kendra/pom.xml | 2 +- services/kendraranking/pom.xml | 2 +- services/keyspaces/pom.xml | 2 +- services/kinesis/pom.xml | 2 +- services/kinesisanalytics/pom.xml | 2 +- services/kinesisanalyticsv2/pom.xml | 2 +- services/kinesisvideo/pom.xml | 2 +- services/kinesisvideoarchivedmedia/pom.xml | 2 +- services/kinesisvideomedia/pom.xml | 2 +- services/kinesisvideosignaling/pom.xml | 2 +- services/kinesisvideowebrtcstorage/pom.xml | 2 +- services/kms/pom.xml | 2 +- .../codegen-resources/service-2.json | 430 ++++-- services/lakeformation/pom.xml | 2 +- services/lambda/pom.xml | 2 +- services/launchwizard/pom.xml | 2 +- services/lexmodelbuilding/pom.xml | 2 +- services/lexmodelsv2/pom.xml | 2 +- .../codegen-resources/service-2.json | 29 +- services/lexruntime/pom.xml | 2 +- services/lexruntimev2/pom.xml | 2 +- services/licensemanager/pom.xml | 2 +- .../licensemanagerlinuxsubscriptions/pom.xml | 2 +- .../licensemanagerusersubscriptions/pom.xml | 2 +- services/lightsail/pom.xml | 2 +- services/location/pom.xml | 2 +- services/lookoutequipment/pom.xml | 2 +- services/lookoutmetrics/pom.xml | 2 +- services/lookoutvision/pom.xml | 2 +- services/m2/pom.xml | 2 +- services/machinelearning/pom.xml | 2 +- services/macie2/pom.xml | 2 +- services/mailmanager/pom.xml | 2 +- services/managedblockchain/pom.xml | 2 +- services/managedblockchainquery/pom.xml | 2 +- services/marketplaceagreement/pom.xml | 2 +- services/marketplacecatalog/pom.xml | 2 +- .../codegen-resources/service-2.json | 201 ++- services/marketplacecommerceanalytics/pom.xml | 2 +- services/marketplacedeployment/pom.xml | 2 +- services/marketplaceentitlement/pom.xml | 2 +- services/marketplacemetering/pom.xml | 2 +- services/marketplacereporting/pom.xml | 2 +- services/mediaconnect/pom.xml | 2 +- .../codegen-resources/service-2.json | 94 +- services/mediaconvert/pom.xml | 2 +- .../codegen-resources/service-2.json | 36 +- services/medialive/pom.xml | 2 +- services/mediapackage/pom.xml | 2 +- services/mediapackagev2/pom.xml | 2 +- services/mediapackagevod/pom.xml | 2 +- services/mediastore/pom.xml | 2 +- services/mediastoredata/pom.xml | 2 +- services/mediatailor/pom.xml | 2 +- services/medicalimaging/pom.xml | 2 +- services/memorydb/pom.xml | 2 +- services/mgn/pom.xml | 2 +- services/migrationhub/pom.xml | 2 +- services/migrationhubconfig/pom.xml | 2 +- services/migrationhuborchestrator/pom.xml | 2 +- services/migrationhubrefactorspaces/pom.xml | 2 +- services/migrationhubstrategy/pom.xml | 2 +- services/mq/pom.xml | 2 +- services/mturk/pom.xml | 2 +- services/mwaa/pom.xml | 2 +- .../codegen-resources/service-2.json | 101 +- services/neptune/pom.xml | 2 +- services/neptunedata/pom.xml | 2 +- services/neptunegraph/pom.xml | 2 +- 
services/networkfirewall/pom.xml | 2 +- .../codegen-resources/paginators-1.json | 6 + .../codegen-resources/service-2.json | 905 ++++++++++- services/networkflowmonitor/pom.xml | 2 +- services/networkmanager/pom.xml | 2 +- .../codegen-resources/service-2.json | 35 +- services/networkmonitor/pom.xml | 2 +- services/notifications/pom.xml | 2 +- services/notificationscontacts/pom.xml | 2 +- services/oam/pom.xml | 2 +- services/observabilityadmin/pom.xml | 2 +- services/omics/pom.xml | 2 +- services/opensearch/pom.xml | 2 +- services/opensearchserverless/pom.xml | 2 +- services/opsworks/pom.xml | 2 +- services/opsworkscm/pom.xml | 2 +- services/organizations/pom.xml | 2 +- services/osis/pom.xml | 2 +- services/outposts/pom.xml | 2 +- services/panorama/pom.xml | 2 +- services/partnercentralselling/pom.xml | 2 +- services/paymentcryptography/pom.xml | 2 +- services/paymentcryptographydata/pom.xml | 2 +- services/pcaconnectorad/pom.xml | 2 +- services/pcaconnectorscep/pom.xml | 2 +- services/pcs/pom.xml | 2 +- .../codegen-resources/service-2.json | 28 +- services/personalize/pom.xml | 2 +- services/personalizeevents/pom.xml | 2 +- services/personalizeruntime/pom.xml | 2 +- services/pi/pom.xml | 2 +- services/pinpoint/pom.xml | 2 +- services/pinpointemail/pom.xml | 2 +- services/pinpointsmsvoice/pom.xml | 2 +- services/pinpointsmsvoicev2/pom.xml | 2 +- services/pipes/pom.xml | 2 +- services/polly/pom.xml | 2 +- services/pom.xml | 3 +- services/pricing/pom.xml | 2 +- services/proton/pom.xml | 2 +- services/qapps/pom.xml | 2 +- services/qbusiness/pom.xml | 2 +- services/qconnect/pom.xml | 2 +- services/qldb/pom.xml | 2 +- services/qldbsession/pom.xml | 2 +- services/quicksight/pom.xml | 2 +- services/ram/pom.xml | 2 +- services/rbin/pom.xml | 2 +- services/rds/pom.xml | 2 +- .../codegen-resources/service-2.json | 28 +- services/rdsdata/pom.xml | 2 +- services/redshift/pom.xml | 2 +- services/redshiftdata/pom.xml | 2 +- services/redshiftserverless/pom.xml | 2 +- services/rekognition/pom.xml | 2 +- .../codegen-resources/service-2.json | 181 ++- services/repostspace/pom.xml | 2 +- services/resiliencehub/pom.xml | 2 +- services/resourceexplorer2/pom.xml | 2 +- services/resourcegroups/pom.xml | 2 +- services/resourcegroupstaggingapi/pom.xml | 2 +- services/robomaker/pom.xml | 2 +- services/rolesanywhere/pom.xml | 2 +- services/route53/pom.xml | 2 +- .../codegen-resources/service-2.json | 45 +- services/route53domains/pom.xml | 2 +- services/route53profiles/pom.xml | 2 +- services/route53recoverycluster/pom.xml | 2 +- services/route53recoverycontrolconfig/pom.xml | 2 +- services/route53recoveryreadiness/pom.xml | 2 +- services/route53resolver/pom.xml | 2 +- services/rum/pom.xml | 2 +- services/s3/pom.xml | 8 +- .../EmptyFileS3IntegrationTest.java | 2 +- .../HeadObjectIntegrationTest.java | 2 +- .../S3WithUrlHttpClientIntegrationTest.java | 3 +- ...rlHttpConnectionS3IntegrationTestBase.java | 3 +- .../codegen-resources/service-2.json | 139 +- services/s3control/pom.xml | 2 +- services/s3outposts/pom.xml | 2 +- services/s3tables/pom.xml | 2 +- .../codegen-resources/service-2.json | 25 +- services/sagemaker/pom.xml | 2 +- .../codegen-resources/service-2.json | 215 ++- services/sagemakera2iruntime/pom.xml | 2 +- services/sagemakeredge/pom.xml | 2 +- services/sagemakerfeaturestoreruntime/pom.xml | 2 +- services/sagemakergeospatial/pom.xml | 2 +- services/sagemakermetrics/pom.xml | 2 +- services/sagemakerruntime/pom.xml | 2 +- services/savingsplans/pom.xml | 2 +- services/scheduler/pom.xml | 2 +- 
services/schemas/pom.xml | 2 +- services/secretsmanager/pom.xml | 2 +- services/securityhub/pom.xml | 2 +- services/securityir/pom.xml | 2 +- services/securitylake/pom.xml | 2 +- .../serverlessapplicationrepository/pom.xml | 2 +- services/servicecatalog/pom.xml | 2 +- services/servicecatalogappregistry/pom.xml | 2 +- services/servicediscovery/pom.xml | 2 +- services/servicequotas/pom.xml | 2 +- services/ses/pom.xml | 2 +- services/sesv2/pom.xml | 2 +- services/sfn/pom.xml | 2 +- services/shield/pom.xml | 2 +- services/signer/pom.xml | 2 +- services/simspaceweaver/pom.xml | 2 +- services/sms/pom.xml | 2 +- services/snowball/pom.xml | 2 +- services/snowdevicemanagement/pom.xml | 2 +- services/sns/pom.xml | 2 +- services/socialmessaging/pom.xml | 2 +- services/sqs/pom.xml | 2 +- services/ssm/pom.xml | 2 +- services/ssmcontacts/pom.xml | 2 +- services/ssmguiconnect/pom.xml | 2 +- services/ssmincidents/pom.xml | 2 +- services/ssmquicksetup/pom.xml | 2 +- services/ssmsap/pom.xml | 2 +- services/sso/pom.xml | 2 +- services/ssoadmin/pom.xml | 2 +- services/ssooidc/pom.xml | 2 +- services/storagegateway/pom.xml | 2 +- services/sts/pom.xml | 2 +- services/supplychain/pom.xml | 2 +- services/support/pom.xml | 2 +- services/supportapp/pom.xml | 2 +- services/swf/pom.xml | 2 +- services/synthetics/pom.xml | 2 +- .../codegen-resources/service-2.json | 53 +- services/taxsettings/pom.xml | 2 +- services/textract/pom.xml | 2 +- services/timestreaminfluxdb/pom.xml | 2 +- services/timestreamquery/pom.xml | 2 +- services/timestreamwrite/pom.xml | 2 +- services/tnb/pom.xml | 2 +- services/transcribe/pom.xml | 2 +- .../codegen-resources/service-2.json | 21 +- services/transcribestreaming/pom.xml | 2 +- .../codegen-resources/paginators-1.json | 3 +- .../codegen-resources/service-2.json | 33 +- services/transfer/pom.xml | 2 +- services/translate/pom.xml | 2 +- services/trustedadvisor/pom.xml | 2 +- services/verifiedpermissions/pom.xml | 2 +- services/voiceid/pom.xml | 2 +- services/vpclattice/pom.xml | 2 +- services/waf/pom.xml | 2 +- services/wafv2/pom.xml | 2 +- .../codegen-resources/paginators-1.json | 3 +- .../codegen-resources/service-2.json | 221 ++- services/wellarchitected/pom.xml | 2 +- services/wisdom/pom.xml | 2 +- services/workdocs/pom.xml | 2 +- services/workmail/pom.xml | 2 +- services/workmailmessageflow/pom.xml | 2 +- services/workspaces/pom.xml | 2 +- services/workspacesthinclient/pom.xml | 2 +- .../codegen-resources/service-2.json | 13 +- services/workspacesweb/pom.xml | 2 +- services/xray/pom.xml | 2 +- test/architecture-tests/pom.xml | 2 +- .../amazon/awssdk/archtests/ArchUtils.java | 5 + .../CodingConventionWithSuppressionTest.java | 5 +- test/auth-tests/pom.xml | 2 +- .../pom.xml | 2 +- test/bundle-shading-tests/pom.xml | 2 +- test/codegen-generated-classes-test/pom.xml | 2 +- .../customization.config | 4 + .../endpoint-rule-set.json | 355 +++++ .../endpoint-tests.json | 5 + .../environmenttokenprovider/service-2.json | 38 + .../multiauth/service-2.json | 25 +- .../PreferredAuthSchemeProviderTest.java | 121 ++ .../bearerauth/ClientBuilderTest.java | 3 + .../EnvironmentTokenProviderTest.java | 304 ++++ ...chemePreferenceResolverFunctionalTest.java | 258 ++++ ...ingBodyAndTransformerImplTrackingTest.java | 187 +++ test/crt-unavailable-tests/pom.xml | 2 +- test/http-client-tests/pom.xml | 2 +- test/module-path-tests/pom.xml | 2 +- .../pom.xml | 2 +- test/protocol-tests-core/pom.xml | 2 +- test/protocol-tests/pom.xml | 2 +- test/region-testing/pom.xml | 2 +- test/ruleset-testing-core/pom.xml | 2 
+- test/s3-benchmarks/pom.xml | 2 +- test/s3-tests/pom.xml | 8 +- test/sdk-benchmarks/pom.xml | 2 +- test/sdk-native-image-test/pom.xml | 2 +- test/service-test-utils/pom.xml | 2 +- test/stability-tests/pom.xml | 2 +- test/test-utils/pom.xml | 2 +- test/tests-coverage-reporting/pom.xml | 2 +- test/v2-migration-tests/pom.xml | 2 +- .../main/java/foo/bar/TransferManagerS3.java | 23 +- .../main/java/foo/bar/TransferManagerS3.java | 20 +- third-party/pom.xml | 2 +- third-party/third-party-jackson-core/pom.xml | 2 +- .../pom.xml | 2 +- third-party/third-party-slf4j-api/pom.xml | 2 +- utils/pom.xml | 2 +- v2-migration/pom.xml | 2 +- .../TransferManagerMethodsToV2.java | 85 +- .../rewrite/aws-sdk-java-v1-to-v2-with-tm.yml | 1 + ...change-transfer-manager-simple-methods.yml | 28 + .../rewrite/change-transfer-manager-types.yml | 6 + 775 files changed, 23030 insertions(+), 9782 deletions(-) delete mode 100644 .attach_pid885 create mode 100644 .changes/2.31.52.json create mode 100644 .changes/2.31.53.json create mode 100644 .changes/2.31.54.json create mode 100644 .changes/2.31.55.json create mode 100644 .changes/2.31.56.json create mode 100644 .changes/2.31.57.json create mode 100644 .changes/2.31.58.json create mode 100644 .changes/2.31.59.json create mode 100644 .changes/2.31.60.json create mode 100644 .changes/2.31.61.json create mode 100644 .changes/2.31.62.json create mode 100644 .changes/2.31.63.json create mode 100644 .changes/2.31.64.json create mode 100644 .changes/next-release/bugfix-AWSSDKforJavaV2-a136845.json create mode 100644 .github/workflows/lock-conversation-closed-pr.yml create mode 100644 .github/workflows/new-module-verification.yml create mode 100644 buildspecs/validate-brazil-config.yml create mode 100644 codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/PartitionsRegionsMetadataLoader.java create mode 100644 codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/PartitionRegionsMetadata.java create mode 100644 codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/PartitionsRegionsMetadata.java create mode 100644 codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/test-partitions.json.resource create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/PreferredAuthSchemeProviderSpec.java create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/EnvironmentTokenSystemSettingsClass.java create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/ComputeScopeTree.java create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/RenameForCodegenVisitor.java create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/utils/ProtocolUtils.java create mode 100644 codegen/src/test/java/software/amazon/awssdk/codegen/poet/client/EnvironmentTokenSystemSettingsClassTest.java create mode 100644 codegen/src/test/java/software/amazon/awssdk/codegen/utils/ProtocolUtilsTest.java create mode 100644 codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/env-bearer-token-auth-scheme-interceptor.java create mode 100644 codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-preferred-provider.java create mode 100644 codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-env-bearer-token-client-builder-class.java create mode 100644 
codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json-bearer-auth/customization-env-bearer-token.config create mode 100644 codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-environment-token-system-settings-class.java create mode 100644 core/aws-core/src/main/java/software/amazon/awssdk/awscore/auth/AuthSchemePreferenceResolver.java create mode 100644 core/aws-core/src/test/java/software/amazon/awssdk/awscore/internal/auth/AuthSchemePreferenceResolverTest.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/useragent/AdditionalMetadata.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/useragent/AdditionalMetadataTest.java create mode 100755 scripts/validate-brazil-config create mode 100644 services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/TableIndices.java create mode 100644 services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/TableIndicesTest.java create mode 100644 services/evs/pom.xml create mode 100644 services/evs/src/main/resources/codegen-resources/endpoint-rule-set.json create mode 100644 services/evs/src/main/resources/codegen-resources/endpoint-tests.json create mode 100644 services/evs/src/main/resources/codegen-resources/paginators-1.json create mode 100644 services/evs/src/main/resources/codegen-resources/service-2.json create mode 100644 services/evs/src/main/resources/codegen-resources/waiters-2.json rename {http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http => services/s3/src/it/java/software/amazon/awssdk/services/s3}/urlconnection/EmptyFileS3IntegrationTest.java (97%) rename {http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http => services/s3/src/it/java/software/amazon/awssdk/services/s3}/urlconnection/HeadObjectIntegrationTest.java (97%) rename {http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http => services/s3/src/it/java/software/amazon/awssdk/services/s3}/urlconnection/S3WithUrlHttpClientIntegrationTest.java (98%) rename {http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http => services/s3/src/it/java/software/amazon/awssdk/services/s3}/urlconnection/UrlHttpConnectionS3IntegrationTestBase.java (97%) create mode 100644 test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/customization.config create mode 100644 test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/endpoint-rule-set.json create mode 100644 test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/endpoint-tests.json create mode 100644 test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/service-2.json create mode 100644 test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/PreferredAuthSchemeProviderTest.java create mode 100644 test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/environmenttokenprovider/EnvironmentTokenProviderTest.java create mode 100644 test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/multiauth/AuthSchemePreferenceResolverFunctionalTest.java create mode 100644 test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/useragent/StreamingBodyAndTransformerImplTrackingTest.java create mode 100644 
v2-migration/src/main/resources/META-INF/rewrite/change-transfer-manager-simple-methods.yml diff --git a/.all-contributorsrc b/.all-contributorsrc index 35f26df90327..21353af60c47 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -1057,6 +1057,24 @@ "contributions": [ "code" ] + }, + { + "login": "jencymaryjoseph", + "name": "Jency Joseph", + "avatar_url": "https://avatars.githubusercontent.com/u/35571282?v=4", + "profile": "https://github.com/jencymaryjoseph", + "contributions": [ + "code" + ] + }, + { + "login": "reifiedbeans", + "name": "Drew Davis", + "avatar_url": "https://avatars.githubusercontent.com/u/9686215?v=4", + "profile": "https://github.com/reifiedbeans", + "contributions": [ + "code" + ] } ], "contributorsPerLine": 7, diff --git a/.attach_pid885 b/.attach_pid885 deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/.changes/2.31.52.json b/.changes/2.31.52.json new file mode 100644 index 000000000000..a2a40faf30d8 --- /dev/null +++ b/.changes/2.31.52.json @@ -0,0 +1,42 @@ +{ + "version": "2.31.52", + "date": "2025-05-28", + "entries": [ + { + "type": "feature", + "category": "AWS Network Firewall", + "contributor": "", + "description": "You can now use VPC endpoint associations to create multiple firewall endpoints for a single firewall." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Enable the option to automatically delete underlying Amazon EBS snapshots when deregistering Amazon Machine Images (AMIs)" + }, + { + "type": "feature", + "category": "Amazon EventBridge", + "contributor": "", + "description": "Allow for more than 2 characters for location codes in EventBridge ARNs" + }, + { + "type": "feature", + "category": "Cost Optimization Hub", + "contributor": "", + "description": "This release allows customers to modify their preferred commitment term and payment options." + }, + { + "type": "feature", + "category": "Synthetics", + "contributor": "", + "description": "Add support to change ephemeral storage. Add a new field \"TestResult\" under CanaryRunStatus." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.53.json b/.changes/2.31.53.json new file mode 100644 index 000000000000..2086c9b4320a --- /dev/null +++ b/.changes/2.31.53.json @@ -0,0 +1,78 @@ +{ + "version": "2.31.53", + "date": "2025-05-29", + "entries": [ + { + "type": "feature", + "category": "AWS Amplify", + "contributor": "", + "description": "Add support for customizable build instance sizes. CreateApp and UpdateApp operations now accept a new JobConfig parameter composed of BuildComputeType." + }, + { + "type": "feature", + "category": "AWS Billing and Cost Management Pricing Calculator", + "contributor": "", + "description": "Add AFTER_DISCOUNTS_AND_COMMITMENTS to Workload Estimate Rate Type. Set ListWorkLoadEstimateUsage maxResults range to minimum of 0 and maximum of 300." + }, + { + "type": "feature", + "category": "AWS CloudTrail", + "contributor": "", + "description": "CloudTrail Feature Release: Support for Enriched Events with Configurable Context for Event Data Store" + }, + { + "type": "feature", + "category": "AWS Data Exchange", + "contributor": "", + "description": "This release adds Tag support for Event Action resource, through which customers can create event actions with Tags and retrieve event actions with Tags." 
+ }, + { + "type": "feature", + "category": "AWS DataSync", + "contributor": "", + "description": "AgentArns field is made optional for Object Storage and Azure Blob location create requests. Location credentials are now managed via Secrets Manager, and may be encrypted with service managed or customer managed keys. Authentication is now optional for Azure Blob locations." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "Amazon Connect Service Feature: Email Recipient Limit Increase" + }, + { + "type": "feature", + "category": "Amazon FSx", + "contributor": "", + "description": "FSx API changes to support the public launch of new Intelligent Tiering storage class on Amazon FSx for Lustre" + }, + { + "type": "feature", + "category": "Amazon Interactive Video Service RealTime", + "contributor": "", + "description": "IVS Real-Time now offers customers the participant replication that allow customers to copy a participant from one stage to another." + }, + { + "type": "feature", + "category": "AmazonMWAA", + "contributor": "", + "description": "Amazon MWAA now lets you choose a worker replacement strategy when updating an environment. This release adds two worker replacement strategies: FORCED (default), which stops workers immediately, and GRACEFUL, which allows workers to finish current tasks before shutting down." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Add maintenance status field to DescribeMlflowTrackingServer API response" + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "Adding checksum support for S3 PutBucketOwnershipControls API." + }, + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "Add support for \"apple\" CpuManufacturer in ABIS" + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.54.json b/.changes/2.31.54.json new file mode 100644 index 000000000000..e2adfa5957b7 --- /dev/null +++ b/.changes/2.31.54.json @@ -0,0 +1,24 @@ +{ + "version": "2.31.54", + "date": "2025-05-30", + "entries": [ + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Release new parameter CapacityReservationConfig in ProductionVariant" + }, + { + "type": "feature", + "category": "EMR Serverless", + "contributor": "", + "description": "This release adds the capability for users to specify an optional Execution IAM policy in the StartJobRun action. The resulting permissions assumed by the job run is the intersection of the permissions in the Execution Role and the specified Execution IAM Policy." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.55.json b/.changes/2.31.55.json new file mode 100644 index 000000000000..6fd8f18cb2cf --- /dev/null +++ b/.changes/2.31.55.json @@ -0,0 +1,78 @@ +{ + "version": "2.31.55", + "date": "2025-06-02", + "entries": [ + { + "type": "feature", + "category": "AWS Backup", + "contributor": "", + "description": "You can now subscribe to Amazon SNS notifications and Amazon EventBridge events for backup indexing. You can now receive notifications when a backup index is created, deleted, or fails to create, enhancing your ability to monitor and track your backup operations." 
+ }, + { + "type": "feature", + "category": "AWS Compute Optimizer", + "contributor": "", + "description": "This release enables AWS Compute Optimizer to analyze Amazon Aurora database clusters and generate Aurora I/O-Optimized recommendations." + }, + { + "type": "feature", + "category": "AWS EntityResolution", + "contributor": "", + "description": "Add support for generating match IDs in near real-time." + }, + { + "type": "feature", + "category": "AWS Parallel Computing Service", + "contributor": "", + "description": "Introduces SUSPENDING and SUSPENDED states for clusters, compute node groups, and queues." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Improve the endpoint rules performance by directly passing the needed params instead of using a POJO to keep track of them." + }, + { + "type": "feature", + "category": "Agents for Amazon Bedrock", + "contributor": "", + "description": "This release adds the Agent Lifecycle Paused State feature to Amazon Bedrock agents. By using an agent's alias, you can temporarily suspend agent operations during maintenance, updates, or other situations." + }, + { + "type": "feature", + "category": "Amazon Athena", + "contributor": "", + "description": "Add support for the managed query result in the workgroup APIs. The managed query result configuration enables users to store query results to Athena owned storage." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "Updates Amazon ECS documentation to include note for upcoming default log driver mode change." + }, + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "contributor": "", + "description": "Add support for filtering ListInsights API calls on MISCONFIGURATION insight category" + }, + { + "type": "feature", + "category": "Cost Optimization Hub", + "contributor": "", + "description": "Support recommendations for Aurora instance and Aurora cluster storage." + }, + { + "type": "feature", + "category": "Synthetics", + "contributor": "", + "description": "Support for Java runtime handler pattern." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.56.json b/.changes/2.31.56.json new file mode 100644 index 000000000000..82589878b17e --- /dev/null +++ b/.changes/2.31.56.json @@ -0,0 +1,36 @@ +{ + "version": "2.31.56", + "date": "2025-06-03", + "entries": [ + { + "type": "bugfix", + "category": "AWS S3 Event Notifications", + "contributor": "reifiedbeans", + "description": "Fixed parsing of S3 event notifications to allow eventTime to be null when eventName is not" + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fix NPE in `ProfileFileSupplier.defaultSupplier` when both credentials and config files do not exist." + }, + { + "type": "feature", + "category": "Amazon API Gateway", + "contributor": "", + "description": "Adds support to set the routing mode for a custom domain name." + }, + { + "type": "feature", + "category": "AmazonApiGatewayV2", + "contributor": "", + "description": "Adds support to create routing rules and set the routing mode for a custom domain name." 
+ }, + { + "type": "feature", + "category": "EMR Serverless", + "contributor": "", + "description": "AWS EMR Serverless: Adds a new option in the CancelJobRun API in EMR 7.9.0+, to cancel a job with grace period. This feature is enabled by default with a 120-second grace period for streaming jobs and is not enabled by default for batch jobs." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.57.json b/.changes/2.31.57.json new file mode 100644 index 000000000000..bb89037b8928 --- /dev/null +++ b/.changes/2.31.57.json @@ -0,0 +1,72 @@ +{ + "version": "2.31.57", + "date": "2025-06-04", + "entries": [ + { + "type": "bugfix", + "category": "S3 Transfer Manager", + "contributor": "jencymaryjoseph", + "description": "DownloadFilter type incompatability methods overriden from extended interface" + }, + { + "type": "feature", + "category": "AWS Amplify", + "contributor": "", + "description": "Update documentation for cacheConfig in CreateApp API" + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "This release includes support for embedding and signing C2PA content credentials in MP4 outputs." + }, + { + "type": "feature", + "category": "AWS Invoicing", + "contributor": "", + "description": "Added new Invoicing ListInvoiceSummaries API Operation" + }, + { + "type": "feature", + "category": "AWS MediaConnect", + "contributor": "", + "description": "This release updates the DescribeFlow API to show peer IP addresses. You can now identify the peer IP addresses of devices connected to your sources and outputs. This helps you to verify and troubleshoot your flow's active connections." + }, + { + "type": "feature", + "category": "AWS Network Firewall", + "contributor": "", + "description": "You can now monitor flow and alert log metrics from the Network Firewall console." + }, + { + "type": "feature", + "category": "Amazon Elastic VMware Service", + "contributor": "", + "description": "Amazon Elastic VMware Service (Amazon EVS) allows you to run VMware Cloud Foundation (VCF) directly within your Amazon VPC including simplified self-managed migration experience with guided workflow in AWS console or via AWS CLI, get full access to their VCF deployment and VCF license portability." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Added support for p6-b200 instance type in SageMaker Training Jobs and Training Plans." + }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "contributor": "", + "description": "AWS Healthscribe now supports new templates for the clinical note summary: BIRP, SIRP, DAP, BEHAVIORAL_SOAP, and PHYSICAL_SOAP" + }, + { + "type": "feature", + "category": "Amazon Transcribe Streaming Service", + "contributor": "", + "description": "AWS Healthscribe now supports new templates for the clinical note summary: BIRP, SIRP, DAP, BEHAVIORAL_SOAP, and PHYSICAL_SOAP" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.31.58.json b/.changes/2.31.58.json new file mode 100644 index 000000000000..cd2c06fabaf6 --- /dev/null +++ b/.changes/2.31.58.json @@ -0,0 +1,48 @@ +{ + "version": "2.31.58", + "date": "2025-06-05", + "entries": [ + { + "type": "bugfix", + "category": "Amazon DynamoDB Enhanced Client", + "contributor": "", + "description": "Fixed DynamoDbEnhancedClient DefaultDynamoDbAsyncTable::createTable() to create secondary indices that are defined on annotations of the POJO class, similar to DefaultDynamoDbTable::createTable()." + }, + { + "type": "feature", + "category": "AWS Billing and Cost Management Pricing Calculator", + "contributor": "", + "description": "Updating the minimum for List APIs to be 1 (instead of 0)" + }, + { + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "Add new warning type 'EXCLUDED_PROPERTIES'" + }, + { + "type": "feature", + "category": "AWS Key Management Service", + "contributor": "", + "description": "AWS KMS announces the support for on-demand rotation of symmetric-encryption KMS keys with imported key material (EXTERNAL origin)." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Added ability to configure preferred authentication schemes when multiple auth options are available." + }, + { + "type": "feature", + "category": "AWS WAFV2", + "contributor": "", + "description": "AWS WAF adds support for ASN-based traffic filtering and support for ASN-based rate limiting." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.59.json b/.changes/2.31.59.json new file mode 100644 index 000000000000..8f3cfd0c9ea6 --- /dev/null +++ b/.changes/2.31.59.json @@ -0,0 +1,60 @@ +{ + "version": "2.31.59", + "date": "2025-06-06", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fix expiration in past warning during profile credential loading." + }, + { + "type": "feature", + "category": "AWS Key Management Service", + "contributor": "", + "description": "Remove unpopulated KeyMaterialId from Encrypt Response" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add support for protocols field in service model" + }, + { + "type": "feature", + "category": "Agents for Amazon Bedrock Runtime", + "contributor": "", + "description": "This release introduces the `PromptCreationConfigurations` input parameter, which includes fields to control prompt population for `InvokeAgent` or `InvokeInlineAgent` requests." + }, + { + "type": "feature", + "category": "Amazon Rekognition", + "contributor": "", + "description": "Adds support for defining an ordered preference list of different Rekognition Face Liveness challenge types when calling CreateFaceLivenessSession." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Include Global Cluster Identifier in DBCluster if the DBCluster is a Global Cluster Member." + }, + { + "type": "feature", + "category": "Amazon Route 53", + "contributor": "", + "description": "Amazon Route 53 now supports the Asia Pacific (Taipei) Region (ap-east-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region." 
+ }, + { + "type": "feature", + "category": "Amazon S3 Tables", + "contributor": "", + "description": "S3 Tables now supports getting details about a table via its table ARN." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.60.json b/.changes/2.31.60.json new file mode 100644 index 000000000000..c8beab43a60d --- /dev/null +++ b/.changes/2.31.60.json @@ -0,0 +1,66 @@ +{ + "version": "2.31.60", + "date": "2025-06-09", + "entries": [ + { + "type": "feature", + "category": "AWS AppSync", + "contributor": "", + "description": "Deprecate `atRestEncryptionEnabled` and `transitEncryptionEnabled` attributes in `CreateApiCache` action. Encryption is always enabled for new caches." + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "contributor": "", + "description": "Support dual-stack endpoints for ce api" + }, + { + "type": "feature", + "category": "AWS Marketplace Catalog Service", + "contributor": "", + "description": "The ListEntities API now supports the EntityID, LastModifiedDate, ProductTitle, and Visibility filters for machine learning products. You can also sort using all of those filters." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Adds support for configuring bearer auth using a token sourced from the environment for services with the `enableEnvironmentBearerToken` customization flag." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated Region class generation to use Partitions.json instead of the Endpoints.json and removed the hardcoded global regions." + }, + { + "type": "feature", + "category": "Amazon Connect Customer Profiles", + "contributor": "", + "description": "This release introduces capability of Profile Explorer, using correct ingestion timestamp & using historical data for computing calculated attributes, and new standard objects for T&H as part of Amazon Connect Customer Profiles service." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Release to support Elastic VMware Service (Amazon EVS) Subnet and Amazon EVS Network Interface Types." + }, + { + "type": "feature", + "category": "Amazon Elastic File System", + "contributor": "", + "description": "Added support for Internet Protocol Version 6 (IPv6) on EFS Service APIs and mount targets." + }, + { + "type": "feature", + "category": "Amazon WorkSpaces Thin Client", + "contributor": "", + "description": "Add ConflictException to UpdateEnvironment API" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.31.61.json b/.changes/2.31.61.json new file mode 100644 index 000000000000..108562eba48d --- /dev/null +++ b/.changes/2.31.61.json @@ -0,0 +1,18 @@ +{ + "version": "2.31.61", + "date": "2025-06-10", + "entries": [ + { + "type": "feature", + "category": "Amazon GameLift Streams", + "contributor": "", + "description": "Documentation updates for Amazon GameLift Streams to address formatting errors, correct resource ID examples, and update links to other guides" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.62.json b/.changes/2.31.62.json new file mode 100644 index 000000000000..cbcc401665a5 --- /dev/null +++ b/.changes/2.31.62.json @@ -0,0 +1,54 @@ +{ + "version": "2.31.62", + "date": "2025-06-11", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "jencymaryjoseph", + "description": "Deprecated DefaultCredentialsProvider.create() since it creates Singleton instance" + }, + { + "type": "feature", + "category": "AWS Control Catalog", + "contributor": "", + "description": "Introduced ListControlMappings API that retrieves control mappings. Added control aliases and governed resources fields in GetControl and ListControls APIs. New filtering capability in ListControls API, with implementation identifiers and implementation types." + }, + { + "type": "feature", + "category": "AWS Network Manager", + "contributor": "", + "description": "Add support for public DNS hostname resolution to private IP addresses across Cloud WAN-managed VPCs. Add support for security group referencing across Cloud WAN-managed VPCs." + }, + { + "type": "feature", + "category": "AWS WAFV2", + "contributor": "", + "description": "WAF now provides two DDoS protection options: resource-level monitoring for Application Load Balancers and the AWSManagedRulesAntiDDoSRuleSet managed rule group for CloudFront distributions." + }, + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "contributor": "", + "description": "Release for EKS Pod Identity Cross Account feature and disableSessionTags flag." + }, + { + "type": "feature", + "category": "Amazon Lex Model Building V2", + "contributor": "", + "description": "Add support for the Assisted NLU feature to improve bot performance" + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Updates Amazon RDS documentation for Amazon RDS for Db2 cross-Region replicas in standby mode." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.63.json b/.changes/2.31.63.json new file mode 100644 index 000000000000..89c3626a137a --- /dev/null +++ b/.changes/2.31.63.json @@ -0,0 +1,54 @@ +{ + "version": "2.31.63", + "date": "2025-06-12", + "entries": [ + { + "type": "feature", + "category": "AWS IoT FleetWise", + "contributor": "", + "description": "Add new status READY_FOR_CHECKIN used for vehicle synchronisation" + }, + { + "type": "feature", + "category": "AWS Key Management Service", + "contributor": "", + "description": "AWS KMS announces the support of ML-DSA key pairs that creates post-quantum safe digital signatures." 
+ }, + { + "type": "feature", + "category": "AWS Parallel Computing Service", + "contributor": "", + "description": "Fixed regex patterns for ARN fields." + }, + { + "type": "feature", + "category": "Amazon API Gateway", + "contributor": "", + "description": "Documentation updates for Amazon API Gateway" + }, + { + "type": "feature", + "category": "AmazonApiGatewayV2", + "contributor": "", + "description": "Documentation updates for Amazon API Gateway" + }, + { + "type": "feature", + "category": "AmazonConnectCampaignServiceV2", + "contributor": "", + "description": "Added PutInstanceCommunicationLimits and GetInstanceCommunicationLimits APIs" + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "This Amazon ECS release supports updating the capacityProviderStrategy parameter in update-service." + }, + { + "type": "feature", + "category": "EMR Serverless", + "contributor": "", + "description": "This release adds support for retrieval of the optional executionIamPolicy field in the GetJobRun API response." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.64.json b/.changes/2.31.64.json new file mode 100644 index 000000000000..5413641bccd0 --- /dev/null +++ b/.changes/2.31.64.json @@ -0,0 +1,36 @@ +{ + "version": "2.31.64", + "date": "2025-06-16", + "entries": [ + { + "type": "feature", + "category": "AWS Network Firewall", + "contributor": "", + "description": "You can now create firewalls using a Transit Gateway instead of a VPC, resulting in a TGW attachment." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add tracking of RequestBody/ResponseTransfromer implementations used in UserAgent." + }, + { + "type": "feature", + "category": "Amazon Bedrock", + "contributor": "", + "description": "This release of the SDK has the API and documentation for the createcustommodel API. This feature lets you copy a Amazon SageMaker trained Amazon Nova model into Amazon Bedrock for inference." + }, + { + "type": "feature", + "category": "Amazon Elastic Container Registry", + "contributor": "", + "description": "The `DescribeImageScanning` API now includes `lastInUseAt` and `InUseCount` fields that can be used to prioritize vulnerability remediation for images that are actively being used." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "This release 1) adds a new S3DataType Converse for SageMaker training 2)adds C8g R7gd M8g C6in P6 P6e instance type for SageMaker endpoint 3) adds m7i, r7i, c7i instance type for SageMaker Training and Processing." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/next-release/bugfix-AWSSDKforJavaV2-a136845.json b/.changes/next-release/bugfix-AWSSDKforJavaV2-a136845.json new file mode 100644 index 000000000000..9c3ed4965c7b --- /dev/null +++ b/.changes/next-release/bugfix-AWSSDKforJavaV2-a136845.json @@ -0,0 +1,6 @@ +{ + "type": "bugfix", + "category": "AWS SDK for Java V2", + "contributor": "", + "description": "Fix a bug in ConstructorCache when classes are GC'ed but not removed from cache" +} diff --git a/.github/workflows/api-surface-area-review-verification.yml b/.github/workflows/api-surface-area-review-verification.yml index b652db2559db..f65e30c758de 100644 --- a/.github/workflows/api-surface-area-review-verification.yml +++ b/.github/workflows/api-surface-area-review-verification.yml @@ -9,6 +9,8 @@ on: types: [ opened, synchronize, reopened, labeled, unlabeled ] branches: - master + paths: + - '**/*.java' jobs: api-surface-area-review-verification: @@ -20,7 +22,7 @@ jobs: if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-api-surface-area-change') }} run: | git fetch origin ${{ github.base_ref }} --depth 1 - FILES=$( git diff remotes/origin/${{ github.base_ref }} --name-only | grep "\.java$" | grep -v -E "(^|/)(internal|test|codegen|v2-migration)/" || true) + FILES=$( git diff remotes/origin/${{ github.base_ref }} --name-only | grep "\.java$" | grep -v -E "(^|/)(internal|test|codegen|v2-migration|it)/" || true) if [ -n "$FILES" ]; then echo "::error::Changes around protected/public APIs found:" echo "$FILES" | while read file; do @@ -37,4 +39,4 @@ jobs: echo "::error ::Change around public/protected APIs has been detected. Please either:" echo "::error ::* Review it with the team and add the 'api-surface-area-reviewed' label to this PR after approval –or–" echo "::error ::* Add the 'no-api-surface-area-change' label to this PR in case this is a false positive" - exit 1 \ No newline at end of file + exit 1 diff --git a/.github/workflows/changelog-verification.yml b/.github/workflows/changelog-verification.yml index b6f35ea36c6a..111ebf18a4c0 100644 --- a/.github/workflows/changelog-verification.yml +++ b/.github/workflows/changelog-verification.yml @@ -9,6 +9,8 @@ on: types: [ opened, synchronize, reopened, labeled, unlabeled ] branches: - master + paths: + - '**/*.java' jobs: changelog-verification: @@ -18,12 +20,21 @@ jobs: - name: Check for changelog entry if: ${{ !contains(github.event.pull_request.labels.*.name, 'changelog-not-required') }} run: | - git fetch origin ${{ github.base_ref }} --depth 1 && \ - git diff remotes/origin/${{ github.base_ref }} --name-only | grep -P "\.changes/next-release/*[a-zA-Z0-9_-]+\.json" + git fetch origin ${{ github.base_ref }} --depth 1 + NON_TEST_FILES=$( git diff remotes/origin/${{ github.base_ref }} --name-only | grep "\.java$" | grep -v -E "(^|/)(test|it)/" || true) + if [ -n "NON_TEST_FILES" ]; then + echo "::error::Non-test Java change found:" + echo "NON_TEST_FILES" | while read file; do + echo "::error::$file" + done + git diff remotes/origin/${{ github.base_ref }} --name-only | grep -P "\.changes/next-release/*[a-zA-Z0-9_-]+\.json" + else + echo "No change that may require a changelog entry found." + fi - name: Error message if: ${{ failure() }} run: | echo "::error ::No new/updated changelog entry found in /.changes/next-release directory. 
Please either:" echo "::error ::* Add a changelog entry (see CONTRIBUTING.md for instructions) –or–" echo "::error ::* Add the 'changelog-not-required' label to this PR (in rare cases not warranting a changelog entry)" - exit 1 \ No newline at end of file + exit 1 diff --git a/.github/workflows/codebuild-ci.yml b/.github/workflows/codebuild-ci.yml index 749c2762b885..fa523f4c0476 100644 --- a/.github/workflows/codebuild-ci.yml +++ b/.github/workflows/codebuild-ci.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 @@ -34,7 +34,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 @@ -49,7 +49,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 @@ -64,7 +64,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 @@ -79,7 +79,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 @@ -93,7 +93,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 @@ -107,7 +107,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 @@ -130,7 +130,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 @@ -139,12 +139,26 @@ jobs: uses: aws-actions/aws-codebuild-run-build@v1 with: project-name: aws-sdk-java-v2-endpoints-test + brazil-json-validation: + if: github.repository == 'aws/aws-sdk-java-v2' + runs-on: ubuntu-latest + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} + aws-region: us-west-2 + role-duration-seconds: 7200 + - name: Validate Brazil config + uses: aws-actions/aws-codebuild-run-build@v1 + with: + project-name: aws-java-sdk-v2-validate-brazil-config migration-tests: if: github.repository == 'aws/aws-sdk-java-v2' runs-on: ubuntu-latest steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 diff --git a/.github/workflows/lock-conversation-closed-pr.yml b/.github/workflows/lock-conversation-closed-pr.yml 
new file mode 100644 index 000000000000..0e75c1b07aa1 --- /dev/null +++ b/.github/workflows/lock-conversation-closed-pr.yml @@ -0,0 +1,35 @@ +name: Lock PR Conversation on Close + +on: + pull_request: + types: [closed] + +jobs: + lock-conversation-closed-prs: + if: github.repository == 'aws/aws-sdk-java-v2' + name: Lock PR Conversation on Close + runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - name: Lock PR conversation on Close + uses: actions/github-script@v7 + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + with: + script: | + const prNumber = context.payload.pull_request.number; + + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + body: "This pull request has been closed and the conversation has been locked. Comments on closed PRs are hard for our team to see. If you need more assistance, please open a new issue that references this one." + }); + + await github.rest.issues.lock({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + lock_reason: "resolved" + }); \ No newline at end of file diff --git a/.github/workflows/merge-queue-metric.yml b/.github/workflows/merge-queue-metric.yml index f7c12aa12429..259f03e4d268 100644 --- a/.github/workflows/merge-queue-metric.yml +++ b/.github/workflows/merge-queue-metric.yml @@ -18,7 +18,7 @@ jobs: steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 @@ -32,11 +32,11 @@ jobs: if: ${{ github.repository == 'aws/aws-sdk-java-v2' && github.event.action == 'dequeued' && github.event.reason != 'MERGE'}} steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 role-duration-seconds: 900 - name: Record merge queue removal run: | - aws --region us-west-2 cloudwatch put-metric-data --namespace AwsJavaSdkV2/GitHub --metric-name MergeQueue-Remove --unit Count --value 1 --dimensions Branch=master \ No newline at end of file + aws --region us-west-2 cloudwatch put-metric-data --namespace AwsJavaSdkV2/GitHub --metric-name MergeQueue-Remove --unit Count --value 1 --dimensions Branch=master diff --git a/.github/workflows/new-module-verification.yml b/.github/workflows/new-module-verification.yml new file mode 100644 index 000000000000..b6c59d11ec9f --- /dev/null +++ b/.github/workflows/new-module-verification.yml @@ -0,0 +1,191 @@ +name: New Module Verification + +on: + pull_request: + types: [opened, synchronize, reopened, labeled, unlabeled] + branches: + - master + - feature/master/* + paths: + - '**/*.xml' + - '.brazil.json' + +permissions: + contents: read + +jobs: + new-module-verification: + name: Verify New Modules + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Check for new module additions + id: check-new-modules + shell: bash + run: | + set -euo pipefail + + echo "::group::Detecting new modules" + git fetch origin ${{ github.base_ref }} --depth 1 + + # Find new pom.xml files in the diff + NEW_POM_FILES=$(git diff --name-only remotes/origin/${{ github.base_ref }} | grep -E '.*pom\.xml$' | grep -v "target/" || echo "") + + if [ -z "$NEW_POM_FILES" ]; then + echo "No new modules detected." 
+ echo "new_modules_found=false" >> $GITHUB_OUTPUT + exit 0 + fi + + echo "Potential new modules detected:" + echo "$NEW_POM_FILES" + echo "new_modules_found=true" >> $GITHUB_OUTPUT + + # Save the list of new pom files for later steps + echo "$NEW_POM_FILES" > new_pom_files.txt + echo "::endgroup::" + + - name: Verify new modules + if: steps.check-new-modules.outputs.new_modules_found == 'true' + shell: bash + run: | + set -euo pipefail + + NEW_POM_FILES=$(cat new_pom_files.txt) + + # Initialize counters and error flag + TEST_MODULES=0 + NON_TEST_MODULES=0 + HAS_ERRORS=0 + + echo "::group::Analyzing new modules" + + for POM_FILE in $NEW_POM_FILES; do + MODULE_DIR=$(dirname "$POM_FILE") + MODULE_NAME=$(basename "$MODULE_DIR") + + # Check if this is a new module (not just an updated pom.xml) + if git show remotes/origin/${{ github.base_ref }}:"$POM_FILE" &>/dev/null; then + echo "Skipping $POM_FILE - file already exists in base branch" + continue + fi + + echo "New module detected: $MODULE_DIR" + + # Check if it's a test module + if [[ "$MODULE_DIR" == *"/test/"* || "$MODULE_DIR" == *"/it/"* || "$MODULE_DIR" == *"-test"* || "$MODULE_DIR" == *"-tests"* ]]; then + echo "::group::Test module: $MODULE_DIR" + TEST_MODULES=$((TEST_MODULES + 1)) + + echo "Verifying test module requirements..." + + # 1. Check if excluded from maven deploy command + if ! grep -q "$MODULE_NAME" buildspecs/release-to-maven.yml 2>/dev/null; then + echo "::error::Module $MODULE_NAME is not excluded from maven deploy command in buildspecs/release-to-maven.yml" + HAS_ERRORS=1 + else + echo "✅ Module is excluded from maven deploy command" + fi + + # 2. Check if excluded from javadoc generation + if ! grep -q "$MODULE_NAME" buildspecs/release-javadoc.yml 2>/dev/null; then + echo "::error::Module $MODULE_NAME is not excluded from javadoc generation in buildspecs/release-javadoc.yml" + HAS_ERRORS=1 + else + echo "✅ Module is excluded from javadoc generation" + fi + + # 3. Check if Brazil import is skipped + if ! grep -q "\"$MODULE_NAME\".*\"skip\".*true" .brazil.json 2>/dev/null; then + echo "::error::Module $MODULE_NAME is not configured to skip Brazil import in .brazil.json" + HAS_ERRORS=1 + else + echo "✅ Brazil import is skipped for this module" + fi + echo "::endgroup::" + + else + echo "::group::Non-test module: $MODULE_DIR" + NON_TEST_MODULES=$((NON_TEST_MODULES + 1)) + + echo "Verifying non-test module requirements..." + + # 1. Check for Automatic-Module-Name in pom.xml + if ! grep -q "Automatic-Module-Name" "$POM_FILE" 2>/dev/null; then + echo "::error::Automatic-Module-Name is not specified in $POM_FILE" + HAS_ERRORS=1 + else + echo "✅ Automatic-Module-Name is specified" + fi + + # 2. Check if added to tests-coverage-reporting pom.xml + if ! grep -q ".*$MODULE_NAME" test/tests-coverage-reporting/pom.xml 2>/dev/null; then + echo "::error::Module $MODULE_NAME is not added to tests-coverage-reporting pom.xml" + HAS_ERRORS=1 + else + echo "✅ Module is added to tests-coverage-reporting" + fi + + # 3. Check if added to aws-sdk-java pom.xml + if ! grep -q ".*$MODULE_NAME" aws-sdk-java/pom.xml 2>/dev/null; then + echo "::error::Module $MODULE_NAME is not added to aws-sdk-java pom.xml" + HAS_ERRORS=1 + else + echo "✅ Module is added to aws-sdk-java pom.xml" + fi + + # 4. Check if added to architecture-tests pom.xml + if ! 
grep -q ".*$MODULE_NAME" test/architecture-tests/pom.xml 2>/dev/null; then + echo "::error::Module $MODULE_NAME is not added to architecture-tests pom.xml" + HAS_ERRORS=1 + else + echo "✅ Module is added to architecture-tests pom.xml" + fi + + # 5. Check if added to bom pom.xml + if ! grep -q "$MODULE_NAME" bom/pom.xml 2>/dev/null; then + echo "::error::Module $MODULE_NAME is not added to bom pom.xml" + HAS_ERRORS=1 + else + echo "✅ Module is added to bom pom.xml" + fi + + # 6. Check if japicmp plugin config is updated + JAPICMP_CHECK=$(grep -A 50 "japicmp-maven-plugin" pom.xml 2>/dev/null | grep -A 50 "" 2>/dev/null | grep -q "$MODULE_NAME" 2>/dev/null || echo "MISSING") + if [ "$JAPICMP_CHECK" = "MISSING" ]; then + echo "::error::Module $MODULE_NAME is not included in japicmp-maven-plugin includeModules section in pom.xml" + HAS_ERRORS=1 + else + echo "✅ Module is included in japicmp-maven-plugin configuration" + fi + + # 7. Check if package name mapping is added in .brazil.json + if ! grep -q "\"$MODULE_NAME\"" .brazil.json 2>/dev/null; then + echo "::error::Package name mapping for $MODULE_NAME is not added in .brazil.json" + HAS_ERRORS=1 + else + echo "✅ Package name mapping is added in .brazil.json" + fi + echo "::endgroup::" + fi + done + echo "::endgroup::" + + echo "::group::Verification summary" + echo "Verification complete." + echo "Test modules found: $TEST_MODULES" + echo "Non-test modules found: $NON_TEST_MODULES" + + if [ $HAS_ERRORS -eq 1 ]; then + echo "::error::Some verification checks failed. Please review the errors above and fix them." + exit 1 + else + echo "✅ All automated verification checks passed!" + fi + echo "::endgroup::" diff --git a/CHANGELOG.md b/CHANGELOG.md index fa46642ff186..74401c028a6a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,407 @@ #### 👋 _Looking for changelogs for older versions? You can find them in the [changelogs](./changelogs) directory._ +# __2.31.64__ __2025-06-16__ +## __AWS Network Firewall__ + - ### Features + - You can now create firewalls using a Transit Gateway instead of a VPC, resulting in a TGW attachment. + +## __AWS SDK for Java v2__ + - ### Features + - Add tracking of RequestBody/ResponseTransfromer implementations used in UserAgent. + +## __Amazon Bedrock__ + - ### Features + - This release of the SDK has the API and documentation for the createcustommodel API. This feature lets you copy a Amazon SageMaker trained Amazon Nova model into Amazon Bedrock for inference. + +## __Amazon Elastic Container Registry__ + - ### Features + - The `DescribeImageScanning` API now includes `lastInUseAt` and `InUseCount` fields that can be used to prioritize vulnerability remediation for images that are actively being used. + +## __Amazon SageMaker Service__ + - ### Features + - This release 1) adds a new S3DataType Converse for SageMaker training 2)adds C8g R7gd M8g C6in P6 P6e instance type for SageMaker endpoint 3) adds m7i, r7i, c7i instance type for SageMaker Training and Processing. + +# __2.31.63__ __2025-06-12__ +## __AWS IoT FleetWise__ + - ### Features + - Add new status READY_FOR_CHECKIN used for vehicle synchronisation + +## __AWS Key Management Service__ + - ### Features + - AWS KMS announces the support of ML-DSA key pairs that creates post-quantum safe digital signatures. + +## __AWS Parallel Computing Service__ + - ### Features + - Fixed regex patterns for ARN fields. 
+ +## __Amazon API Gateway__ + - ### Features + - Documentation updates for Amazon API Gateway + +## __Amazon EC2 Container Service__ + - ### Features + - This Amazon ECS release supports updating the capacityProviderStrategy parameter in update-service. + +## __AmazonApiGatewayV2__ + - ### Features + - Documentation updates for Amazon API Gateway + +## __AmazonConnectCampaignServiceV2__ + - ### Features + - Added PutInstanceCommunicationLimits and GetInstanceCommunicationLimits APIs + +## __EMR Serverless__ + - ### Features + - This release adds support for retrieval of the optional executionIamPolicy field in the GetJobRun API response. + +# __2.31.62__ __2025-06-11__ +## __AWS Control Catalog__ + - ### Features + - Introduced ListControlMappings API that retrieves control mappings. Added control aliases and governed resources fields in GetControl and ListControls APIs. New filtering capability in ListControls API, with implementation identifiers and implementation types. + +## __AWS Network Manager__ + - ### Features + - Add support for public DNS hostname resolution to private IP addresses across Cloud WAN-managed VPCs. Add support for security group referencing across Cloud WAN-managed VPCs. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + + - ### Bugfixes + - Deprecated DefaultCredentialsProvider.create() since it creates a singleton instance + - Contributed by: [@jencymaryjoseph](https://github.com/jencymaryjoseph) + +## __AWS WAFV2__ + - ### Features + - WAF now provides two DDoS protection options: resource-level monitoring for Application Load Balancers and the AWSManagedRulesAntiDDoSRuleSet managed rule group for CloudFront distributions. + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - Release for EKS Pod Identity Cross Account feature and disableSessionTags flag. + +## __Amazon Lex Model Building V2__ + - ### Features + - Add support for the Assisted NLU feature to improve bot performance + +## __Amazon Relational Database Service__ + - ### Features + - Updates Amazon RDS documentation for Amazon RDS for Db2 cross-Region replicas in standby mode. + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@jencymaryjoseph](https://github.com/jencymaryjoseph) +# __2.31.61__ __2025-06-10__ +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon GameLift Streams__ + - ### Features + - Documentation updates for Amazon GameLift Streams to address formatting errors, correct resource ID examples, and update links to other guides + +# __2.31.60__ __2025-06-09__ +## __AWS AppSync__ + - ### Features + - Deprecate `atRestEncryptionEnabled` and `transitEncryptionEnabled` attributes in `CreateApiCache` action. Encryption is always enabled for new caches. + +## __AWS Cost Explorer Service__ + - ### Features + - Support dual-stack endpoints for the CE API + +## __AWS Marketplace Catalog Service__ + - ### Features + - The ListEntities API now supports the EntityID, LastModifiedDate, ProductTitle, and Visibility filters for machine learning products. You can also sort using all of those filters. + +## __AWS SDK for Java v2__ + - ### Features + - Adds support for configuring bearer auth using a token sourced from the environment for services with the `enableEnvironmentBearerToken` customization flag. + - Updated Region class generation to use Partitions.json instead of Endpoints.json and removed the hardcoded global regions.
+ - Updated endpoint and partition metadata. + +## __Amazon Connect Customer Profiles__ + - ### Features + - This release introduces the Profile Explorer capability, using the correct ingestion timestamp and historical data for computing calculated attributes, and new standard objects for T&H as part of the Amazon Connect Customer Profiles service. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Release to support Elastic VMware Service (Amazon EVS) Subnet and Amazon EVS Network Interface Types. + +## __Amazon Elastic File System__ + - ### Features + - Added support for Internet Protocol Version 6 (IPv6) on EFS Service APIs and mount targets. + +## __Amazon WorkSpaces Thin Client__ + - ### Features + - Add ConflictException to UpdateEnvironment API + +# __2.31.59__ __2025-06-06__ +## __AWS Key Management Service__ + - ### Features + - Remove unpopulated KeyMaterialId from Encrypt Response + +## __AWS SDK for Java v2__ + - ### Features + - Add support for the protocols field in the service model + - Updated endpoint and partition metadata. + + - ### Bugfixes + - Fix 'expiration in past' warning during profile credential loading. + +## __Agents for Amazon Bedrock Runtime__ + - ### Features + - This release introduces the `PromptCreationConfigurations` input parameter, which includes fields to control prompt population for `InvokeAgent` or `InvokeInlineAgent` requests. + +## __Amazon Rekognition__ + - ### Features + - Adds support for defining an ordered preference list of different Rekognition Face Liveness challenge types when calling CreateFaceLivenessSession. + +## __Amazon Relational Database Service__ + - ### Features + - Include Global Cluster Identifier in DBCluster if the DBCluster is a Global Cluster Member. + +## __Amazon Route 53__ + - ### Features + - Amazon Route 53 now supports the Asia Pacific (Taipei) Region (ap-east-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. + +## __Amazon S3 Tables__ + - ### Features + - S3 Tables now supports getting details about a table via its table ARN. + +# __2.31.58__ __2025-06-05__ +## __AWS Billing and Cost Management Pricing Calculator__ + - ### Features + - Updating the minimum for List APIs to be 1 (instead of 0) + +## __AWS CloudFormation__ + - ### Features + - Add new warning type 'EXCLUDED_PROPERTIES' + +## __AWS Key Management Service__ + - ### Features + - AWS KMS announces support for on-demand rotation of symmetric-encryption KMS keys with imported key material (EXTERNAL origin). + +## __AWS SDK for Java v2__ + - ### Features + - Added ability to configure preferred authentication schemes when multiple auth options are available. + - Updated endpoint and partition metadata. + +## __AWS WAFV2__ + - ### Features + - AWS WAF adds support for ASN-based traffic filtering and support for ASN-based rate limiting. + +## __Amazon DynamoDB Enhanced Client__ + - ### Bugfixes + - Fixed DynamoDbEnhancedClient DefaultDynamoDbAsyncTable::createTable() to create secondary indices that are defined on annotations of the POJO class, similar to DefaultDynamoDbTable::createTable(). + +# __2.31.57__ __2025-06-04__ +## __AWS Amplify__ + - ### Features + - Update documentation for cacheConfig in CreateApp API + +## __AWS Elemental MediaConvert__ + - ### Features + - This release includes support for embedding and signing C2PA content credentials in MP4 outputs.
+ +## __AWS Invoicing__ + - ### Features + - Added new Invoicing ListInvoiceSummaries API Operation + +## __AWS MediaConnect__ + - ### Features + - This release updates the DescribeFlow API to show peer IP addresses. You can now identify the peer IP addresses of devices connected to your sources and outputs. This helps you to verify and troubleshoot your flow's active connections. + +## __AWS Network Firewall__ + - ### Features + - You can now monitor flow and alert log metrics from the Network Firewall console. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Elastic VMware Service__ + - ### Features + - Amazon Elastic VMware Service (Amazon EVS) allows you to run VMware Cloud Foundation (VCF) directly within your Amazon VPC, including a simplified self-managed migration experience with a guided workflow in the AWS console or via the AWS CLI, full access to your VCF deployment, and VCF license portability. + +## __Amazon SageMaker Service__ + - ### Features + - Added support for p6-b200 instance type in SageMaker Training Jobs and Training Plans. + +## __Amazon Transcribe Service__ + - ### Features + - AWS Healthscribe now supports new templates for the clinical note summary: BIRP, SIRP, DAP, BEHAVIORAL_SOAP, and PHYSICAL_SOAP + +## __Amazon Transcribe Streaming Service__ + - ### Features + - AWS Healthscribe now supports new templates for the clinical note summary: BIRP, SIRP, DAP, BEHAVIORAL_SOAP, and PHYSICAL_SOAP + +## __S3 Transfer Manager__ + - ### Bugfixes + - DownloadFilter type incompatibility in methods overridden from the extended interface + - Contributed by: [@jencymaryjoseph](https://github.com/jencymaryjoseph) + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@jencymaryjoseph](https://github.com/jencymaryjoseph) +# __2.31.56__ __2025-06-03__ +## __AWS S3 Event Notifications__ + - ### Bugfixes + - Fixed parsing of S3 event notifications to allow eventTime to be null when eventName is not + - Contributed by: [@reifiedbeans](https://github.com/reifiedbeans) + +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fix NPE in `ProfileFileSupplier.defaultSupplier` when both credentials and config files do not exist. + +## __Amazon API Gateway__ + - ### Features + - Adds support to set the routing mode for a custom domain name. + +## __AmazonApiGatewayV2__ + - ### Features + - Adds support to create routing rules and set the routing mode for a custom domain name. + +## __EMR Serverless__ + - ### Features + - AWS EMR Serverless: Adds a new option in the CancelJobRun API in EMR 7.9.0+, to cancel a job with a grace period. This feature is enabled by default with a 120-second grace period for streaming jobs and is not enabled by default for batch jobs. + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@reifiedbeans](https://github.com/reifiedbeans) +# __2.31.55__ __2025-06-02__ +## __AWS Backup__ + - ### Features + - You can now subscribe to Amazon SNS notifications and Amazon EventBridge events for backup indexing. You can now receive notifications when a backup index is created, deleted, or fails to create, enhancing your ability to monitor and track your backup operations. + +## __AWS Compute Optimizer__ + - ### Features + - This release enables AWS Compute Optimizer to analyze Amazon Aurora database clusters and generate Aurora I/O-Optimized recommendations. + +## __AWS EntityResolution__ + - ### Features + - Add support for generating match IDs in near real-time.
+ +## __AWS Parallel Computing Service__ + - ### Features + - Introduces SUSPENDING and SUSPENDED states for clusters, compute node groups, and queues. + +## __AWS SDK for Java v2__ + - ### Features + - Improve endpoint rules performance by directly passing the needed params instead of using a POJO to keep track of them. + - Updated endpoint and partition metadata. + +## __Agents for Amazon Bedrock__ + - ### Features + - This release adds the Agent Lifecycle Paused State feature to Amazon Bedrock agents. By using an agent's alias, you can temporarily suspend agent operations during maintenance, updates, or other situations. + +## __Amazon Athena__ + - ### Features + - Add support for the managed query result in the workgroup APIs. The managed query result configuration enables users to store query results in Athena-owned storage. + +## __Amazon EC2 Container Service__ + - ### Features + - Updates Amazon ECS documentation to include a note about the upcoming default log driver mode change. + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - Add support for filtering ListInsights API calls on the MISCONFIGURATION insight category + +## __Cost Optimization Hub__ + - ### Features + - Support recommendations for Aurora instance and Aurora cluster storage. + +## __Synthetics__ + - ### Features + - Support for Java runtime handler pattern. + +# __2.31.54__ __2025-05-30__ +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon SageMaker Service__ + - ### Features + - Release new parameter CapacityReservationConfig in ProductionVariant + +## __EMR Serverless__ + - ### Features + - This release adds the capability for users to specify an optional Execution IAM policy in the StartJobRun action. The resulting permissions assumed by the job run are the intersection of the permissions in the Execution Role and the specified Execution IAM Policy. + +# __2.31.53__ __2025-05-29__ +## __AWS Amplify__ + - ### Features + - Add support for customizable build instance sizes. CreateApp and UpdateApp operations now accept a new JobConfig parameter composed of BuildComputeType. + +## __AWS Billing and Cost Management Pricing Calculator__ + - ### Features + - Add AFTER_DISCOUNTS_AND_COMMITMENTS to Workload Estimate Rate Type. Set ListWorkLoadEstimateUsage maxResults range to minimum of 0 and maximum of 300. + +## __AWS CloudTrail__ + - ### Features + - CloudTrail Feature Release: Support for Enriched Events with Configurable Context for Event Data Store + +## __AWS Data Exchange__ + - ### Features + - This release adds Tag support for Event Action resource, through which customers can create event actions with Tags and retrieve event actions with Tags. + +## __AWS DataSync__ + - ### Features + - AgentArns field is made optional for Object Storage and Azure Blob location create requests. Location credentials are now managed via Secrets Manager, and may be encrypted with service managed or customer managed keys. Authentication is now optional for Azure Blob locations. + +## __Amazon Connect Service__ + - ### Features + - Amazon Connect Service Feature: Email Recipient Limit Increase + +## __Amazon FSx__ + - ### Features + - FSx API changes to support the public launch of the new Intelligent Tiering storage class on Amazon FSx for Lustre + +## __Amazon Interactive Video Service RealTime__ + - ### Features + - IVS Real-Time now offers participant replication, which allows customers to copy a participant from one stage to another.
+ +## __Amazon SageMaker Service__ + - ### Features + - Add maintenance status field to DescribeMlflowTrackingServer API response + +## __Amazon Simple Storage Service__ + - ### Features + - Adding checksum support for S3 PutBucketOwnershipControls API. + +## __AmazonMWAA__ + - ### Features + - Amazon MWAA now lets you choose a worker replacement strategy when updating an environment. This release adds two worker replacement strategies: FORCED (default), which stops workers immediately, and GRACEFUL, which allows workers to finish current tasks before shutting down. + +## __Auto Scaling__ + - ### Features + - Add support for "apple" CpuManufacturer in ABIS + +# __2.31.52__ __2025-05-28__ +## __AWS Network Firewall__ + - ### Features + - You can now use VPC endpoint associations to create multiple firewall endpoints for a single firewall. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Enable the option to automatically delete underlying Amazon EBS snapshots when deregistering Amazon Machine Images (AMIs) + +## __Amazon EventBridge__ + - ### Features + - Allow for more than 2 characters for location codes in EventBridge ARNs + +## __Cost Optimization Hub__ + - ### Features + - This release allows customers to modify their preferred commitment term and payment options. + +## __Synthetics__ + - ### Features + - Add support to change ephemeral storage. Add a new field "TestResult" under CanaryRunStatus. + # __2.31.51__ __2025-05-27__ ## __AWS Cost Explorer Service__ - ### Features diff --git a/README.md b/README.md index 3ab4ff566562..fcb844803566 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ [![Maven](https://img.shields.io/maven-central/v/software.amazon.awssdk/s3.svg?label=Maven)](https://search.maven.org/search?q=g:%22software.amazon.awssdk%22%20AND%20a:%22s3%22) [![codecov](https://codecov.io/gh/aws/aws-sdk-java-v2/branch/master/graph/badge.svg)](https://codecov.io/gh/aws/aws-sdk-java-v2) -[![All Contributors](https://img.shields.io/badge/all_contributors-117-orange.svg?style=flat-square)](#contributors-) +[![All Contributors](https://img.shields.io/badge/all_contributors-119-orange.svg?style=flat-square)](#contributors-) The **AWS SDK for Java 2.0** is a rewrite of 1.0 with some great new features. As with version 1.0, @@ -51,7 +51,7 @@ To automatically manage module versions (currently all modules have the same ver software.amazon.awssdk bom - 2.31.51 + 2.31.64 pom import @@ -85,12 +85,12 @@ Alternatively you can add dependencies for the specific services you use only: software.amazon.awssdk ec2 - 2.31.51 + 2.31.64 software.amazon.awssdk s3 - 2.31.51 + 2.31.64 ``` @@ -102,7 +102,7 @@ You can import the whole SDK into your project (includes *ALL* services). Please software.amazon.awssdk aws-sdk-java - 2.31.51 + 2.31.64 ``` @@ -340,6 +340,8 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d Ran Vaknin
💻 Greg Oledzki 💻 Kevin Stich 💻 + Jency Joseph 💻 + Drew Davis 💻 diff --git a/archetypes/archetype-app-quickstart/pom.xml b/archetypes/archetype-app-quickstart/pom.xml index 06787ade289d..8abfd4ef6ab8 100644 --- a/archetypes/archetype-app-quickstart/pom.xml +++ b/archetypes/archetype-app-quickstart/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/archetypes/archetype-lambda/pom.xml b/archetypes/archetype-lambda/pom.xml index 53af6a82b9a7..14a2083c7a28 100644 --- a/archetypes/archetype-lambda/pom.xml +++ b/archetypes/archetype-lambda/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 archetype-lambda diff --git a/archetypes/archetype-tools/pom.xml b/archetypes/archetype-tools/pom.xml index 616f854b80f0..b4e815a39f68 100644 --- a/archetypes/archetype-tools/pom.xml +++ b/archetypes/archetype-tools/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/archetypes/archetype-tools/src/main/resources/map-service-to-client-prefix b/archetypes/archetype-tools/src/main/resources/map-service-to-client-prefix index 8574844e7283..846018dd9677 100755 --- a/archetypes/archetype-tools/src/main/resources/map-service-to-client-prefix +++ b/archetypes/archetype-tools/src/main/resources/map-service-to-client-prefix @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import json import os @@ -14,8 +14,9 @@ SERVICE_DIR = os.path.join( def load_all_service_modules(): service_mapping = {} - for f in [f for f in os.listdir(SERVICE_DIR) if os.path.isdir(os.path.join(SERVICE_DIR, f)) & os.path.exists(os.path.join(SERVICE_DIR, f, 'target'))]: - for s in [s for s in os.listdir(os.path.join(SERVICE_DIR, f, 'target', 'generated-sources/sdk/software/amazon/awssdk/services', f)) if s.endswith('AsyncClient.java') & s.startswith('Default')]: + # Use logical AND (and) instead of bitwise AND (&) for boolean operations + for f in [f for f in os.listdir(SERVICE_DIR) if os.path.isdir(os.path.join(SERVICE_DIR, f)) and os.path.exists(os.path.join(SERVICE_DIR, f, 'target'))]: + for s in [s for s in os.listdir(os.path.join(SERVICE_DIR, f, 'target', 'generated-sources/sdk/software/amazon/awssdk/services', f)) if s.endswith('AsyncClient.java') and s.startswith('Default')]: service_mapping[f] = find_client_prefix(s) return service_mapping diff --git a/archetypes/pom.xml b/archetypes/pom.xml index a8578e5fb0f1..b9bcb7a10838 100644 --- a/archetypes/pom.xml +++ b/archetypes/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 archetypes diff --git a/aws-sdk-java/pom.xml b/aws-sdk-java/pom.xml index 7c90e34b473b..4eb623b7ae0c 100644 --- a/aws-sdk-java/pom.xml +++ b/aws-sdk-java/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ../pom.xml aws-sdk-java @@ -2068,6 +2068,11 @@ Amazon AutoScaling, etc).
ssmguiconnect ${awsjavasdk.version} + + software.amazon.awssdk + evs + ${awsjavasdk.version} + ${project.artifactId}-${project.version} diff --git a/bom-internal/pom.xml b/bom-internal/pom.xml index dc9396a156cf..ca334aa4bc1f 100644 --- a/bom-internal/pom.xml +++ b/bom-internal/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/bom/pom.xml b/bom/pom.xml index ecc081ed74e9..89f7c5e718d4 100644 --- a/bom/pom.xml +++ b/bom/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ../pom.xml bom @@ -2268,6 +2268,11 @@ ssmguiconnect ${awsjavasdk.version} + + software.amazon.awssdk + evs + ${awsjavasdk.version} + diff --git a/buildspecs/validate-brazil-config.yml b/buildspecs/validate-brazil-config.yml new file mode 100644 index 000000000000..481aa791f4ab --- /dev/null +++ b/buildspecs/validate-brazil-config.yml @@ -0,0 +1,14 @@ +version: 0.2 + +phases: + install: + runtime-versions: + java: "$JAVA_RUNTIME" + python: 3.13 + + build: + commands: + - mvn clean install -P quick -T0.4C + - mvn exec:exec -Dexec.executable=pwd -pl !:aws-sdk-java-pom,!:sdk-benchmarks,!:module-path-tests -q 2>&1 > modules.txt + - mvn dependency:list -DexcludeTransitive=true -DincludeScope=runtime 2>&1 > deps.txt + - scripts/validate-brazil-config modules.txt deps.txt \ No newline at end of file diff --git a/bundle-logging-bridge/pom.xml b/bundle-logging-bridge/pom.xml index c80efd0cd294..9e515be9d1a2 100644 --- a/bundle-logging-bridge/pom.xml +++ b/bundle-logging-bridge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT bundle-logging-bridge jar diff --git a/bundle-sdk/pom.xml b/bundle-sdk/pom.xml index f162075d64a8..d60f67538687 100644 --- a/bundle-sdk/pom.xml +++ b/bundle-sdk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT bundle-sdk jar diff --git a/bundle/pom.xml b/bundle/pom.xml index 149a7373905f..12fb06c6551e 100644 --- a/bundle/pom.xml +++ b/bundle/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT bundle jar diff --git a/codegen-lite-maven-plugin/pom.xml b/codegen-lite-maven-plugin/pom.xml index f7f8e4a48db4..67ef5c5326ea 100644 --- a/codegen-lite-maven-plugin/pom.xml +++ b/codegen-lite-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ../pom.xml codegen-lite-maven-plugin diff --git a/codegen-lite-maven-plugin/src/main/java/software/amazon/awssdk/codegen/lite/maven/plugin/RegionGenerationMojo.java b/codegen-lite-maven-plugin/src/main/java/software/amazon/awssdk/codegen/lite/maven/plugin/RegionGenerationMojo.java index e02358f3a67c..10422e931450 100644 --- a/codegen-lite-maven-plugin/src/main/java/software/amazon/awssdk/codegen/lite/maven/plugin/RegionGenerationMojo.java +++ b/codegen-lite-maven-plugin/src/main/java/software/amazon/awssdk/codegen/lite/maven/plugin/RegionGenerationMojo.java @@ -29,6 +29,7 @@ import software.amazon.awssdk.codegen.lite.regions.EndpointTagGenerator; import software.amazon.awssdk.codegen.lite.regions.PartitionMetadataGenerator; import software.amazon.awssdk.codegen.lite.regions.PartitionMetadataProviderGenerator; +import software.amazon.awssdk.codegen.lite.regions.PartitionsRegionsMetadataLoader; import software.amazon.awssdk.codegen.lite.regions.RegionGenerator; import software.amazon.awssdk.codegen.lite.regions.RegionMetadataGenerator; 
import software.amazon.awssdk.codegen.lite.regions.RegionMetadataLoader; @@ -36,6 +37,7 @@ import software.amazon.awssdk.codegen.lite.regions.ServiceMetadataGenerator; import software.amazon.awssdk.codegen.lite.regions.ServiceMetadataProviderGenerator; import software.amazon.awssdk.codegen.lite.regions.model.Partitions; +import software.amazon.awssdk.codegen.lite.regions.model.PartitionsRegionsMetadata; import software.amazon.awssdk.utils.StringUtils; /** @@ -59,19 +61,24 @@ public class RegionGenerationMojo extends AbstractMojo { "${basedir}/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json") private File endpoints; + @Parameter(property = "partitionsJson", defaultValue = + "${basedir}/../../codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource") + private File partitionsJson; + @Override public void execute() throws MojoExecutionException { Path baseSourcesDirectory = Paths.get(outputDirectory).resolve("generated-sources").resolve("sdk"); Path testsDirectory = Paths.get(outputDirectory).resolve("generated-test-sources").resolve("sdk-tests"); Partitions partitions = RegionMetadataLoader.build(endpoints); + PartitionsRegionsMetadata regionPartitions = PartitionsRegionsMetadataLoader.build(partitionsJson); generatePartitionMetadataClass(baseSourcesDirectory, partitions); - generateRegionClass(baseSourcesDirectory, partitions); + generateRegionClass(baseSourcesDirectory, regionPartitions); generateServiceMetadata(baseSourcesDirectory, partitions); - generateRegions(baseSourcesDirectory, partitions); + generateRegions(baseSourcesDirectory, regionPartitions); generatePartitionProvider(baseSourcesDirectory, partitions); - generateRegionProvider(baseSourcesDirectory, partitions); + generateRegionProvider(baseSourcesDirectory, regionPartitions); generateServiceProvider(baseSourcesDirectory, partitions); generateEndpointTags(baseSourcesDirectory, partitions); @@ -88,7 +95,7 @@ public void generatePartitionMetadataClass(Path baseSourcesDirectory, Partitions REGION_BASE)).generate()); } - public void generateRegionClass(Path baseSourcesDirectory, Partitions partitions) { + public void generateRegionClass(Path baseSourcesDirectory, PartitionsRegionsMetadata partitions) { Path sourcesDirectory = baseSourcesDirectory.resolve(StringUtils.replace(REGION_BASE, ".", "/")); new CodeGenerator(sourcesDirectory.toString(), new RegionGenerator(partitions, REGION_BASE)).generate(); } @@ -105,7 +112,7 @@ public void generateServiceMetadata(Path baseSourcesDirectory, Partitions partit .generate()); } - public void generateRegions(Path baseSourcesDirectory, Partitions partitions) { + public void generateRegions(Path baseSourcesDirectory, PartitionsRegionsMetadata partitions) { Path sourcesDirectory = baseSourcesDirectory.resolve(StringUtils.replace(REGION_METADATA_BASE, ".", "/")); partitions.getPartitions() .forEach(p -> p.getRegions().forEach((k, v) -> @@ -126,7 +133,7 @@ public void generatePartitionProvider(Path baseSourcesDirectory, Partitions part .generate(); } - public void generateRegionProvider(Path baseSourcesDirectory, Partitions partitions) { + public void generateRegionProvider(Path baseSourcesDirectory, PartitionsRegionsMetadata partitions) { Path sourcesDirectory = baseSourcesDirectory.resolve(StringUtils.replace(REGION_BASE, ".", "/")); new CodeGenerator(sourcesDirectory.toString(), new RegionMetadataProviderGenerator(partitions, REGION_METADATA_BASE, diff --git a/codegen-lite/pom.xml b/codegen-lite/pom.xml index 
348cda80123b..869374d40ae4 100644 --- a/codegen-lite/pom.xml +++ b/codegen-lite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT codegen-lite AWS Java SDK :: Code Generator Lite diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/PartitionsRegionsMetadataLoader.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/PartitionsRegionsMetadataLoader.java new file mode 100644 index 000000000000..692ee7758081 --- /dev/null +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/PartitionsRegionsMetadataLoader.java @@ -0,0 +1,47 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.lite.regions; + +import com.fasterxml.jackson.jr.ob.JSON; +import java.io.File; +import java.io.IOException; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.codegen.lite.regions.model.PartitionsRegionsMetadata; + +/** + * Loads and parses the partitions.json file into {@link PartitionsRegionsMetadata}. + */ +@SdkInternalApi +public final class PartitionsRegionsMetadataLoader { + + private PartitionsRegionsMetadataLoader() { + } + + public static PartitionsRegionsMetadata build(File path) { + return loadPartitionFromStream(path, path.toString()); + } + + private static PartitionsRegionsMetadata loadPartitionFromStream(File stream, String location) { + + try { + return JSON.std.with(JSON.Feature.USE_IS_GETTERS) + .beanFrom(PartitionsRegionsMetadata.class, stream); + + } catch (IOException | RuntimeException e) { + throw new RuntimeException("Error while loading partitions file from " + location, e); + } + } +} diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerator.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerator.java index fa5467bc847f..27421f2b6b16 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerator.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerator.java @@ -38,16 +38,16 @@ import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.codegen.lite.PoetClass; -import software.amazon.awssdk.codegen.lite.regions.model.Partitions; +import software.amazon.awssdk.codegen.lite.regions.model.PartitionsRegionsMetadata; import software.amazon.awssdk.utils.Validate; import software.amazon.awssdk.utils.http.SdkHttpUtils; public class RegionGenerator implements PoetClass { - private final Partitions partitions; + private final PartitionsRegionsMetadata partitions; private final String basePackage; - public RegionGenerator(Partitions partitions, + public RegionGenerator(PartitionsRegionsMetadata partitions, String basePackage) { this.partitions = partitions; this.basePackage = basePackage; @@ -100,21 +100,15 @@ private void 
regions(TypeSpec.Builder builder) { .add("$T.unmodifiableList($T.asList(", Collections.class, Arrays.class); String regionsCodeBlock = regions.stream().map(r -> { + boolean isGlobal = r.contains("global"); builder.addField(FieldSpec.builder(className(), regionName(r)) .addModifiers(PUBLIC, STATIC, FINAL) - .initializer("$T.of($S)", className(), r) + .initializer(isGlobal ? "$T.of($S, true)" : "$T.of($S)", className(), r) .build()); return regionName(r); }).collect(Collectors.joining(", ")); - addGlobalRegions(builder); - - regionsArray.add(regionsCodeBlock + ", ") - .add("AWS_GLOBAL, ") - .add("AWS_CN_GLOBAL, ") - .add("AWS_US_GOV_GLOBAL, ") - .add("AWS_ISO_GLOBAL, ") - .add("AWS_ISO_B_GLOBAL"); + regionsArray.add(regionsCodeBlock); regionsArray.add("))"); TypeName listOfRegions = ParameterizedTypeName.get(ClassName.get(List.class), className()); @@ -123,29 +117,6 @@ private void regions(TypeSpec.Builder builder) { .initializer(regionsArray.build()).build()); } - private void addGlobalRegions(TypeSpec.Builder builder) { - builder.addField(FieldSpec.builder(className(), "AWS_GLOBAL") - .addModifiers(PUBLIC, STATIC, FINAL) - .initializer("$T.of($S, true)", className(), "aws-global") - .build()) - .addField(FieldSpec.builder(className(), "AWS_CN_GLOBAL") - .addModifiers(PUBLIC, STATIC, FINAL) - .initializer("$T.of($S, true)", className(), "aws-cn-global") - .build()) - .addField(FieldSpec.builder(className(), "AWS_US_GOV_GLOBAL") - .addModifiers(PUBLIC, STATIC, FINAL) - .initializer("$T.of($S, true)", className(), "aws-us-gov-global") - .build()) - .addField(FieldSpec.builder(className(), "AWS_ISO_GLOBAL") - .addModifiers(PUBLIC, STATIC, FINAL) - .initializer("$T.of($S, true)", className(), "aws-iso-global") - .build()) - .addField(FieldSpec.builder(className(), "AWS_ISO_B_GLOBAL") - .addModifiers(PUBLIC, STATIC, FINAL) - .initializer("$T.of($S, true)", className(), "aws-iso-b-global") - .build()); - } - private String regionName(String region) { return region.replace("-", "_").toUpperCase(Locale.US); } diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataGenerator.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataGenerator.java index 92011139d2c6..b71e7c2bb671 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataGenerator.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataGenerator.java @@ -32,17 +32,17 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.codegen.lite.PoetClass; import software.amazon.awssdk.codegen.lite.Utils; -import software.amazon.awssdk.codegen.lite.regions.model.Partition; +import software.amazon.awssdk.codegen.lite.regions.model.PartitionRegionsMetadata; public class RegionMetadataGenerator implements PoetClass { - private final Partition partition; + private final PartitionRegionsMetadata partition; private final String region; private final String regionDescription; private final String basePackage; private final String regionBasePackage; - public RegionMetadataGenerator(Partition partition, + public RegionMetadataGenerator(PartitionRegionsMetadata partition, String region, String regionDescription, String basePackage, @@ -65,9 +65,9 @@ public TypeSpec poetClass() { .addModifiers(FINAL) .addSuperinterface(ClassName.get(regionBasePackage, "RegionMetadata")) .addField(staticFinalField("ID", region)) - .addField(staticFinalField("DOMAIN", 
partition.getDnsSuffix())) + .addField(staticFinalField("DOMAIN", partition.getOutputs().getDnsSuffix())) .addField(staticFinalField("DESCRIPTION", regionDescription)) - .addField(staticFinalField("PARTITION_ID", partition.getPartition())) + .addField(staticFinalField("PARTITION_ID", partition.getId())) .addMethod(getter("id", "ID")) .addMethod(getter("domain", "DOMAIN")) .addMethod(getter("description", "DESCRIPTION")) diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataProviderGenerator.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataProviderGenerator.java index 0203bbbfb649..fdd16ab73eec 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataProviderGenerator.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataProviderGenerator.java @@ -36,16 +36,16 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.codegen.lite.PoetClass; import software.amazon.awssdk.codegen.lite.Utils; -import software.amazon.awssdk.codegen.lite.regions.model.Partitions; +import software.amazon.awssdk.codegen.lite.regions.model.PartitionsRegionsMetadata; import software.amazon.awssdk.utils.ImmutableMap; public class RegionMetadataProviderGenerator implements PoetClass { - private final Partitions partitions; + private final PartitionsRegionsMetadata partitions; private final String basePackage; private final String regionBasePackage; - public RegionMetadataProviderGenerator(Partitions partitions, + public RegionMetadataProviderGenerator(PartitionsRegionsMetadata partitions, String basePackage, String regionBasePackage) { this.partitions = partitions; @@ -79,7 +79,7 @@ public ClassName className() { return ClassName.get(regionBasePackage, "GeneratedRegionMetadataProvider"); } - private CodeBlock regions(Partitions partitions) { + private CodeBlock regions(PartitionsRegionsMetadata partitions) { CodeBlock.Builder builder = CodeBlock.builder().add("$T.builder()", ImmutableMap.class); partitions.getPartitions() diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Partition.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Partition.java index e225d47bfd82..f8b030f64edb 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Partition.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Partition.java @@ -67,9 +67,9 @@ public Partition() { public Partition(@JsonProperty(value = "partition") String partition, @JsonProperty(value = "regions") Map - regions, + regions, @JsonProperty(value = "services") Map services) { + Service> services) { this.partition = Validate.paramNotNull(partition, "Partition"); this.regions = regions; this.services = services; @@ -186,4 +186,4 @@ private boolean hasServiceEndpoint(String endpoint) { } return false; } -} +} \ No newline at end of file diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/PartitionRegionsMetadata.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/PartitionRegionsMetadata.java new file mode 100644 index 000000000000..f780e766f71e --- /dev/null +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/PartitionRegionsMetadata.java @@ -0,0 +1,177 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.lite.regions.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * This class models a single partition from partitions.json. + */ +@SdkInternalApi +public final class PartitionRegionsMetadata { + private String id; + private PartitionOutputs outputs; + private String regionRegex; + private Map regions; + + public PartitionRegionsMetadata() { + } + + public PartitionRegionsMetadata(@JsonProperty(value = "id") String id, + @JsonProperty(value = "outputs") PartitionOutputs outputs, + @JsonProperty(value = "regionRegex") String regionRegex, + @JsonProperty(value = "regions") Map regions) { + this.id = id; + this.outputs = outputs; + this.regionRegex = regionRegex; + this.regions = regions; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public PartitionOutputs getOutputs() { + return outputs; + } + + public void setOutputs(PartitionOutputs outputs) { + this.outputs = outputs; + } + + public String getRegionRegex() { + return regionRegex; + } + + public void setRegionRegex(String regionRegex) { + this.regionRegex = regionRegex; + } + + public Map getRegions() { + return regions; + } + + public void setRegions(Map regions) { + this.regions = regions; + } + + /** + * This class models the outputs field of a partition in partitions.json. 
+ */ + @SdkInternalApi + public static final class PartitionOutputs { + private String dnsSuffix; + private String dualStackDnsSuffix; + private String implicitGlobalRegion; + private String name; + private boolean supportsDualStack; + private boolean supportsFIPS; + + public PartitionOutputs() { + } + + public PartitionOutputs(@JsonProperty(value = "dnsSuffix") String dnsSuffix, + @JsonProperty(value = "dualStackDnsSuffix") String dualStackDnsSuffix, + @JsonProperty(value = "implicitGlobalRegion") String implicitGlobalRegion, + @JsonProperty(value = "name") String name, + @JsonProperty(value = "supportsDualStack") boolean supportsDualStack, + @JsonProperty(value = "supportsFIPS") boolean supportsFIPS) { + this.dnsSuffix = dnsSuffix; + this.dualStackDnsSuffix = dualStackDnsSuffix; + this.implicitGlobalRegion = implicitGlobalRegion; + this.name = name; + this.supportsDualStack = supportsDualStack; + this.supportsFIPS = supportsFIPS; + } + + public String getDnsSuffix() { + return dnsSuffix; + } + + public void setDnsSuffix(String dnsSuffix) { + this.dnsSuffix = dnsSuffix; + } + + public String getDualStackDnsSuffix() { + return dualStackDnsSuffix; + } + + public void setDualStackDnsSuffix(String dualStackDnsSuffix) { + this.dualStackDnsSuffix = dualStackDnsSuffix; + } + + public String getImplicitGlobalRegion() { + return implicitGlobalRegion; + } + + public void setImplicitGlobalRegion(String implicitGlobalRegion) { + this.implicitGlobalRegion = implicitGlobalRegion; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public boolean isSupportsDualStack() { + return supportsDualStack; + } + + public void setSupportsDualStack(boolean supportsDualStack) { + this.supportsDualStack = supportsDualStack; + } + + public boolean isSupportsFIPS() { + return supportsFIPS; + } + + public void setSupportsFIPS(boolean supportsFIPS) { + this.supportsFIPS = supportsFIPS; + } + } + + /** + * This class models a region in partitions.json. + */ + @SdkInternalApi + public static final class RegionMetadata { + private String description; + + public RegionMetadata() { + } + + public RegionMetadata(@JsonProperty(value = "description") String description) { + this.description = description; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + } +} diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/PartitionsRegionsMetadata.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/PartitionsRegionsMetadata.java new file mode 100644 index 000000000000..7c5092e41c66 --- /dev/null +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/PartitionsRegionsMetadata.java @@ -0,0 +1,55 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.lite.regions.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.List; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Validate; + +/** + * This class models the AWS partitions metadata from partitions.json. + */ +@SdkInternalApi +public final class PartitionsRegionsMetadata { + private List partitions; + private String version; + + public PartitionsRegionsMetadata() { + } + + public PartitionsRegionsMetadata(@JsonProperty(value = "partitions") List partitions, + @JsonProperty(value = "version") String version) { + this.partitions = Validate.paramNotNull(partitions, "partitions"); + this.version = Validate.paramNotNull(version, "version"); + } + + public List getPartitions() { + return partitions; + } + + public void setPartitions(List partitions) { + this.partitions = partitions; + } + + public String getVersion() { + return version; + } + + public void setVersion(String version) { + this.version = version; + } +} diff --git a/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerationTest.java b/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerationTest.java index 0240958a17d9..2ace744a0392 100644 --- a/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerationTest.java +++ b/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerationTest.java @@ -21,35 +21,42 @@ import java.nio.file.Paths; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import software.amazon.awssdk.codegen.lite.regions.model.Partition; import software.amazon.awssdk.codegen.lite.regions.model.Partitions; +import software.amazon.awssdk.codegen.lite.regions.model.PartitionsRegionsMetadata; +import software.amazon.awssdk.codegen.lite.regions.model.PartitionRegionsMetadata; public class RegionGenerationTest { private static final String ENDPOINTS = "/software/amazon/awssdk/codegen/lite/test-endpoints.json"; + private static final String PARTITIONS = "/software/amazon/awssdk/codegen/lite/test-partitions.json.resource"; private static final String SERVICE_METADATA_BASE = "software.amazon.awssdk.regions.servicemetadata"; private static final String REGION_METADATA_BASE = "software.amazon.awssdk.regions.regionmetadata"; private static final String PARTITION_METADATA_BASE = "software.amazon.awssdk.regions.partitionmetadata"; private static final String REGION_BASE = "software.amazon.awssdk.regions"; private File endpoints; + private File partitionsFile; private Partitions partitions; + private PartitionsRegionsMetadata partitionsRegions; + @BeforeEach public void before() throws Exception { this.endpoints = Paths.get(getClass().getResource(ENDPOINTS).toURI()).toFile(); + this.partitionsFile = Paths.get(getClass().getResource(PARTITIONS).toURI()).toFile(); this.partitions = RegionMetadataLoader.build(endpoints); + this.partitionsRegions = PartitionsRegionsMetadataLoader.build(partitionsFile); } @Test public void regionClass() { - RegionGenerator regions = new RegionGenerator(partitions, REGION_BASE); + RegionGenerator regions = new RegionGenerator(partitionsRegions, REGION_BASE); assertThat(regions, generatesTo("regions.java")); } @Test public void regionMetadataClass() { - Partition partition = partitions.getPartitions().get(0); + PartitionRegionsMetadata partition = partitionsRegions.getPartitions().get(0); RegionMetadataGenerator metadataGenerator = new 
RegionMetadataGenerator(partition, "us-east-1", "US East (N. Virginia)", @@ -61,7 +68,7 @@ public void regionMetadataClass() { @Test public void regionMetadataProviderClass() { - RegionMetadataProviderGenerator providerGenerator = new RegionMetadataProviderGenerator(partitions, + RegionMetadataProviderGenerator providerGenerator = new RegionMetadataProviderGenerator(partitionsRegions, REGION_METADATA_BASE, REGION_BASE); assertThat(providerGenerator, generatesTo("region-metadata-provider.java")); diff --git a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/region-metadata-provider.java b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/region-metadata-provider.java index 9380df896357..798644de1fef 100644 --- a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/region-metadata-provider.java +++ b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/region-metadata-provider.java @@ -9,18 +9,37 @@ import software.amazon.awssdk.regions.regionmetadata.ApNortheast2; import software.amazon.awssdk.regions.regionmetadata.ApNortheast3; import software.amazon.awssdk.regions.regionmetadata.ApSouth1; +import software.amazon.awssdk.regions.regionmetadata.ApSouth2; import software.amazon.awssdk.regions.regionmetadata.ApSoutheast1; import software.amazon.awssdk.regions.regionmetadata.ApSoutheast2; +import software.amazon.awssdk.regions.regionmetadata.ApSoutheast3; +import software.amazon.awssdk.regions.regionmetadata.ApSoutheast4; +import software.amazon.awssdk.regions.regionmetadata.ApSoutheast5; +import software.amazon.awssdk.regions.regionmetadata.ApSoutheast7; +import software.amazon.awssdk.regions.regionmetadata.AwsCnGlobal; +import software.amazon.awssdk.regions.regionmetadata.AwsGlobal; +import software.amazon.awssdk.regions.regionmetadata.AwsIsoBGlobal; +import software.amazon.awssdk.regions.regionmetadata.AwsIsoFGlobal; +import software.amazon.awssdk.regions.regionmetadata.AwsIsoGlobal; +import software.amazon.awssdk.regions.regionmetadata.AwsUsGovGlobal; import software.amazon.awssdk.regions.regionmetadata.CaCentral1; +import software.amazon.awssdk.regions.regionmetadata.CaWest1; import software.amazon.awssdk.regions.regionmetadata.CnNorth1; import software.amazon.awssdk.regions.regionmetadata.CnNorthwest1; import software.amazon.awssdk.regions.regionmetadata.EuCentral1; +import software.amazon.awssdk.regions.regionmetadata.EuCentral2; +import software.amazon.awssdk.regions.regionmetadata.EuIsoeWest1; import software.amazon.awssdk.regions.regionmetadata.EuNorth1; import software.amazon.awssdk.regions.regionmetadata.EuSouth1; +import software.amazon.awssdk.regions.regionmetadata.EuSouth2; import software.amazon.awssdk.regions.regionmetadata.EuWest1; import software.amazon.awssdk.regions.regionmetadata.EuWest2; import software.amazon.awssdk.regions.regionmetadata.EuWest3; +import software.amazon.awssdk.regions.regionmetadata.EuscDeEast1; +import software.amazon.awssdk.regions.regionmetadata.IlCentral1; +import software.amazon.awssdk.regions.regionmetadata.MeCentral1; import software.amazon.awssdk.regions.regionmetadata.MeSouth1; +import software.amazon.awssdk.regions.regionmetadata.MxCentral1; import software.amazon.awssdk.regions.regionmetadata.SaEast1; import software.amazon.awssdk.regions.regionmetadata.UsEast1; import software.amazon.awssdk.regions.regionmetadata.UsEast2; @@ -29,6 +48,8 @@ import software.amazon.awssdk.regions.regionmetadata.UsIsoEast1; import 
software.amazon.awssdk.regions.regionmetadata.UsIsoWest1; import software.amazon.awssdk.regions.regionmetadata.UsIsobEast1; +import software.amazon.awssdk.regions.regionmetadata.UsIsofEast1; +import software.amazon.awssdk.regions.regionmetadata.UsIsofSouth1; import software.amazon.awssdk.regions.regionmetadata.UsWest1; import software.amazon.awssdk.regions.regionmetadata.UsWest2; import software.amazon.awssdk.utils.ImmutableMap; @@ -40,15 +61,26 @@ public final class GeneratedRegionMetadataProvider implements RegionMetadataProv .put(Region.AF_SOUTH_1, new AfSouth1()).put(Region.AP_EAST_1, new ApEast1()) .put(Region.AP_NORTHEAST_1, new ApNortheast1()).put(Region.AP_NORTHEAST_2, new ApNortheast2()) .put(Region.AP_NORTHEAST_3, new ApNortheast3()).put(Region.AP_SOUTH_1, new ApSouth1()) - .put(Region.AP_SOUTHEAST_1, new ApSoutheast1()).put(Region.AP_SOUTHEAST_2, new ApSoutheast2()) - .put(Region.CA_CENTRAL_1, new CaCentral1()).put(Region.EU_CENTRAL_1, new EuCentral1()) - .put(Region.EU_NORTH_1, new EuNorth1()).put(Region.EU_SOUTH_1, new EuSouth1()).put(Region.EU_WEST_1, new EuWest1()) - .put(Region.EU_WEST_2, new EuWest2()).put(Region.EU_WEST_3, new EuWest3()).put(Region.ME_SOUTH_1, new MeSouth1()) + .put(Region.AP_SOUTH_2, new ApSouth2()).put(Region.AP_SOUTHEAST_1, new ApSoutheast1()) + .put(Region.AP_SOUTHEAST_2, new ApSoutheast2()).put(Region.AP_SOUTHEAST_3, new ApSoutheast3()) + .put(Region.AP_SOUTHEAST_4, new ApSoutheast4()).put(Region.AP_SOUTHEAST_5, new ApSoutheast5()) + .put(Region.AP_SOUTHEAST_7, new ApSoutheast7()).put(Region.AWS_GLOBAL, new AwsGlobal()) + .put(Region.CA_CENTRAL_1, new CaCentral1()).put(Region.CA_WEST_1, new CaWest1()) + .put(Region.EU_CENTRAL_1, new EuCentral1()).put(Region.EU_CENTRAL_2, new EuCentral2()) + .put(Region.EU_NORTH_1, new EuNorth1()).put(Region.EU_SOUTH_1, new EuSouth1()).put(Region.EU_SOUTH_2, new EuSouth2()) + .put(Region.EU_WEST_1, new EuWest1()).put(Region.EU_WEST_2, new EuWest2()).put(Region.EU_WEST_3, new EuWest3()) + .put(Region.IL_CENTRAL_1, new IlCentral1()).put(Region.ME_CENTRAL_1, new MeCentral1()) + .put(Region.ME_SOUTH_1, new MeSouth1()).put(Region.MX_CENTRAL_1, new MxCentral1()) .put(Region.SA_EAST_1, new SaEast1()).put(Region.US_EAST_1, new UsEast1()).put(Region.US_EAST_2, new UsEast2()) - .put(Region.US_WEST_1, new UsWest1()).put(Region.US_WEST_2, new UsWest2()).put(Region.CN_NORTH_1, new CnNorth1()) - .put(Region.CN_NORTHWEST_1, new CnNorthwest1()).put(Region.US_GOV_EAST_1, new UsGovEast1()) - .put(Region.US_GOV_WEST_1, new UsGovWest1()).put(Region.US_ISO_EAST_1, new UsIsoEast1()) - .put(Region.US_ISO_WEST_1, new UsIsoWest1()).put(Region.US_ISOB_EAST_1, new UsIsobEast1()).build(); + .put(Region.US_WEST_1, new UsWest1()).put(Region.US_WEST_2, new UsWest2()) + .put(Region.AWS_CN_GLOBAL, new AwsCnGlobal()).put(Region.CN_NORTH_1, new CnNorth1()) + .put(Region.CN_NORTHWEST_1, new CnNorthwest1()).put(Region.AWS_US_GOV_GLOBAL, new AwsUsGovGlobal()) + .put(Region.US_GOV_EAST_1, new UsGovEast1()).put(Region.US_GOV_WEST_1, new UsGovWest1()) + .put(Region.AWS_ISO_GLOBAL, new AwsIsoGlobal()).put(Region.US_ISO_EAST_1, new UsIsoEast1()) + .put(Region.US_ISO_WEST_1, new UsIsoWest1()).put(Region.AWS_ISO_B_GLOBAL, new AwsIsoBGlobal()) + .put(Region.US_ISOB_EAST_1, new UsIsobEast1()).put(Region.EU_ISOE_WEST_1, new EuIsoeWest1()) + .put(Region.AWS_ISO_F_GLOBAL, new AwsIsoFGlobal()).put(Region.US_ISOF_EAST_1, new UsIsofEast1()) + .put(Region.US_ISOF_SOUTH_1, new UsIsofSouth1()).put(Region.EUSC_DE_EAST_1, new EuscDeEast1()).build(); public RegionMetadata 
regionMetadata(Region region) { return REGION_METADATA.get(region); diff --git a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/regions.java b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/regions.java index e007c4fbaf79..9935ffd3f4d9 100644 --- a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/regions.java +++ b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/regions.java @@ -36,17 +36,11 @@ @SdkPublicApi @Generated("software.amazon.awssdk:codegen") public final class Region { - public static final Region AP_SOUTH_1 = Region.of("ap-south-1"); - - public static final Region EU_SOUTH_1 = Region.of("eu-south-1"); - - public static final Region US_GOV_EAST_1 = Region.of("us-gov-east-1"); + public static final Region ME_CENTRAL_1 = Region.of("me-central-1"); - public static final Region CA_CENTRAL_1 = Region.of("ca-central-1"); + public static final Region AWS_CN_GLOBAL = Region.of("aws-cn-global", true); - public static final Region EU_CENTRAL_1 = Region.of("eu-central-1"); - - public static final Region US_ISO_WEST_1 = Region.of("us-iso-west-1"); + public static final Region US_ISOF_SOUTH_1 = Region.of("us-isof-south-1"); public static final Region US_WEST_1 = Region.of("us-west-1"); @@ -54,14 +48,6 @@ public final class Region { public static final Region AF_SOUTH_1 = Region.of("af-south-1"); - public static final Region EU_NORTH_1 = Region.of("eu-north-1"); - - public static final Region EU_WEST_3 = Region.of("eu-west-3"); - - public static final Region EU_WEST_2 = Region.of("eu-west-2"); - - public static final Region EU_WEST_1 = Region.of("eu-west-1"); - public static final Region AP_NORTHEAST_3 = Region.of("ap-northeast-3"); public static final Region AP_NORTHEAST_2 = Region.of("ap-northeast-2"); @@ -72,41 +58,89 @@ public final class Region { public static final Region SA_EAST_1 = Region.of("sa-east-1"); - public static final Region AP_EAST_1 = Region.of("ap-east-1"); - public static final Region CN_NORTH_1 = Region.of("cn-north-1"); - public static final Region US_GOV_WEST_1 = Region.of("us-gov-west-1"); - public static final Region AP_SOUTHEAST_1 = Region.of("ap-southeast-1"); public static final Region AP_SOUTHEAST_2 = Region.of("ap-southeast-2"); - public static final Region US_ISO_EAST_1 = Region.of("us-iso-east-1"); + public static final Region AP_SOUTHEAST_3 = Region.of("ap-southeast-3"); + + public static final Region AP_SOUTHEAST_4 = Region.of("ap-southeast-4"); + + public static final Region AP_SOUTHEAST_5 = Region.of("ap-southeast-5"); public static final Region US_EAST_1 = Region.of("us-east-1"); public static final Region US_EAST_2 = Region.of("us-east-2"); + public static final Region AP_SOUTHEAST_7 = Region.of("ap-southeast-7"); + public static final Region CN_NORTHWEST_1 = Region.of("cn-northwest-1"); - public static final Region US_ISOB_EAST_1 = Region.of("us-isob-east-1"); + public static final Region AP_SOUTH_2 = Region.of("ap-south-2"); + + public static final Region AP_SOUTH_1 = Region.of("ap-south-1"); + + public static final Region EU_SOUTH_1 = Region.of("eu-south-1"); + + public static final Region EU_SOUTH_2 = Region.of("eu-south-2"); + + public static final Region US_GOV_EAST_1 = Region.of("us-gov-east-1"); + + public static final Region IL_CENTRAL_1 = Region.of("il-central-1"); + + public static final Region CA_CENTRAL_1 = Region.of("ca-central-1"); + + public static final Region MX_CENTRAL_1 = Region.of("mx-central-1"); + + public static final 
Region EU_CENTRAL_1 = Region.of("eu-central-1"); + + public static final Region US_ISO_WEST_1 = Region.of("us-iso-west-1"); + + public static final Region EUSC_DE_EAST_1 = Region.of("eusc-de-east-1"); + + public static final Region EU_CENTRAL_2 = Region.of("eu-central-2"); + + public static final Region EU_ISOE_WEST_1 = Region.of("eu-isoe-west-1"); public static final Region AWS_GLOBAL = Region.of("aws-global", true); - public static final Region AWS_CN_GLOBAL = Region.of("aws-cn-global", true); + public static final Region EU_NORTH_1 = Region.of("eu-north-1"); - public static final Region AWS_US_GOV_GLOBAL = Region.of("aws-us-gov-global", true); + public static final Region EU_WEST_3 = Region.of("eu-west-3"); + + public static final Region EU_WEST_2 = Region.of("eu-west-2"); + + public static final Region EU_WEST_1 = Region.of("eu-west-1"); public static final Region AWS_ISO_GLOBAL = Region.of("aws-iso-global", true); + public static final Region AP_EAST_1 = Region.of("ap-east-1"); + + public static final Region CA_WEST_1 = Region.of("ca-west-1"); + + public static final Region US_GOV_WEST_1 = Region.of("us-gov-west-1"); + + public static final Region US_ISO_EAST_1 = Region.of("us-iso-east-1"); + public static final Region AWS_ISO_B_GLOBAL = Region.of("aws-iso-b-global", true); - private static final List REGIONS = Collections.unmodifiableList(Arrays.asList(AP_SOUTH_1, EU_SOUTH_1, US_GOV_EAST_1, - CA_CENTRAL_1, EU_CENTRAL_1, US_ISO_WEST_1, US_WEST_1, US_WEST_2, AF_SOUTH_1, EU_NORTH_1, EU_WEST_3, EU_WEST_2, - EU_WEST_1, AP_NORTHEAST_3, AP_NORTHEAST_2, AP_NORTHEAST_1, ME_SOUTH_1, SA_EAST_1, AP_EAST_1, CN_NORTH_1, - US_GOV_WEST_1, AP_SOUTHEAST_1, AP_SOUTHEAST_2, US_ISO_EAST_1, US_EAST_1, US_EAST_2, CN_NORTHWEST_1, US_ISOB_EAST_1, - AWS_GLOBAL, AWS_CN_GLOBAL, AWS_US_GOV_GLOBAL, AWS_ISO_GLOBAL, AWS_ISO_B_GLOBAL)); + public static final Region AWS_ISO_F_GLOBAL = Region.of("aws-iso-f-global", true); + + public static final Region AWS_US_GOV_GLOBAL = Region.of("aws-us-gov-global", true); + + public static final Region US_ISOB_EAST_1 = Region.of("us-isob-east-1"); + + public static final Region US_ISOF_EAST_1 = Region.of("us-isof-east-1"); + + private static final List REGIONS = Collections.unmodifiableList(Arrays.asList(ME_CENTRAL_1, AWS_CN_GLOBAL, + US_ISOF_SOUTH_1, US_WEST_1, US_WEST_2, AF_SOUTH_1, AP_NORTHEAST_3, AP_NORTHEAST_2, AP_NORTHEAST_1, ME_SOUTH_1, + SA_EAST_1, CN_NORTH_1, AP_SOUTHEAST_1, AP_SOUTHEAST_2, AP_SOUTHEAST_3, AP_SOUTHEAST_4, AP_SOUTHEAST_5, US_EAST_1, + US_EAST_2, AP_SOUTHEAST_7, CN_NORTHWEST_1, AP_SOUTH_2, AP_SOUTH_1, EU_SOUTH_1, EU_SOUTH_2, US_GOV_EAST_1, + IL_CENTRAL_1, CA_CENTRAL_1, MX_CENTRAL_1, EU_CENTRAL_1, US_ISO_WEST_1, EUSC_DE_EAST_1, EU_CENTRAL_2, EU_ISOE_WEST_1, + AWS_GLOBAL, EU_NORTH_1, EU_WEST_3, EU_WEST_2, EU_WEST_1, AWS_ISO_GLOBAL, AP_EAST_1, CA_WEST_1, US_GOV_WEST_1, + US_ISO_EAST_1, AWS_ISO_B_GLOBAL, AWS_ISO_F_GLOBAL, AWS_US_GOV_GLOBAL, US_ISOB_EAST_1, US_ISOF_EAST_1)); private final boolean isGlobalRegion; diff --git a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/test-partitions.json.resource b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/test-partitions.json.resource new file mode 100644 index 000000000000..3d7e9530924e --- /dev/null +++ b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/test-partitions.json.resource @@ -0,0 +1,255 @@ +{ + "partitions" : [ { + "id" : "aws", + "outputs" : { + "dnsSuffix" : "amazonaws.com", + "dualStackDnsSuffix" : "api.aws", + "implicitGlobalRegion" : 
"us-east-1", + "name" : "aws", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$", + "regions" : { + "af-south-1" : { + "description" : "Africa (Cape Town)" + }, + "ap-east-1" : { + "description" : "Asia Pacific (Hong Kong)" + }, + "ap-northeast-1" : { + "description" : "Asia Pacific (Tokyo)" + }, + "ap-northeast-2" : { + "description" : "Asia Pacific (Seoul)" + }, + "ap-northeast-3" : { + "description" : "Asia Pacific (Osaka)" + }, + "ap-south-1" : { + "description" : "Asia Pacific (Mumbai)" + }, + "ap-south-2" : { + "description" : "Asia Pacific (Hyderabad)" + }, + "ap-southeast-1" : { + "description" : "Asia Pacific (Singapore)" + }, + "ap-southeast-2" : { + "description" : "Asia Pacific (Sydney)" + }, + "ap-southeast-3" : { + "description" : "Asia Pacific (Jakarta)" + }, + "ap-southeast-4" : { + "description" : "Asia Pacific (Melbourne)" + }, + "ap-southeast-5" : { + "description" : "Asia Pacific (Malaysia)" + }, + "ap-southeast-7" : { + "description" : "Asia Pacific (Thailand)" + }, + "aws-global" : { + "description" : "AWS Standard global region" + }, + "ca-central-1" : { + "description" : "Canada (Central)" + }, + "ca-west-1" : { + "description" : "Canada West (Calgary)" + }, + "eu-central-1" : { + "description" : "Europe (Frankfurt)" + }, + "eu-central-2" : { + "description" : "Europe (Zurich)" + }, + "eu-north-1" : { + "description" : "Europe (Stockholm)" + }, + "eu-south-1" : { + "description" : "Europe (Milan)" + }, + "eu-south-2" : { + "description" : "Europe (Spain)" + }, + "eu-west-1" : { + "description" : "Europe (Ireland)" + }, + "eu-west-2" : { + "description" : "Europe (London)" + }, + "eu-west-3" : { + "description" : "Europe (Paris)" + }, + "il-central-1" : { + "description" : "Israel (Tel Aviv)" + }, + "me-central-1" : { + "description" : "Middle East (UAE)" + }, + "me-south-1" : { + "description" : "Middle East (Bahrain)" + }, + "mx-central-1" : { + "description" : "Mexico (Central)" + }, + "sa-east-1" : { + "description" : "South America (Sao Paulo)" + }, + "us-east-1" : { + "description" : "US East (N. Virginia)" + }, + "us-east-2" : { + "description" : "US East (Ohio)" + }, + "us-west-1" : { + "description" : "US West (N. 
California)" + }, + "us-west-2" : { + "description" : "US West (Oregon)" + } + } + }, { + "id" : "aws-cn", + "outputs" : { + "dnsSuffix" : "amazonaws.com.cn", + "dualStackDnsSuffix" : "api.amazonwebservices.com.cn", + "implicitGlobalRegion" : "cn-northwest-1", + "name" : "aws-cn", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^cn\\-\\w+\\-\\d+$", + "regions" : { + "aws-cn-global" : { + "description" : "AWS China global region" + }, + "cn-north-1" : { + "description" : "China (Beijing)" + }, + "cn-northwest-1" : { + "description" : "China (Ningxia)" + } + } + }, { + "id" : "aws-us-gov", + "outputs" : { + "dnsSuffix" : "amazonaws.com", + "dualStackDnsSuffix" : "api.aws", + "implicitGlobalRegion" : "us-gov-west-1", + "name" : "aws-us-gov", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", + "regions" : { + "aws-us-gov-global" : { + "description" : "AWS GovCloud (US) global region" + }, + "us-gov-east-1" : { + "description" : "AWS GovCloud (US-East)" + }, + "us-gov-west-1" : { + "description" : "AWS GovCloud (US-West)" + } + } + }, { + "id" : "aws-iso", + "outputs" : { + "dnsSuffix" : "c2s.ic.gov", + "dualStackDnsSuffix" : "c2s.ic.gov", + "implicitGlobalRegion" : "us-iso-east-1", + "name" : "aws-iso", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$", + "regions" : { + "aws-iso-global" : { + "description" : "AWS ISO (US) global region" + }, + "us-iso-east-1" : { + "description" : "US ISO East" + }, + "us-iso-west-1" : { + "description" : "US ISO WEST" + } + } + }, { + "id" : "aws-iso-b", + "outputs" : { + "dnsSuffix" : "sc2s.sgov.gov", + "dualStackDnsSuffix" : "sc2s.sgov.gov", + "implicitGlobalRegion" : "us-isob-east-1", + "name" : "aws-iso-b", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$", + "regions" : { + "aws-iso-b-global" : { + "description" : "AWS ISOB (US) global region" + }, + "us-isob-east-1" : { + "description" : "US ISOB East (Ohio)" + } + } + }, { + "id" : "aws-iso-e", + "outputs" : { + "dnsSuffix" : "cloud.adc-e.uk", + "dualStackDnsSuffix" : "cloud.adc-e.uk", + "implicitGlobalRegion" : "eu-isoe-west-1", + "name" : "aws-iso-e", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", + "regions" : { + "eu-isoe-west-1" : { + "description" : "EU ISOE West" + } + } + }, { + "id" : "aws-iso-f", + "outputs" : { + "dnsSuffix" : "csp.hci.ic.gov", + "dualStackDnsSuffix" : "csp.hci.ic.gov", + "implicitGlobalRegion" : "us-isof-south-1", + "name" : "aws-iso-f", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", + "regions" : { + "aws-iso-f-global" : { + "description" : "AWS ISOF global region" + }, + "us-isof-east-1" : { + "description" : "US ISOF EAST" + }, + "us-isof-south-1" : { + "description" : "US ISOF SOUTH" + } + } + }, { + "id" : "aws-eusc", + "outputs" : { + "dnsSuffix" : "amazonaws.eu", + "dualStackDnsSuffix" : "amazonaws.eu", + "implicitGlobalRegion" : "eusc-de-east-1", + "name" : "aws-eusc", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^eusc\\-(de)\\-\\w+\\-\\d+$", + "regions" : { + "eusc-de-east-1" : { + "description" : "EU (Germany)" + } + } + } ], + "version" : "1.1" +} \ No newline at end of file diff --git a/codegen-maven-plugin/pom.xml b/codegen-maven-plugin/pom.xml index 0533043d5e7f..b7f34743d758 100644 --- 
a/codegen-maven-plugin/pom.xml +++ b/codegen-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ../pom.xml codegen-maven-plugin diff --git a/codegen/pom.xml b/codegen/pom.xml index d539fc07f4d4..a570302d93ac 100644 --- a/codegen/pom.xml +++ b/codegen/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT codegen AWS Java SDK :: Code Generator diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddMetadata.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddMetadata.java index 56ed58996ac8..41d1f32693f8 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddMetadata.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddMetadata.java @@ -28,6 +28,7 @@ import software.amazon.awssdk.codegen.model.service.ServiceModel; import software.amazon.awssdk.codegen.naming.DefaultNamingStrategy; import software.amazon.awssdk.codegen.naming.NamingStrategy; +import software.amazon.awssdk.codegen.utils.ProtocolUtils; import software.amazon.awssdk.utils.Pair; import software.amazon.awssdk.utils.StringUtils; @@ -70,7 +71,7 @@ public static Metadata constructMetadata(ServiceModel serviceModel, .withBaseExceptionName(String.format(Constant.BASE_EXCEPTION_NAME_PATTERN, serviceName)) .withBaseRequestName(String.format(Constant.BASE_REQUEST_NAME_PATTERN, serviceName)) .withBaseResponseName(String.format(Constant.BASE_RESPONSE_NAME_PATTERN, serviceName)) - .withProtocol(Protocol.fromValue(serviceMetadata.getProtocol())) + .withProtocol(Protocol.fromValue(ProtocolUtils.resolveProtocol(serviceMetadata))) .withJsonVersion(serviceMetadata.getJsonVersion()) .withEndpointPrefix(serviceMetadata.getEndpointPrefix()) .withSigningName(serviceMetadata.getSigningName()) diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java index 1f247fe61b9f..6953ce6b066f 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java @@ -37,6 +37,7 @@ import software.amazon.awssdk.codegen.model.service.ServiceModel; import software.amazon.awssdk.codegen.model.service.Shape; import software.amazon.awssdk.codegen.naming.NamingStrategy; +import software.amazon.awssdk.codegen.utils.ProtocolUtils; /** * Constructs the operation model for every operation defined by the service. 
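Several files in this patch (AddMetadata above, plus AddOperations, AddShapes, and the customization processors below) stop reading ServiceMetadata.getProtocol() directly and route through ProtocolUtils.resolveProtocol(...). ProtocolUtils itself is not part of this excerpt, so the following is only an inferred sketch of its contract, derived from the call sites and the new ServiceMetadata fields: prefer the new, ordered `protocols` list and fall back to the deprecated single-valued `protocol` field. The real resolver may additionally pick the first entry the SDK actually supports.

    // Sketch only: inferred contract of ProtocolUtils.resolveProtocol.
    import java.util.List;
    import software.amazon.awssdk.codegen.model.service.ServiceMetadata;

    public final class ProtocolUtilsSketch {
        private ProtocolUtilsSketch() {
        }

        public static String resolveProtocol(ServiceMetadata metadata) {
            List<String> protocols = metadata.getProtocols();
            if (protocols != null && !protocols.isEmpty()) {
                // Highest-priority protocol is listed first in the model.
                return protocols.get(0);
            }
            // Deprecated single-protocol field as fallback.
            return metadata.getProtocol();
        }
    }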
@@ -164,7 +165,7 @@ public Map constructOperations() { OperationModel operationModel = new OperationModel(); operationModel.setOperationName(operationName); - operationModel.setServiceProtocol(serviceModel.getMetadata().getProtocol()); + operationModel.setServiceProtocol(ProtocolUtils.resolveProtocol(serviceModel.getMetadata())); operationModel.setDeprecated(op.isDeprecated()); operationModel.setDeprecatedMessage(op.getDeprecatedMessage()); operationModel.setDocumentation(op.getDocumentation()); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java index f6d47aac8686..46b15ae7dbbd 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java @@ -42,6 +42,7 @@ import software.amazon.awssdk.codegen.model.service.ServiceModel; import software.amazon.awssdk.codegen.model.service.Shape; import software.amazon.awssdk.codegen.naming.NamingStrategy; +import software.amazon.awssdk.codegen.utils.ProtocolUtils; import software.amazon.awssdk.codegen.validation.ModelInvalidException; import software.amazon.awssdk.codegen.validation.ValidationEntry; import software.amazon.awssdk.codegen.validation.ValidationErrorId; @@ -479,6 +480,6 @@ private void fillContainerTypeMemberMetadata(Map c2jShapes, } protected String getProtocol() { - return getServiceModel().getMetadata().getProtocol(); + return ProtocolUtils.resolveProtocol(getServiceModel().getMetadata()); } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/ExplicitStringPayloadQueryProtocolProcessor.java b/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/ExplicitStringPayloadQueryProtocolProcessor.java index 273c231b136d..74ab28dd3417 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/ExplicitStringPayloadQueryProtocolProcessor.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/ExplicitStringPayloadQueryProtocolProcessor.java @@ -23,6 +23,7 @@ import software.amazon.awssdk.codegen.model.service.Output; import software.amazon.awssdk.codegen.model.service.ServiceModel; import software.amazon.awssdk.codegen.model.service.Shape; +import software.amazon.awssdk.codegen.utils.ProtocolUtils; /** * Operations with explicit String payloads are not supported for services with Query protocol. 
We fail the codegen if the @@ -31,7 +32,7 @@ public class ExplicitStringPayloadQueryProtocolProcessor implements CodegenCustomizationProcessor { @Override public void preprocess(ServiceModel serviceModel) { - String protocol = serviceModel.getMetadata().getProtocol(); + String protocol = ProtocolUtils.resolveProtocol(serviceModel.getMetadata()); if (!"ec2".equals(protocol) && !"query".equals(protocol)) { return; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/MetadataModifiersProcessor.java b/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/MetadataModifiersProcessor.java index 1c0786425507..6e4441203516 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/MetadataModifiersProcessor.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/MetadataModifiersProcessor.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.codegen.customization.processors; +import java.util.Collections; import software.amazon.awssdk.codegen.customization.CodegenCustomizationProcessor; import software.amazon.awssdk.codegen.model.config.customization.MetadataConfig; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; @@ -44,7 +45,7 @@ public void preprocess(ServiceModel serviceModel) { String customProtocol = metadataConfig.getProtocol(); if (customProtocol != null) { - serviceMetadata.setProtocol(customProtocol); + serviceMetadata.setProtocols(Collections.singletonList(customProtocol)); } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/SmithyRpcV2CborProtocolProcessor.java b/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/SmithyRpcV2CborProtocolProcessor.java index 7db556fa63ed..2bfb74bef54c 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/SmithyRpcV2CborProtocolProcessor.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/SmithyRpcV2CborProtocolProcessor.java @@ -20,6 +20,7 @@ import software.amazon.awssdk.codegen.model.service.Http; import software.amazon.awssdk.codegen.model.service.Operation; import software.amazon.awssdk.codegen.model.service.ServiceModel; +import software.amazon.awssdk.codegen.utils.ProtocolUtils; /** * This processor only runs for services using the smithy-rpc-v2-cbor protocol. 
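For context on the rewrite below: Smithy RPCv2 CBOR services address every operation through a fixed URI pattern rather than a modeled HTTP trait, which is why this processor assigns each operation a request URI. A hypothetical helper showing the URI shape the Smithy RPCv2 CBOR spec prescribes; the patch's actual setRequestUri implementation is not shown in this excerpt.

    // Illustrative only: the RPCv2 CBOR addressing scheme for operations.
    private static String rpcV2CborRequestUri(String serviceName, String operationName) {
        return "/service/" + serviceName + "/operation/" + operationName;
    }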
@@ -29,7 +30,8 @@ public class SmithyRpcV2CborProtocolProcessor implements CodegenCustomizationProcessor { @Override public void preprocess(ServiceModel serviceModel) { - if (!"smithy-rpc-v2-cbor".equals(serviceModel.getMetadata().getProtocol())) { + String protocol = ProtocolUtils.resolveProtocol(serviceModel.getMetadata()); + if (!"smithy-rpc-v2-cbor".equals(protocol)) { return; } serviceModel.getOperations().forEach((name, op) -> setRequestUri(serviceModel, name, op)); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/AuthSchemeGeneratorTasks.java b/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/AuthSchemeGeneratorTasks.java index fbcec7931bd8..38c170898f27 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/AuthSchemeGeneratorTasks.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/AuthSchemeGeneratorTasks.java @@ -28,6 +28,7 @@ import software.amazon.awssdk.codegen.poet.auth.scheme.EndpointAwareAuthSchemeParamsSpec; import software.amazon.awssdk.codegen.poet.auth.scheme.EndpointBasedAuthSchemeProviderSpec; import software.amazon.awssdk.codegen.poet.auth.scheme.ModelBasedAuthSchemeProviderSpec; +import software.amazon.awssdk.codegen.poet.auth.scheme.PreferredAuthSchemeProviderSpec; public final class AuthSchemeGeneratorTasks extends BaseGeneratorTasks { private final GeneratorTaskParams generatorTaskParams; @@ -45,6 +46,7 @@ protected List createTasks() { tasks.add(generateProviderInterface()); tasks.add(generateDefaultParamsImpl()); tasks.add(generateModelBasedProvider()); + tasks.add(generatePreferenceProvider()); tasks.add(generateAuthSchemeInterceptor()); if (authSchemeSpecUtils.useEndpointBasedAuthProvider()) { tasks.add(generateEndpointBasedProvider()); @@ -69,6 +71,10 @@ private GeneratorTask generateModelBasedProvider() { return new PoetGeneratorTask(authSchemeInternalDir(), model.getFileHeader(), new ModelBasedAuthSchemeProviderSpec(model)); } + private GeneratorTask generatePreferenceProvider() { + return new PoetGeneratorTask(authSchemeInternalDir(), model.getFileHeader(), new PreferredAuthSchemeProviderSpec(model)); + } + private GeneratorTask generateEndpointBasedProvider() { return new PoetGeneratorTask(authSchemeInternalDir(), model.getFileHeader(), new EndpointBasedAuthSchemeProviderSpec(model)); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/CommonInternalGeneratorTasks.java b/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/CommonInternalGeneratorTasks.java index 7d407f582f7d..1bafba5eed5d 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/CommonInternalGeneratorTasks.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/CommonInternalGeneratorTasks.java @@ -15,11 +15,12 @@ package software.amazon.awssdk.codegen.emitters.tasks; -import java.util.Arrays; +import java.util.ArrayList; import java.util.List; import software.amazon.awssdk.codegen.emitters.GeneratorTask; import software.amazon.awssdk.codegen.emitters.GeneratorTaskParams; import software.amazon.awssdk.codegen.emitters.PoetGeneratorTask; +import software.amazon.awssdk.codegen.poet.client.EnvironmentTokenSystemSettingsClass; import software.amazon.awssdk.codegen.poet.client.SdkClientOptions; import software.amazon.awssdk.codegen.poet.common.UserAgentUtilsSpec; @@ -33,7 +34,13 @@ public CommonInternalGeneratorTasks(GeneratorTaskParams params) { @Override protected List createTasks() throws 
Exception { - return Arrays.asList(createClientOptionTask(), createUserAgentTask()); + List tasks = new ArrayList<>(); + tasks.add(createClientOptionTask()); + tasks.add(createUserAgentTask()); + if (params.getModel().getCustomizationConfig().isEnableEnvironmentBearerToken()) { + tasks.add(createEnvironmentTokenSystemSettingTask()); + } + return tasks; } private PoetGeneratorTask createClientOptionTask() { @@ -46,6 +53,11 @@ private PoetGeneratorTask createUserAgentTask() { new UserAgentUtilsSpec(params.getModel())); } + private GeneratorTask createEnvironmentTokenSystemSettingTask() { + return new PoetGeneratorTask(clientOptionsDir(), params.getModel().getFileHeader(), + new EnvironmentTokenSystemSettingsClass(params.getModel())); + } + private String clientOptionsDir() { return params.getPathProvider().getClientInternalDirectory(); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/internal/Utils.java b/codegen/src/main/java/software/amazon/awssdk/codegen/internal/Utils.java index 1efeefb9b27a..becbdfd18a32 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/internal/Utils.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/internal/Utils.java @@ -39,6 +39,7 @@ import software.amazon.awssdk.codegen.model.service.ServiceModel; import software.amazon.awssdk.codegen.model.service.Shape; import software.amazon.awssdk.codegen.model.service.XmlNamespace; +import software.amazon.awssdk.codegen.utils.ProtocolUtils; import software.amazon.awssdk.utils.IoUtils; import software.amazon.awssdk.utils.StringUtils; @@ -331,11 +332,13 @@ public static ShapeMarshaller createInputShapeMarshaller(ServiceMetadata service "The operation parameter must be specified!"); } + String protocol = ProtocolUtils.resolveProtocol(service); + ShapeMarshaller marshaller = new ShapeMarshaller() .withAction(operation.getName()) .withVerb(operation.getHttp().getMethod()) .withRequestUri(operation.getHttp().getRequestUri()) - .withProtocol(service.getProtocol()); + .withProtocol(protocol); Input input = operation.getInput(); if (input != null) { marshaller.setLocationName(input.getLocationName()); @@ -345,7 +348,7 @@ public static ShapeMarshaller createInputShapeMarshaller(ServiceMetadata service marshaller.setXmlNameSpaceUri(xmlNamespace.getUri()); } } - if (Metadata.usesOperationIdentifier(service.getProtocol())) { + if (Metadata.usesOperationIdentifier(protocol)) { marshaller.setTarget(StringUtils.isEmpty(service.getTargetPrefix()) ? operation.getName() : service.getTargetPrefix() + "." + operation.getName()); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java index 32cecd79feb5..069c6ca6d5a2 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java @@ -350,6 +350,13 @@ public class CustomizationConfig { */ private boolean enableFastUnmarshaller; + /** + * A boolean flag to indicate if support for configuring a bearer token sourced from the environment should be added to the + * generated service. When enabled, the generated client will use bearer auth with the token sourced from the + * `AWS_BEARER_TOKEN_[SigningName]` environment variable. 
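Concretely, for a hypothetical service whose signing name is "foo-service", the EnvironmentTokenSystemSettingsClass spec added later in this patch would emit roughly the following class (package and imports elided): the environment variable name uses SCREAM_CASE and the system property uses PascalCase, matching the new naming-strategy methods introduced below.

    // Approximate generated output for signing name "foo-service" (hypothetical).
    @Generated("software.amazon.awssdk:codegen")
    @SdkInternalApi
    public class EnvironmentTokenSystemSettings implements SystemSetting {
        @Override
        public String property() {
            return "aws.bearerTokenFooService";
        }

        @Override
        public String environmentVariable() {
            return "AWS_BEARER_TOKEN_FOO_SERVICE";
        }

        @Override
        public String defaultValue() {
            // No default: the token must come from the environment or a JVM property.
            return null;
        }
    }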
+     */
+    private boolean enableEnvironmentBearerToken = false;
+
     private CustomizationConfig() {
     }

@@ -924,4 +931,12 @@ public boolean getEnableFastUnmarshaller() {
     public void setEnableFastUnmarshaller(boolean enableFastUnmarshaller) {
         this.enableFastUnmarshaller = enableFastUnmarshaller;
     }
+
+    public boolean isEnableEnvironmentBearerToken() {
+        return enableEnvironmentBearerToken;
+    }
+
+    public void setEnableEnvironmentBearerToken(boolean enableEnvironmentBearerToken) {
+        this.enableEnvironmentBearerToken = enableEnvironmentBearerToken;
+    }
 }
diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/ServiceMetadata.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/ServiceMetadata.java
index 073f97e05f8f..95f7f7349d95 100644
--- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/ServiceMetadata.java
+++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/ServiceMetadata.java
@@ -17,6 +17,7 @@
 import java.util.List;
 import java.util.Map;
+import software.amazon.awssdk.codegen.utils.ProtocolUtils;

 public class ServiceMetadata {
@@ -36,6 +37,8 @@ public class ServiceMetadata {
     private String protocol;

+    private List<String> protocols;
+
     private String jsonVersion;

     private Map awsQueryCompatible;
@@ -103,14 +106,30 @@ public void setXmlNamespace(String xmlNamespace) {
         this.xmlNamespace = xmlNamespace;
     }

+    /**
+     * The {@code protocol} field is superseded by the {@code protocols} field; the effective protocol
+     * is resolved in {@link ProtocolUtils#resolveProtocol(ServiceMetadata)}.
+     */
+    @Deprecated
     public String getProtocol() {
         return protocol;
     }

+    /**
+     * The {@code protocol} field is superseded by the {@code protocols} field; the effective protocol
+     * is resolved in {@link ProtocolUtils#resolveProtocol(ServiceMetadata)}.
+     */
+    @Deprecated
     public void setProtocol(String protocol) {
         this.protocol = protocol;
     }

+    public List<String> getProtocols() {
+        return protocols;
+    }
+
+    public void setProtocols(List<String> protocols) {
+        this.protocols = protocols;
+    }
+
     public String getJsonVersion() {
         return jsonVersion;
     }
diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategy.java b/codegen/src/main/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategy.java
index e5a4904f295f..efb230313efb 100644
--- a/codegen/src/main/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategy.java
+++ b/codegen/src/main/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategy.java
@@ -432,6 +432,22 @@ private boolean isDisallowedNameForShape(String name, Shape parentShape) {
         }
     }

+    @Override
+    public String getSigningName() {
+        return Optional.ofNullable(serviceModel.getMetadata().getSigningName())
+                       .orElseGet(() -> serviceModel.getMetadata().getEndpointPrefix());
+    }
+
+    @Override
+    public String getSigningNameForEnvironmentVariables() {
+        return screamCase(getSigningName());
+    }
+
+    @Override
+    public String getSigningNameForSystemProperties() {
+        return pascalCase(getSigningName());
+    }
+
     @Override
     public void validateCustomerVisibleNaming(IntermediateModel trimmedModel) {
         Metadata metadata = trimmedModel.getMetadata();
diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/naming/NamingStrategy.java b/codegen/src/main/java/software/amazon/awssdk/codegen/naming/NamingStrategy.java
index 1fe32773d71f..637920be14de 100644
--- a/codegen/src/main/java/software/amazon/awssdk/codegen/naming/NamingStrategy.java
+++ b/codegen/src/main/java/software/amazon/awssdk/codegen/naming/NamingStrategy.java
@@ -200,6 +200,21 @@ public interface NamingStrategy {
      */
     String
getExistenceCheckMethodName(String memberName, Shape parentShape); + /** + * Retrieve the service's signing name that should be used based on the model. + */ + String getSigningName(); + + /** + * Retrieve the service's signing name that should be used for environment variables. + */ + String getSigningNameForEnvironmentVariables(); + + /** + * Retrieve the service's signing name that should be used for system properties. + */ + String getSigningNameForSystemProperties(); + /** * Verify the customer-visible naming in the provided intermediate model will compile and is idiomatic to Java. */ diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/PoetExtension.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/PoetExtension.java index b126fd2f201e..4112188c6451 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/PoetExtension.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/PoetExtension.java @@ -79,6 +79,10 @@ public ClassName getUserAgentClass() { return ClassName.get(model.getMetadata().getFullClientInternalPackageName(), "UserAgentUtils"); } + public ClassName getEnvironmentTokenSystemSettingsClass() { + return ClassName.get(model.getMetadata().getFullClientInternalPackageName(), "EnvironmentTokenSystemSettings"); + } + /** * @param operationName Name of the operation * @return A Poet {@link ClassName} for the response type of a paginated operation in the base service package. diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeInterceptorSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeInterceptorSpec.java index 7686ef132271..043bf74ba9d9 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeInterceptorSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeInterceptorSpec.java @@ -49,8 +49,10 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.internal.util.MetricUtils; import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.core.useragent.BusinessMetricFeatureId; import software.amazon.awssdk.endpoints.EndpointProvider; import software.amazon.awssdk.http.auth.aws.signer.RegionSet; +import software.amazon.awssdk.http.auth.scheme.BearerAuthScheme; import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme; import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; import software.amazon.awssdk.http.auth.spi.signer.HttpSigner; @@ -70,8 +72,10 @@ public final class AuthSchemeInterceptorSpec implements ClassSpec { private final AuthSchemeSpecUtils authSchemeSpecUtils; private final EndpointRulesSpecUtils endpointRulesSpecUtils; + private final IntermediateModel intermediateModel; public AuthSchemeInterceptorSpec(IntermediateModel intermediateModel) { + this.intermediateModel = intermediateModel; this.authSchemeSpecUtils = new AuthSchemeSpecUtils(intermediateModel); this.endpointRulesSpecUtils = new EndpointRulesSpecUtils(intermediateModel); } @@ -99,9 +103,42 @@ public TypeSpec poetSpec() { .addMethod(generateTrySelectAuthScheme()) .addMethod(generateGetIdentityMetric()) .addMethod(putSelectedAuthSchemeMethodSpec()); + if (intermediateModel.getCustomizationConfig().isEnableEnvironmentBearerToken()) { + builder.addMethod(generateEnvironmentTokenMetric()); + } return builder.build(); } + private MethodSpec generateEnvironmentTokenMetric() { + return MethodSpec 
+ .methodBuilder("recordEnvironmentTokenBusinessMetric") + .addModifiers(Modifier.PRIVATE) + .addTypeVariable(TypeVariableName.get("T", Identity.class)) + .addParameter(ParameterSpec.builder( + ParameterizedTypeName.get(ClassName.get(SelectedAuthScheme.class), + TypeVariableName.get("T")), + "selectedAuthScheme").build()) + .addParameter(ExecutionAttributes.class, "executionAttributes") + .addStatement("$T tokenFromEnv = executionAttributes.getAttribute($T.TOKEN_CONFIGURED_FROM_ENV)", + String.class, SdkInternalExecutionAttribute.class) + .beginControlFlow("if (selectedAuthScheme != null && selectedAuthScheme.authSchemeOption().schemeId().equals($T" + + ".SCHEME_ID) && selectedAuthScheme.identity().isDone())", BearerAuthScheme.class) + .beginControlFlow("if (selectedAuthScheme.identity().getNow(null) instanceof $T)", TokenIdentity.class) + + .addStatement("$T configuredToken = ($T) selectedAuthScheme.identity().getNow(null)", + TokenIdentity.class, TokenIdentity.class) + .beginControlFlow("if (configuredToken.token().equals(tokenFromEnv))") + .addStatement("executionAttributes.getAttribute($T.BUSINESS_METRICS)" + + ".addMetric($T.BEARER_SERVICE_ENV_VARS.value())", + SdkInternalExecutionAttribute.class, BusinessMetricFeatureId.class) + .endControlFlow() + .endControlFlow() + .endControlFlow() + .build(); + + + } + private MethodSpec generateBeforeExecution() { MethodSpec.Builder builder = MethodSpec.methodBuilder("beforeExecution") .addAnnotation(Override.class) @@ -116,6 +153,11 @@ private MethodSpec generateBeforeExecution() { .addStatement("$T selectedAuthScheme = selectAuthScheme(authOptions, executionAttributes)", wildcardSelectedAuthScheme()) .addStatement("putSelectedAuthScheme(executionAttributes, selectedAuthScheme)"); + + if (intermediateModel.getCustomizationConfig().isEnableEnvironmentBearerToken()) { + builder.addStatement("recordEnvironmentTokenBusinessMetric(selectedAuthScheme, " + + "executionAttributes)"); + } return builder.build(); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeProviderSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeProviderSpec.java index bc5255695ad1..b43c577e9931 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeProviderSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeProviderSpec.java @@ -21,6 +21,7 @@ import com.squareup.javapoet.ParameterizedTypeName; import com.squareup.javapoet.TypeName; import com.squareup.javapoet.TypeSpec; +import java.util.List; import java.util.function.Consumer; import javax.lang.model.element.Modifier; import software.amazon.awssdk.annotations.SdkPublicApi; @@ -54,6 +55,7 @@ public TypeSpec poetSpec() { .addMethod(resolveAuthSchemeMethod()) .addMethod(resolveAuthSchemeConsumerBuilderMethod()) .addMethod(defaultProviderMethod()) + .addMethod(defaultPreferredProviderMethod()) .build(); } @@ -93,6 +95,17 @@ private MethodSpec defaultProviderMethod() { .build(); } + private MethodSpec defaultPreferredProviderMethod() { + return MethodSpec.methodBuilder("defaultProvider") + .addModifiers(Modifier.PUBLIC, Modifier.STATIC) + .addParameter(ParameterizedTypeName.get(List.class, String.class), "authSchemePreference") + .returns(className()) + .addJavadoc("Get the default auth scheme provider the preferred auth schemes in order of preference.") + .addStatement("return new $T(defaultProvider(), authSchemePreference)", + 
authSchemeSpecUtils.preferredAuthSchemeProviderName()) + .build(); + } + private CodeBlock interfaceJavadoc() { CodeBlock.Builder b = CodeBlock.builder(); @@ -105,3 +118,4 @@ private CodeBlock interfaceJavadoc() { return b.build(); } } + diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecUtils.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecUtils.java index a02f3e8bc893..f6ea9e684b59 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecUtils.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecUtils.java @@ -97,6 +97,15 @@ public ClassName modeledAuthSchemeProviderName() { return ClassName.get(internalPackage(), "Modeled" + providerInterfaceName().simpleName()); } + public ClassName preferredAuthSchemeProviderName() { + return ClassName.get(internalPackage(), "Preferred" + providerInterfaceName().simpleName()); + } + + public ClassName authSchemeProviderBuilderName() { + return ClassName.get(basePackage(), + intermediateModel.getMetadata().getServiceName() + "AuthSchemeProviderBuilder"); + } + public ClassName authSchemeInterceptor() { return ClassName.get(internalPackage(), intermediateModel.getMetadata().getServiceName() + "AuthSchemeInterceptor"); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/PreferredAuthSchemeProviderSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/PreferredAuthSchemeProviderSpec.java new file mode 100644 index 000000000000..0481d707d999 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/PreferredAuthSchemeProviderSpec.java @@ -0,0 +1,108 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.poet.auth.scheme; + +import com.squareup.javapoet.ClassName; +import com.squareup.javapoet.MethodSpec; +import com.squareup.javapoet.ParameterizedTypeName; +import com.squareup.javapoet.TypeSpec; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import javax.lang.model.element.Modifier; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.poet.ClassSpec; +import software.amazon.awssdk.codegen.poet.PoetUtils; +import software.amazon.awssdk.utils.CollectionUtils; + +public class PreferredAuthSchemeProviderSpec implements ClassSpec { + private final AuthSchemeSpecUtils authSchemeSpecUtils; + + public PreferredAuthSchemeProviderSpec(IntermediateModel intermediateModel) { + this.authSchemeSpecUtils = new AuthSchemeSpecUtils(intermediateModel); + } + + @Override + public ClassName className() { + return authSchemeSpecUtils.preferredAuthSchemeProviderName(); + } + + @Override + public TypeSpec poetSpec() { + return PoetUtils.createClassBuilder(className()) + .addModifiers(Modifier.PUBLIC, Modifier.FINAL) + .addAnnotation(SdkInternalApi.class) + .addField( + authSchemeSpecUtils.providerInterfaceName(), "delegate", + Modifier.PRIVATE, Modifier.FINAL) + .addField( + ParameterizedTypeName.get(List.class, String.class), "authSchemePreference", + Modifier.PRIVATE, Modifier.FINAL) + .addSuperinterface(authSchemeSpecUtils.providerInterfaceName()) + .addMethod(constructor()) + .addMethod(resolveAuthSchemeMethod()) + .build(); + } + + private MethodSpec constructor() { + return MethodSpec + .constructorBuilder() + .addModifiers(Modifier.PUBLIC) + .addParameter(authSchemeSpecUtils.providerInterfaceName(), "delegate") + .addParameter(ParameterizedTypeName.get(List.class, String.class), "authSchemePreference") + .addStatement("this.delegate = delegate") + .addStatement("this.authSchemePreference = authSchemePreference != null ? authSchemePreference " + + ": $T.emptyList()", + Collections.class) + .build(); + } + + private MethodSpec resolveAuthSchemeMethod() { + MethodSpec.Builder b = MethodSpec.methodBuilder("resolveAuthScheme") + .addModifiers(Modifier.PUBLIC) + .addAnnotation(Override.class) + .returns(authSchemeSpecUtils.resolverReturnType()) + .addParameter(authSchemeSpecUtils.parametersInterfaceName(), "params"); + b.addJavadoc("Resolve the auth schemes based on the given set of parameters."); + b.addStatement("$T candidateAuthSchemes = delegate.resolveAuthScheme(params)", + authSchemeSpecUtils.resolverReturnType()); + b.beginControlFlow("if ($T.isNullOrEmpty(authSchemePreference))", CollectionUtils.class) + .addStatement("return candidateAuthSchemes") + .endControlFlow(); + + b.addStatement("$T authSchemes = new $T<>()", authSchemeSpecUtils.resolverReturnType(), ArrayList.class); + + b.beginControlFlow("authSchemePreference.forEach(preferredSchemeId -> "); + + b.beginControlFlow("candidateAuthSchemes.stream().filter(candidate -> "); + b.addStatement("String candidateSchemeName = candidate.schemeId().contains(\"#\") ? 
" + + "candidate.schemeId().split(\"#\")[1] : candidate.schemeId()"); + b.addStatement("return candidateSchemeName.equals(preferredSchemeId)"); + b.endControlFlow(").findFirst().ifPresent(authSchemes::add)"); + b.endControlFlow(")"); + + b.beginControlFlow("candidateAuthSchemes.forEach(candidate -> ") + .beginControlFlow("if (!authSchemes.contains(candidate))") + .addStatement("authSchemes.add(candidate)") + .endControlFlow() + .endControlFlow(")"); + + b.addStatement("return authSchemes"); + return b.build(); + } + +} \ No newline at end of file diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java index b57d7cb8d7aa..96d95f3650f8 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java @@ -41,8 +41,10 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.credentials.TokenUtils; import software.amazon.awssdk.auth.signer.Aws4Signer; +import software.amazon.awssdk.auth.token.credentials.StaticTokenProvider; import software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider; import software.amazon.awssdk.auth.token.signer.aws.BearerTokenSigner; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -52,6 +54,7 @@ import software.amazon.awssdk.codegen.model.service.AuthType; import software.amazon.awssdk.codegen.model.service.ClientContextParam; import software.amazon.awssdk.codegen.poet.ClassSpec; +import software.amazon.awssdk.codegen.poet.PoetExtension; import software.amazon.awssdk.codegen.poet.PoetUtils; import software.amazon.awssdk.codegen.poet.auth.scheme.AuthSchemeSpecUtils; import software.amazon.awssdk.codegen.poet.auth.scheme.ModelAuthSchemeClassesKnowledgeIndex; @@ -70,7 +73,9 @@ import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.endpointdiscovery.providers.DefaultEndpointDiscoveryProviderChain; import software.amazon.awssdk.core.interceptor.ClasspathInterceptorChainFactory; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.retry.RetryMode; import software.amazon.awssdk.core.signer.Signer; import software.amazon.awssdk.http.Protocol; @@ -101,6 +106,8 @@ public class BaseClientBuilderClass implements ClassSpec { private final AuthSchemeSpecUtils authSchemeSpecUtils; private final ServiceClientConfigurationUtils configurationUtils; private final EndpointParamsKnowledgeIndex endpointParamsKnowledgeIndex; + private final PoetExtension poetExtensions; + public BaseClientBuilderClass(IntermediateModel model) { this.model = model; @@ -111,6 +118,7 @@ public BaseClientBuilderClass(IntermediateModel model) { this.authSchemeSpecUtils = new AuthSchemeSpecUtils(model); this.configurationUtils = new ServiceClientConfigurationUtils(model); this.endpointParamsKnowledgeIndex = EndpointParamsKnowledgeIndex.of(model); + this.poetExtensions = new 
PoetExtension(model); } @Override @@ -265,24 +273,24 @@ private MethodSpec serviceNameMethod() { } private MethodSpec mergeServiceDefaultsMethod() { - boolean crc32FromCompressedDataEnabled = model.getCustomizationConfig().isCalculateCrc32FromCompressedData(); - MethodSpec.Builder builder = MethodSpec.methodBuilder("mergeServiceDefaults") .addAnnotation(Override.class) .addModifiers(PROTECTED, FINAL) .returns(SdkClientConfiguration.class) - .addParameter(SdkClientConfiguration.class, "config") - .addCode("return config.merge(c -> c"); + .addParameter(SdkClientConfiguration.class, "config"); - builder.addCode(".option($T.ENDPOINT_PROVIDER, defaultEndpointProvider())", SdkClientOption.class); + boolean crc32FromCompressedDataEnabled = model.getCustomizationConfig().isCalculateCrc32FromCompressedData(); + + builder.beginControlFlow("return config.merge(c -> "); + builder.addCode("c.option($T.ENDPOINT_PROVIDER, defaultEndpointProvider())", SdkClientOption.class); if (authSchemeSpecUtils.useSraAuth()) { - builder.addCode(".option($T.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider())", SdkClientOption.class); - builder.addCode(".option($T.AUTH_SCHEMES, authSchemes())", SdkClientOption.class); - } else { - if (defaultAwsAuthSignerMethod().isPresent()) { - builder.addCode(".option($T.SIGNER, defaultSigner())\n", SdkAdvancedClientOption.class); + if (!model.getCustomizationConfig().isEnableEnvironmentBearerToken()) { + builder.addCode(".option($T.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config))", SdkClientOption.class); } + builder.addCode(".option($T.AUTH_SCHEMES, authSchemes())", SdkClientOption.class); + } else if (defaultAwsAuthSignerMethod().isPresent()) { + builder.addCode(".option($T.SIGNER, defaultSigner())\n", SdkAdvancedClientOption.class); } builder.addCode(".option($T.CRC32_FROM_COMPRESSED_DATA_ENABLED, $L)\n", SdkClientOption.class, crc32FromCompressedDataEnabled); @@ -301,11 +309,47 @@ private MethodSpec mergeServiceDefaultsMethod() { builder.addCode(".option($T.TOKEN_SIGNER, defaultTokenSigner())", SdkAdvancedClientOption.class); } } + builder.addStatement(""); - builder.addCode(");"); + if (model.getCustomizationConfig().isEnableEnvironmentBearerToken()) { + configureEnvironmentBearerToken(builder); + } + builder.endControlFlow(")"); return builder.build(); } + private void configureEnvironmentBearerToken(MethodSpec.Builder builder) { + if (!authSchemeSpecUtils.useSraAuth()) { + throw new IllegalStateException("The enableEnvironmentBearerToken customization requires SRA Auth."); + } + if (!AuthUtils.usesBearerAuth(model)) { + throw new IllegalStateException("The enableEnvironmentBearerToken customization requires the service to model and " + + "support smithy.api#httpBearerAuth."); + } + + builder.addStatement("$T tokenFromEnv = new $T().getStringValue()", + ParameterizedTypeName.get(Optional.class, String.class), + poetExtensions.getEnvironmentTokenSystemSettingsClass()); + + builder + .beginControlFlow("if (tokenFromEnv.isPresent() && config.option($T.AUTH_SCHEME_PROVIDER) == null && config.option($T" + + ".TOKEN_IDENTITY_PROVIDER) == null)", + SdkClientOption.class, AwsClientOption.class) + .addStatement("c.option($T.AUTH_SCHEME_PROVIDER, $T.defaultProvider($T.singletonList($S)))", + SdkClientOption.class, authSchemeSpecUtils.providerInterfaceName(), Collections.class, + "httpBearerAuth") + .addStatement("c.option($T.TOKEN_IDENTITY_PROVIDER, $T.create(tokenFromEnv::get))", + AwsClientOption.class, StaticTokenProvider.class) + 
.addStatement("c.option($T.EXECUTION_ATTRIBUTES, " + + "$T.builder().put($T.TOKEN_CONFIGURED_FROM_ENV, tokenFromEnv.get()).build())", + SdkClientOption.class, ExecutionAttributes.class, SdkInternalExecutionAttribute.class) + .endControlFlow() + .beginControlFlow("else") + .addStatement("c.option($T.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config))", SdkClientOption.class) + .endControlFlow(); + + } + private Optional mergeInternalDefaultsMethod() { String userAgent = model.getCustomizationConfig().getUserAgent(); RetryMode defaultRetryMode = model.getCustomizationConfig().getDefaultRetryMode(); @@ -442,7 +486,7 @@ private MethodSpec finalizeServiceConfigurationMethod() { // serviceConfigBuilder; the service configuration classes (e.g. S3Configuration) return primitive booleans that // have a default when not present. builder.addStatement("builder.option($T.DUALSTACK_ENDPOINT_ENABLED, serviceConfigBuilder.dualstackEnabled())", - AwsClientOption.class); + AwsClientOption.class); } if (model.getCustomizationConfig().getServiceConfig().hasFipsProperty()) { @@ -452,14 +496,14 @@ private MethodSpec finalizeServiceConfigurationMethod() { if (model.getEndpointOperation().isPresent()) { builder.addStatement("builder.option($T.ENDPOINT_DISCOVERY_ENABLED, endpointDiscoveryEnabled)\n", - SdkClientOption.class); + SdkClientOption.class); } if (StringUtils.isNotBlank(model.getCustomizationConfig().getCustomRetryStrategy())) { builder.addStatement("builder.option($1T.RETRY_STRATEGY, $2T.resolveRetryStrategy(config))", - SdkClientOption.class, - PoetUtils.classNameFromFqcn(model.getCustomizationConfig().getCustomRetryStrategy())); + SdkClientOption.class, + PoetUtils.classNameFromFqcn(model.getCustomizationConfig().getCustomRetryStrategy())); } if (StringUtils.isNotBlank(model.getCustomizationConfig().getCustomRetryPolicy())) { @@ -485,7 +529,7 @@ private MethodSpec finalizeServiceConfigurationMethod() { if (endpointParamsKnowledgeIndex.hasAccountIdEndpointModeBuiltIn()) { builder.addStatement("builder.option($T.$L, resolveAccountIdEndpointMode(config))", - AwsClientOption.class, model.getNamingStrategy().getEnumValueName("accountIdEndpointMode")); + AwsClientOption.class, model.getNamingStrategy().getEnumValueName("accountIdEndpointMode")); } String serviceNameForEnvVar = model.getNamingStrategy().getServiceNameForEnvironmentVariables(); @@ -829,7 +873,19 @@ private MethodSpec sigv4aSigningRegionSetMethod() { private MethodSpec defaultAuthSchemeProviderMethod() { return MethodSpec.methodBuilder("defaultAuthSchemeProvider") .addModifiers(PRIVATE) + .addParameter(SdkClientConfiguration.class, "config") .returns(authSchemeSpecUtils.providerInterfaceName()) + .addCode("$T authSchemePreferenceProvider = " + + "$T.builder()", + AuthSchemePreferenceResolver.class, AuthSchemePreferenceResolver.class) + .addCode(".profileFile(config.option($T.PROFILE_FILE_SUPPLIER))", SdkClientOption.class) + .addCode(".profileName(config.option($T.PROFILE_NAME))", SdkClientOption.class) + .addStatement(".build()") + .addStatement("List preferences = authSchemePreferenceProvider.resolveAuthSchemePreference()") + .beginControlFlow("if(!preferences.isEmpty())") + .addStatement("return $T.defaultProvider(preferences)", + authSchemeSpecUtils.providerInterfaceName()) + .endControlFlow() .addStatement("return $T.defaultProvider()", authSchemeSpecUtils.providerInterfaceName()) .build(); } @@ -965,10 +1021,10 @@ private MethodSpec internalPluginsMethod() { List internalPlugins = model.getCustomizationConfig().getInternalPlugins(); 
if (internalPlugins.isEmpty()) { return builder.addStatement("return $T.emptyList()", Collections.class) - .build(); + .build(); } - builder.addStatement("$T internalPlugins = new $T<>()", parameterizedTypeName, ArrayList.class); + builder.addStatement("$T internalPlugins = new $T<>()", parameterizedTypeName, ArrayList.class); for (String internalPlugin : internalPlugins) { String arguments = internalPluginNewArguments(internalPlugin); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/EnvironmentTokenSystemSettingsClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/EnvironmentTokenSystemSettingsClass.java new file mode 100644 index 000000000000..3ca3fb56ab41 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/EnvironmentTokenSystemSettingsClass.java @@ -0,0 +1,76 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.client; + +import com.squareup.javapoet.ClassName; +import com.squareup.javapoet.MethodSpec; +import com.squareup.javapoet.TypeSpec; +import javax.lang.model.element.Modifier; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.naming.NamingStrategy; +import software.amazon.awssdk.codegen.poet.ClassSpec; +import software.amazon.awssdk.codegen.poet.PoetExtension; +import software.amazon.awssdk.codegen.poet.PoetUtils; +import software.amazon.awssdk.utils.SystemSetting; + +public class EnvironmentTokenSystemSettingsClass implements ClassSpec { + protected final IntermediateModel model; + protected final PoetExtension poetExtensions; + + public EnvironmentTokenSystemSettingsClass(IntermediateModel model) { + this.model = model; + this.poetExtensions = new PoetExtension(model); + } + + @Override + public TypeSpec poetSpec() { + NamingStrategy namingStrategy = model.getNamingStrategy(); + + String systemPropertyName = "aws.bearerToken" + namingStrategy.getSigningNameForSystemProperties(); + String envName = "AWS_BEARER_TOKEN_" + namingStrategy.getSigningNameForEnvironmentVariables(); + + return TypeSpec.classBuilder(className()) + .addModifiers(Modifier.PUBLIC) + .addAnnotation(PoetUtils.generatedAnnotation()) + .addAnnotation(SdkInternalApi.class) + .addSuperinterface(SystemSetting.class) + .addMethod(MethodSpec.methodBuilder("property") + .addAnnotation(Override.class) + .addModifiers(Modifier.PUBLIC) + .returns(String.class) + .addStatement("return $S", systemPropertyName) + .build()) + .addMethod(MethodSpec.methodBuilder("environmentVariable") + .addAnnotation(Override.class) + .addModifiers(Modifier.PUBLIC) + .returns(String.class) + .addStatement("return $S", envName) + .build()) + .addMethod(MethodSpec.methodBuilder("defaultValue") + .addAnnotation(Override.class) + .addModifiers(Modifier.PUBLIC) + .returns(String.class) + .addStatement("return null") + .build()) + .build(); + } + + @Override + 
public ClassName className() { + return poetExtensions.getEnvironmentTokenSystemSettingsClass(); + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java index 5d4ef551216d..8e1afb25e8e0 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java @@ -235,6 +235,10 @@ public CodeBlock executionHandler(OperationModel opModel) { codeBlock.add(RequestCompressionTrait.create(opModel, model)); + if (opModel.hasStreamingOutput()) { + codeBlock.add(".withResponseTransformer(responseTransformer)"); + } + if (opModel.hasStreamingInput()) { codeBlock.add(".withRequestBody(requestBody)") .add(".withMarshaller($L)", syncStreamingMarshaller(model, opModel, marshaller)); @@ -310,6 +314,10 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper builder.add(NoneAuthTypeRequestTrait.create(opModel)); } + if (opModel.hasStreamingOutput()) { + builder.add(".withAsyncResponseTransformer(asyncResponseTransformer)"); + } + builder.add(RequestCompressionTrait.create(opModel, model)) .add(".withInput($L)$L)", opModel.getInput().getVariableName(), asyncResponseTransformerVariable(isStreaming, isRestJson, opModel)) diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java index b6cca23e38cf..4bf7d45d5f9c 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java @@ -128,7 +128,9 @@ public CodeBlock executionHandler(OperationModel opModel) { } codeBlock.add(RequestCompressionTrait.create(opModel, intermediateModel)); - + if (opModel.hasStreamingOutput()) { + codeBlock.add(".withResponseTransformer(responseTransformer)"); + } if (opModel.hasStreamingInput()) { return codeBlock.add(".withRequestBody(requestBody)") .add(".withMarshaller($L));", syncStreamingMarshaller(intermediateModel, opModel, marshaller)) @@ -170,6 +172,10 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper builder.add(RequestCompressionTrait.create(opModel, intermediateModel)); + if (opModel.hasStreamingOutput()) { + builder.add(".withAsyncResponseTransformer(asyncResponseTransformer)"); + } + builder.add(hostPrefixExpression(opModel) + asyncRequestBody + ".withInput($L)$L);", opModel.getInput().getVariableName(), opModel.hasStreamingOutput() ? 
", asyncResponseTransformer" : ""); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ServiceClientConfigurationUtils.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ServiceClientConfigurationUtils.java index 22f06d253b56..320506822765 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ServiceClientConfigurationUtils.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ServiceClientConfigurationUtils.java @@ -36,6 +36,7 @@ import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.poet.auth.scheme.AuthSchemeSpecUtils; import software.amazon.awssdk.codegen.poet.rules.EndpointRulesSpecUtils; +import software.amazon.awssdk.codegen.utils.AuthUtils; import software.amazon.awssdk.core.ClientEndpointProvider; import software.amazon.awssdk.core.checksums.RequestChecksumCalculation; import software.amazon.awssdk.core.checksums.ResponseChecksumValidation; @@ -48,6 +49,7 @@ import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeProvider; import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; import software.amazon.awssdk.identity.spi.IdentityProvider; +import software.amazon.awssdk.identity.spi.TokenIdentity; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.utils.AttributeMap; import software.amazon.awssdk.utils.Validate; @@ -105,10 +107,34 @@ private List fields(IntermediateModel model) { authSchemeProviderField() )); fields.addAll(addCustomClientParams(model)); + fields.addAll(addModeledIdentityProviders(model)); fields.addAll(addCustomClientConfigParams(model)); return fields; } + private List addModeledIdentityProviders(IntermediateModel model) { + List identityProviderFields = new ArrayList<>(); + if (AuthUtils.usesBearerAuth(model)) { + identityProviderFields.add(tokenIdentityProviderField()); + } + return identityProviderFields; + } + + private Field tokenIdentityProviderField() { + TypeName tokenIdentityProviderType = + ParameterizedTypeName.get(ClassName.get(IdentityProvider.class), + WildcardTypeName.subtypeOf(TokenIdentity.class)); + + return fieldBuilder("tokenProvider", tokenIdentityProviderType) + .doc("token provider") + .isInherited(false) + .localSetter(basicLocalSetterCode("tokenProvider")) + .localGetter(basicLocalGetterCode("tokenProvider")) + .configSetter(basicConfigSetterCode(AwsClientOption.TOKEN_IDENTITY_PROVIDER, "tokenProvider")) + .configGetter(basicConfigGetterCode(AwsClientOption.TOKEN_IDENTITY_PROVIDER)) + .build(); + } + private List addCustomClientParams(IntermediateModel model) { List customClientParamFields = new ArrayList<>(); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/CodeGeneratorVisitor.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/CodeGeneratorVisitor.java index 4cd94ace20ad..04844eebaecb 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/CodeGeneratorVisitor.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/CodeGeneratorVisitor.java @@ -21,6 +21,7 @@ import java.util.Arrays; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import software.amazon.awssdk.awscore.endpoints.AwsEndpointAttribute; import software.amazon.awssdk.awscore.endpoints.authscheme.SigV4AuthScheme; import software.amazon.awssdk.awscore.endpoints.authscheme.SigV4aAuthScheme; @@ -32,14 +33,17 @@ public class CodeGeneratorVisitor extends 
WalkRuleExpressionVisitor { private final RuleRuntimeTypeMirror typeMirror; private final SymbolTable symbolTable; private final Map knownEndpointAttributes; + private final Map ruleIdToScope; public CodeGeneratorVisitor(RuleRuntimeTypeMirror typeMirror, SymbolTable symbolTable, Map knownEndpointAttributes, + Map ruleIdToScope, CodeBlock.Builder builder) { this.builder = builder; this.symbolTable = symbolTable; this.knownEndpointAttributes = knownEndpointAttributes; + this.ruleIdToScope = ruleIdToScope; this.typeMirror = typeMirror; } @@ -196,28 +200,14 @@ public Void visitRuleSetExpression(RuleSetExpression e) { @Override public Void visitLetExpression(LetExpression expr) { - for (String key : expr.bindings().keySet()) { - RuleType type = symbolTable.locals().get(key); - builder.addStatement("$T $L = null", type.javaType(), key); - } - - int count = 0; for (Map.Entry kvp : expr.bindings().entrySet()) { String k = kvp.getKey(); RuleExpression v = kvp.getValue(); - builder.add("if ("); - builder.add("($L = ", k); + RuleType type = symbolTable.locals().get(k); + builder.add("$T $L = ", type.javaType(), k); v.accept(this); - builder.add(") != null"); - - builder.beginControlFlow(")"); - builder.addStatement("locals = locals.toBuilder().$1L($1L).build()", k); - - if (++count < expr.bindings().size()) { - builder.nextControlFlow("else"); - builder.addStatement("return RuleResult.carryOn()"); - builder.endControlFlow(); - } + builder.addStatement(""); + builder.beginControlFlow("if ($L != null)", k); } return null; } @@ -235,40 +225,101 @@ private void conditionsPreamble(RuleSetExpression expr) { } private void conditionsEpilogue(RuleSetExpression expr) { - int blocksToClose = expr.conditions().size(); - for (int idx = 0; idx < blocksToClose; ++idx) { - builder.endControlFlow(); + for (RuleExpression condition : expr.conditions()) { + if (condition.kind() == RuleExpression.RuleExpressionKind.LET) { + LetExpression let = (LetExpression) condition; + for (int x = 0; x < let.bindings().size(); x++) { + builder.endControlFlow(); + } + } else { + builder.endControlFlow(); + } } - if (!expr.conditions().isEmpty()) { + if (needsReturn(expr)) { builder.addStatement("return $T.carryOn()", typeMirror.rulesResult().type()); } } + private boolean needsReturn(RuleSetExpression expr) { + // If the expression can be inlined, then it doesn't live in + // its own method, so no return at the end is required. + if (canBeInlined(expr)) { + return false; + } + // If the expression has conditions, they will all be wrapped in + // if-blocks, so at the end of the method we need to return + // carryOn(). + if (!expr.conditions().isEmpty()) { + return true; + } + // If the expression doesn't have any conditions and doesn't + // have any children, then we need to return carryOn(). This + // case SHOULD NOT happen, but we assume below that there are + // children, thus adding the test here. + if (expr.children().isEmpty()) { + return true; + } + // We have children, check the last one. + int size = expr.children().size(); + RuleSetExpression child = expr.children().get(size - 1); + // If it is a tree, then we don't need a return. + if (child.isTree()) { + return false; + } + // The child is not a tree, so it was inlined. Check whether it + // has any conditions; if so, its body will already be inside + // a block, so we need to return after it.
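+        // For illustration (hypothetical generated shape, not taken from this change): an + // inlined last child with conditions emits roughly `if (cond) { ...; return result; }`, + // which falls through when the condition fails, so the enclosing method still needs a + // trailing `return RuleResult.carryOn()`.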
+ return !child.conditions().isEmpty(); + } + private void codegenTreeBody(RuleSetExpression expr) { List children = expr.children(); int size = children.size(); + boolean isFirst = true; for (int idx = 0; idx < size; ++idx) { RuleSetExpression child = children.get(idx); + if (canBeInlined(child)) { + child.accept(this); + continue; + } boolean isLast = idx == size - 1; if (isLast) { - builder.addStatement("return $L(params, locals)", - child.ruleId()); + builder.addStatement("return $L($L)", + child.ruleId(), + callParams(child.ruleId())); continue; } - boolean isFirst = idx == 0; + if (isFirst) { - builder.addStatement("$T result = $L(params, locals)", + isFirst = false; + builder.addStatement("$T result = $L($L)", typeMirror.rulesResult().type(), - child.ruleId()); + child.ruleId(), + callParams(child.ruleId())); } else { - builder.addStatement("result = $L(params, locals)", - child.ruleId()); + builder.addStatement("result = $L($L)", + child.ruleId(), + callParams(child.ruleId())); } builder.beginControlFlow("if (result.isResolved())") .addStatement("return result") .endControlFlow(); } + } + private boolean canBeInlined(RuleSetExpression child) { + return !child.isTree(); + } + + private String callParams(String ruleId) { + ComputeScopeTree.Scope scope = ruleIdToScope.get(ruleId); + String args = scope.usesLocals().stream() + .filter(a -> !scope.defines().contains(a)) + .collect(Collectors.joining(", ")); + if (args.isEmpty()) { + return "params"; + } + return "params, " + args; } @Override @@ -381,7 +432,6 @@ private void addAttributeBlock(String k, RuleExpression v) { builder.add(")"); } - public CodeBlock.Builder builder() { return builder; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/CodegenExpressionBuidler.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/CodegenExpressionBuidler.java index 57aa63980d2b..6488f015ad7b 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/CodegenExpressionBuidler.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/CodegenExpressionBuidler.java @@ -21,10 +21,16 @@ public final class CodegenExpressionBuidler { private final RuleSetExpression root; private final SymbolTable symbolTable; + private final Map scopesByName; - public CodegenExpressionBuidler(RuleSetExpression root, SymbolTable symbolTable) { + public CodegenExpressionBuidler( + RuleSetExpression root, + SymbolTable symbolTable, + Map scopesByName + ) { this.root = root; this.symbolTable = symbolTable; + this.scopesByName = scopesByName; } public static CodegenExpressionBuidler from(RuleSetExpression root, RuleRuntimeTypeMirror typeMirror, SymbolTable table) { @@ -36,10 +42,17 @@ public static CodegenExpressionBuidler from(RuleSetExpression root, RuleRuntimeT } table = assignTypesVisitor.symbolTable(); root = assignIdentifier(root); - PrepareForCodegenVisitor prepareForCodegenVisitor = new PrepareForCodegenVisitor(table); - root = (RuleSetExpression) root.accept(prepareForCodegenVisitor); - table = prepareForCodegenVisitor.symbolTable(); - return new CodegenExpressionBuidler(root, table); + + RenameForCodegenVisitor renameForCodegenVisitor = new RenameForCodegenVisitor(table); + root = (RuleSetExpression) root.accept(renameForCodegenVisitor); + table = renameForCodegenVisitor.symbolTable(); + + ComputeScopeTree computeScopeTree = new ComputeScopeTree(table); + root.accept(computeScopeTree); + + PrepareForCodegenVisitor prepareForCodegenVisitor = new PrepareForCodegenVisitor(); + 
RuleSetExpression newRoot = (RuleSetExpression) root.accept(prepareForCodegenVisitor); + return new CodegenExpressionBuidler(newRoot, table, computeScopeTree.scopesByName()); } private static RuleSetExpression assignIdentifier(RuleSetExpression root) { @@ -51,27 +64,15 @@ public RuleSetExpression root() { return root; } - public boolean isParam(String name) { - return symbolTable.isParam(name); - } - - public boolean isLocal(String name) { - return symbolTable.isLocal(name); - } - public String regionParamName() { return symbolTable.regionParamName(); } - public Map locals() { - return symbolTable.locals(); - } - - public Map params() { - return symbolTable.params(); - } - public SymbolTable symbolTable() { return symbolTable; } + + public Map scopesByName() { + return scopesByName; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/ComputeScopeTree.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/ComputeScopeTree.java new file mode 100644 index 000000000000..42f612f87418 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/ComputeScopeTree.java @@ -0,0 +1,193 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.rules2; + +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Deque; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +/** + * Computes all the symbols (locals and params) used by each of the rules, either directly or transitively. + */ +public final class ComputeScopeTree extends WalkRuleExpressionVisitor { + private final SymbolTable symbolTable; + private final Deque scopes = new ArrayDeque<>(); + private final Map scopesByName = new HashMap<>(); + private Scope result; + + public ComputeScopeTree(SymbolTable symbolTable) { + this.symbolTable = symbolTable; + } + + /** + * Returns the root scope. + */ + public Scope result() { + return result; + } + + /** + * Returns the mapping between rule id and scope.
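+     * For example, {@code scopesByName().get("endpointRule0")} (rule id shown for illustration) returns the scope + * computed for that rule: the locals it defines, plus the locals and params it uses directly or transitively.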
+ */ + public Map scopesByName() { + return scopesByName; + } + + @Override + public Void visitRuleSetExpression(RuleSetExpression node) { + ScopeBuilder scopeBuilder = new ScopeBuilder(); + scopeBuilder.ruleId(node.ruleId()); + scopes.push(scopeBuilder); + super.visitRuleSetExpression(node); + result = scopes.pop().build(); + scopesByName.put(result.ruleId(), result); + if (!scopes.isEmpty()) { + scopes.peekFirst().addChild(result); + } + return null; + } + + @Override + public Void visitVariableReferenceExpression(VariableReferenceExpression e) { + String variableName = e.variableName(); + ScopeBuilder current = scopes.peekFirst(); + if (symbolTable.isLocal(variableName)) { + current.usesLocal(variableName); + } else if (symbolTable.isParam(variableName)) { + current.usesParam(variableName); + } + return null; + } + + @Override + public Void visitLetExpression(LetExpression e) { + ScopeBuilder scopeBuilder = scopes.peekFirst(); + for (String binding : e.bindings().keySet()) { + scopeBuilder.defines(binding); + } + return super.visitLetExpression(e); + } + + public static class Scope { + private final String ruleId; + private final Set defines; + private final Set usesLocals; + private final Set usesParams; + private final List children; + + public Scope(ScopeBuilder builder) { + this.ruleId = Objects.requireNonNull(builder.ruleId, "ruleId cannot be null"); + this.defines = Collections.unmodifiableSet(new LinkedHashSet<>(builder.defines)); + this.usesLocals = Collections.unmodifiableSet(new LinkedHashSet<>(builder.usesLocals)); + this.usesParams = Collections.unmodifiableSet(new LinkedHashSet<>(builder.usesParams)); + this.children = Collections.unmodifiableList(new ArrayList<>(builder.children)); + } + + public String ruleId() { + return ruleId; + } + + public Set defines() { + return defines; + } + + public Set usesLocals() { + return usesLocals; + } + + public Set usesParams() { + return usesParams; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + appendTo(0, builder); + return builder.toString(); + } + + public void appendTo(int level, StringBuilder sb) { + String prefix = levelValue(level); + sb.append(prefix).append("=========================================\n"); + sb.append(prefix).append("rule ").append(ruleId).append("\n"); + sb.append(prefix).append("defines ").append(defines).append("\n"); + sb.append(prefix).append("uses ").append(usesLocals).append("\n"); + for (Scope child : children) { + child.appendTo(level + 1, sb); + } + } + + private String levelValue(int level) { + StringBuilder result = new StringBuilder(); + for (int i = 0; i < level; i++) { + result.append(" "); + } + return result.toString(); + } + } + + public static class ScopeBuilder { + private String ruleId; + private final Set defines = new LinkedHashSet<>(); + private final Set usesLocals = new LinkedHashSet<>(); + private final Set usesParams = new LinkedHashSet<>(); + private final List children = new ArrayList<>(); + + public ScopeBuilder ruleId(String ruleId) { + this.ruleId = ruleId; + return this; + } + + public ScopeBuilder defines(String define) { + defines.add(define); + return this; + } + + public ScopeBuilder usesLocal(String use) { + usesLocals.add(use); + return this; + } + + public ScopeBuilder usesParam(String use) { + usesParams.add(use); + return this; + } + + public ScopeBuilder addChild(Scope child) { + children.add(child); + for (String local : child.usesLocals) { + if (!child.defines.contains(local)) { + usesLocals.add(local); + } + } + for 
(String param : child.usesParams) { + usesParams.add(param); + } + return this; + } + + public Scope build() { + return new Scope(this); + } + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/EndpointProviderSpec2.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/EndpointProviderSpec2.java index adbed805cc40..831b8d88af83 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/EndpointProviderSpec2.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/EndpointProviderSpec2.java @@ -123,8 +123,6 @@ public TypeSpec poetSpec() { .addSuperinterface(endpointRulesSpecUtils.providerInterfaceName()) .addAnnotation(SdkInternalApi.class); - builder.addType(codegenLocalState()); - builder.addType(codegenLocalStateBuilder()); builder.addMethod(resolveEndpointMethod()); List methods = new ArrayList<>(); createRuleMethod(utils.root(), methods); @@ -154,11 +152,11 @@ private MethodSpec resolveEndpointMethod() { builder.beginControlFlow("try"); String regionParamName = utils.regionParamName(); if (regionParamName != null) { - builder.addStatement("$T result = $L(params, new $T(params.$L()))", ruleResult(), utils.root().ruleId(), - ClassName.bestGuess("LocalState"), regionParamName); + builder.addStatement("$T region = params.$L()", Region.class, regionParamName); + builder.addStatement("$T regionId = region == null ? null : region.id()", String.class); + builder.addStatement("$T result = $L(params, regionId)", ruleResult(), utils.root().ruleId()); } else { - builder.addStatement("$T result = $L(params, new $T())", ruleResult(), utils.root().ruleId(), - ClassName.bestGuess("LocalState")); + builder.addStatement("$T result = $L(params)", ruleResult(), utils.root().ruleId()); } builder.beginControlFlow("if (result.canContinue())") .addStatement("throw $T.create($S)", SdkClientException.class, "Rule engine did not reach an error or " @@ -206,7 +204,9 @@ private void createRuleMethod(RuleSetExpression expr, List m builder.addCode(block.build()); if (expr.isTree()) { for (RuleSetExpression child : expr.children()) { - createRuleMethod(child, methods); + if (child.isTree()) { + createRuleMethod(child, methods); + } } } } @@ -215,110 +215,26 @@ private MethodSpec.Builder methodBuilderForRule(RuleSetExpression expr) { MethodSpec.Builder builder = MethodSpec.methodBuilder(expr.ruleId()) .addModifiers(Modifier.PRIVATE, Modifier.STATIC) - .returns(ruleResult()) - .addParameter(endpointRulesSpecUtils.parametersClassName(), "params"); - builder.addParameter(ClassName.bestGuess("LocalState"), "locals"); + .returns(ruleResult()); + ComputeScopeTree.Scope scope = utils.scopesByName().get(expr.ruleId()); + builder.addParameter(endpointRulesSpecUtils.parametersClassName(), "params"); + for (String param : scope.usesLocals()) { + if (scope.defines().contains(param)) { + continue; + } + RuleType type = utils.symbolTable().localType(param); + builder.addParameter(type.javaType(), param); + } return builder; } - private void codegenExpr(RuleExpression expr, CodeBlock.Builder builder) { + private void codegenExpr(RuleSetExpression expr, CodeBlock.Builder builder) { CodeGeneratorVisitor visitor = new CodeGeneratorVisitor(typeMirror, utils.symbolTable(), knownEndpointAttributes, + utils.scopesByName(), builder); - expr.accept(visitor); - } - - private TypeSpec codegenLocalState() { - TypeSpec.Builder b = TypeSpec.classBuilder("LocalState") - .addModifiers(Modifier.PRIVATE, Modifier.STATIC, Modifier.FINAL); - Map locals = 
utils.locals(); - locals.forEach((k, v) -> { - b.addField(v.javaType(), k, Modifier.PRIVATE, Modifier.FINAL); - }); - MethodSpec.Builder emptyCtor = MethodSpec.constructorBuilder(); - locals.forEach((k, v) -> { - emptyCtor.addStatement("this.$1L = null", k); - }); - b.addMethod(emptyCtor.build()); - String regionParamName = utils.regionParamName(); - if (regionParamName != null) { - MethodSpec.Builder regionCtor = MethodSpec.constructorBuilder() - .addParameter(Region.class, "region"); - locals.forEach((k, v) -> { - if (k.equals(regionParamName)) { - regionCtor.beginControlFlow("if (region != null)") - .addStatement("this.$L = region.id()", regionParamName) - .nextControlFlow("else") - .addStatement("this.$L = null", regionParamName) - .endControlFlow(); - } else { - regionCtor.addStatement("this.$1L = null", k); - } - }); - b.addMethod(regionCtor.build()); - - } - ClassName localStateBuilder = ClassName.bestGuess("LocalStateBuilder"); - MethodSpec.Builder builderCtor = MethodSpec - .constructorBuilder() - .addParameter(localStateBuilder, "builder"); - - locals.forEach((k, v) -> { - builderCtor.addStatement("this.$1L = builder.$1L", k); - }); - - b.addMethod(builderCtor.build()); - locals.forEach((k, v) -> { - b.addMethod(MethodSpec.methodBuilder(k) - .addModifiers(Modifier.PUBLIC) - .returns(v.javaType()) - .addStatement("return this.$L", k) - .build()); - }); - b.addMethod(MethodSpec.methodBuilder("toBuilder") - .addModifiers(Modifier.PUBLIC) - .returns(localStateBuilder) - .addStatement("return new $T(this)", localStateBuilder) - .build()); - return b.build(); - } - - private TypeSpec codegenLocalStateBuilder() { - ClassName localStateClass = ClassName.bestGuess("LocalState"); - ClassName builderClass = ClassName.bestGuess("LocalStateBuilder"); - TypeSpec.Builder b = TypeSpec.classBuilder("LocalStateBuilder") - .addModifiers(Modifier.PRIVATE, Modifier.STATIC, Modifier.FINAL); - Map locals = utils.locals(); - locals.forEach((k, v) -> { - b.addField(v.javaType(), k, Modifier.PRIVATE); - }); - MethodSpec.Builder emptyCtor = MethodSpec.constructorBuilder(); - locals.forEach((k, v) -> { - emptyCtor.addStatement("this.$1L = null", k); - }); - b.addMethod(emptyCtor.build()); - MethodSpec.Builder stateCtor = MethodSpec - .constructorBuilder() - .addParameter(localStateClass, "locals"); - locals.forEach((k, v) -> { - stateCtor.addStatement("this.$1L = locals.$1L", k); - }); - b.addMethod(stateCtor.build()); - locals.forEach((k, v) -> { - b.addMethod(MethodSpec.methodBuilder(k) - .addModifiers(Modifier.PUBLIC) - .returns(builderClass) - .addParameter(v.javaType(), "value") - .addStatement("this.$L = value", k) - .addStatement("return this") - .build()); - }); - b.addMethod(MethodSpec.methodBuilder("build") - .returns(localStateClass) - .addStatement("return new $T(this)", localStateClass) - .build()); - return b.build(); + visitor.visitRuleSetExpression(expr); } private TypeName ruleResult() { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/PrepareForCodegenVisitor.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/PrepareForCodegenVisitor.java index 23c3c4ad7c43..a453812bd5a0 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/PrepareForCodegenVisitor.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/PrepareForCodegenVisitor.java @@ -16,29 +16,13 @@ package software.amazon.awssdk.codegen.poet.rules2; import java.util.List; -import software.amazon.awssdk.codegen.internal.Utils; -import 
software.amazon.awssdk.utils.internal.CodegenNamingUtils; /** - * Visitor that rewrites some expressions in preparation for codegen and also renaming locals assignments to use idiomatic java - * names. This visitor in particular rewrites variable references to the equivalent to {@code getAttr(params, NAME)} or {@code - * getAttr(locals, NAME)}, depending on whether the reference is an endpoint params variable or a locally assigned one. + * Visitor that rewrites expressions in preparation for codegen. */ public final class PrepareForCodegenVisitor extends RewriteRuleExpressionVisitor { - private final SymbolTable symbolTable; - private final SymbolTable.Builder renames; - public PrepareForCodegenVisitor(SymbolTable symbolTable) { - this.symbolTable = symbolTable; - this.renames = SymbolTable.builder(); - } - - public SymbolTable symbolTable() { - String regionParamName = symbolTable.regionParamName(); - if (regionParamName != null) { - renames.regionParamName(javaName(regionParamName)); - } - return renames.build(); + public PrepareForCodegenVisitor() { } @Override @@ -72,34 +56,6 @@ public RuleExpression visitFunctionCallExpression(FunctionCallExpression e) { } } - @Override - public RuleExpression visitVariableReferenceExpression(VariableReferenceExpression e) { - String name = e.variableName(); - if (symbolTable.isLocal(name)) { - RuleType type = symbolTable.localType(name); - String newName = javaName(name); - renames.putLocal(newName, type); - return MemberAccessExpression - .builder() - .type(e.type()) - .source(VariableReferenceExpression.builder().variableName("locals").build()) - .name(newName) - .build(); - } - if (symbolTable.isParam(name)) { - RuleType type = symbolTable.paramType(name); - String newName = javaName(name); - renames.putParam(newName, type); - return MemberAccessExpression - .builder() - .type(e.type()) - .source(VariableReferenceExpression.builder().variableName("params").build()) - .name(newName) - .build(); - } - return e; - } - @Override public RuleExpression visitIndexedAccessExpression(IndexedAccessExpression e) { e = (IndexedAccessExpression) super.visitIndexedAccessExpression(e); @@ -112,18 +68,6 @@ public RuleExpression visitIndexedAccessExpression(IndexedAccessExpression e) { .build(); } - @Override - public RuleExpression visitLetExpression(LetExpression e) { - LetExpression.Builder builder = LetExpression.builder(); - e.bindings().forEach((k, v) -> { - String newName = javaName(k); - RuleExpression value = v.accept(this); - builder.putBinding(newName, value); - renames.putLocal(newName, value.type()); - }); - return builder.build(); - } - /** * Transforms the following expressions: *
@@ -212,8 +156,4 @@ private RuleExpression simplifyNotExpression(FunctionCallExpression e) { } return e; } - - private String javaName(String name) { - return Utils.unCapitalize(CodegenNamingUtils.pascalCase(name)); - } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/RenameForCodegenVisitor.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/RenameForCodegenVisitor.java new file mode 100644 index 000000000000..1c09b51c398a --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/RenameForCodegenVisitor.java @@ -0,0 +1,86 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.rules2; + +import software.amazon.awssdk.codegen.internal.Utils; +import software.amazon.awssdk.utils.internal.CodegenNamingUtils; + +/** + * Visitor that renames local assignments to use idiomatic Java names. This visitor also rewrites variable references into + * the equivalent of {@code getAttr(params, NAME)}, i.e., a call to the corresponding getter method on the params. + */ +public final class RenameForCodegenVisitor extends RewriteRuleExpressionVisitor { + private final SymbolTable symbolTable; + private final SymbolTable.Builder renames; + + public RenameForCodegenVisitor(SymbolTable symbolTable) { + this.symbolTable = symbolTable; + this.renames = SymbolTable.builder(); + } + + /** + * Returns the new symbol table with the renamed symbols.
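+     * For instance, a rule-set local named {@code Partition-Result} (an illustrative name) would be re-registered + * under the idiomatic Java name {@code partitionResult}, with references to it renamed to match.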
+ */ + public SymbolTable symbolTable() { + String regionParamName = symbolTable.regionParamName(); + if (regionParamName != null) { + renames.regionParamName(javaName(regionParamName)); + } + return renames.build(); + } + + @Override + public RuleExpression visitVariableReferenceExpression(VariableReferenceExpression e) { + String name = e.variableName(); + if (symbolTable.isLocal(name)) { + RuleType type = symbolTable.localType(name); + String newName = javaName(name); + renames.putLocal(newName, type); + return VariableReferenceExpression + .builder() + .variableName(newName) + .build(); + } + if (symbolTable.isParam(name)) { + RuleType type = symbolTable.paramType(name); + String newName = javaName(name); + renames.putParam(newName, type); + return MemberAccessExpression + .builder() + .type(e.type()) + .source(VariableReferenceExpression.builder().variableName("params").build()) + .name(newName) + .build(); + } + return e; + } + + @Override + public RuleExpression visitLetExpression(LetExpression e) { + LetExpression.Builder builder = LetExpression.builder(); + e.bindings().forEach((k, v) -> { + String newName = javaName(k); + RuleExpression value = v.accept(this); + builder.putBinding(newName, value); + renames.putLocal(newName, value.type()); + }); + return builder.build(); + } + + private String javaName(String name) { + return Utils.unCapitalize(CodegenNamingUtils.pascalCase(name)); + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/RuleSetExpression.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/RuleSetExpression.java index f7682eaec0cd..13a3ccafc51b 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/RuleSetExpression.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/RuleSetExpression.java @@ -157,6 +157,16 @@ public boolean isTree() { return endpoint == null && error == null; } + public String category() { + if (isEndpoint()) { + return "endpoint"; + } + if (isError()) { + return "error"; + } + return "tree"; + } + @Override public RuleType type() { return type; diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/WalkRuleExpressionVisitor.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/WalkRuleExpressionVisitor.java index fa59498b61a1..de952b6d67da 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/WalkRuleExpressionVisitor.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/WalkRuleExpressionVisitor.java @@ -93,7 +93,7 @@ public Void visitRuleSetExpression(RuleSetExpression e) { visitAll(e.conditions()); ErrorExpression error = e.error(); if (error != null) { - e.accept(this); + error.accept(this); } EndpointExpression endpoint = e.endpoint(); if (endpoint != null) { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/utils/AuthUtils.java b/codegen/src/main/java/software/amazon/awssdk/codegen/utils/AuthUtils.java index f870ceea284d..004d64fac245 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/utils/AuthUtils.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/utils/AuthUtils.java @@ -68,7 +68,8 @@ public static boolean isOpBearerAuth(IntermediateModel model, OperationModel opM } private static boolean isServiceBearerAuth(IntermediateModel model) { - return model.getMetadata().getAuthType() == AuthType.BEARER; + return model.getMetadata().getAuthType() == AuthType.BEARER || + (model.getMetadata().getAuth() != null && 
model.getMetadata().getAuth().contains(AuthType.BEARER)); } private static boolean isServiceSigv4a(IntermediateModel model) { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/utils/ProtocolUtils.java b/codegen/src/main/java/software/amazon/awssdk/codegen/utils/ProtocolUtils.java new file mode 100644 index 000000000000..0c3dfbccf2e8 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/utils/ProtocolUtils.java @@ -0,0 +1,63 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.utils; + +import java.util.Arrays; +import java.util.List; +import software.amazon.awssdk.codegen.model.service.ServiceMetadata; + +/** + * Resolves the protocol from the service model {@code protocol} and {@code protocols} fields. + */ +public final class ProtocolUtils { + + /** + * Priority-ordered list of protocols supported by the SDK. + */ + private static final List SUPPORTED_PROTOCOLS = Arrays.asList( + "smithy-rpc-v2-cbor", "json", "rest-json", "rest-xml", "query", "ec2"); + + private ProtocolUtils() { + } + + /** + * {@code protocols} supersedes {@code protocol}. The highest priority protocol supported by the SDK that is present in the + * service model {@code protocols} list will be selected. If none of the values in {@code protocols} is supported by the + * SDK, an error will be thrown. If {@code protocols} is empty or null, the value from {@code protocol} will be returned. 
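+     * For example, {@code protocols = ["smithy-rpc-v2-cbor", "json"]} resolves to {@code smithy-rpc-v2-cbor}, and + * {@code ["json", "query"]} resolves to {@code json}; see {@code ProtocolUtilsTest} below for these exact cases.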
+ */ + public static String resolveProtocol(ServiceMetadata serviceMetadata) { + + List protocols = serviceMetadata.getProtocols(); + String protocol = serviceMetadata.getProtocol(); + + if (protocols == null || protocols.isEmpty()) { + return protocol; + } + + // Kinesis uses customization.config customServiceMetadata to set cbor + if ("cbor".equals(protocols.get(0))) { + return "cbor"; + } + + for (String supportedProtocol : SUPPORTED_PROTOCOLS) { + if (protocols.contains(supportedProtocol)) { + return supportedProtocol; + } + } + + throw new IllegalArgumentException("The SDK does not support any of the provided protocols: " + protocols); + } } diff --git a/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource b/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource index a2bfa6ead490..456b07fca676 100644 --- a/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource +++ b/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource @@ -17,6 +17,9 @@ "ap-east-1" : { "description" : "Asia Pacific (Hong Kong)" }, + "ap-east-2" : { + "description" : "Asia Pacific (Taipei)" + }, "ap-northeast-1" : { "description" : "Asia Pacific (Tokyo)" }, diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategyTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategyTest.java index 6b8756474405..cec5a7fd4bb2 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategyTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategyTest.java @@ -337,6 +337,39 @@ public void validateAllowsUnderscoresWithCustomization() { strategy.validateCustomerVisibleNaming(model); } + @Test + public void getSigningNameForEnvironmentVariables_convertsDashAndUppercases() { + when(serviceModel.getMetadata()).thenReturn(serviceMetadata); + when(serviceMetadata.getSigningName()).thenReturn("signing-name"); + + assertThat(strat.getSigningNameForEnvironmentVariables()).isEqualTo("SIGNING_NAME"); + } + + @Test + public void getSigningNameForSystemProperties_convertsDashAndUppercasesWords() { + when(serviceModel.getMetadata()).thenReturn(serviceMetadata); + when(serviceMetadata.getSigningName()).thenReturn("signing-name"); + + assertThat(strat.getSigningNameForSystemProperties()).isEqualTo("SigningName"); + } + + @Test + public void getSigningName_Uses_EndpointPrefix_whenSigningNameUnset() { + when(serviceModel.getMetadata()).thenReturn(serviceMetadata); + when(serviceMetadata.getSigningName()).thenReturn(null); + when(serviceMetadata.getEndpointPrefix()).thenReturn("EndpointPrefixFoo"); + + assertThat(strat.getSigningName()).isEqualTo("EndpointPrefixFoo"); + } + + @Test + public void getSigningName_Uses_SigningName() { + when(serviceModel.getMetadata()).thenReturn(serviceMetadata); + when(serviceMetadata.getSigningName()).thenReturn("Foo"); + + assertThat(strat.getSigningName()).isEqualTo("Foo"); + } + @Test public void validateServiceIdentifiersForEnvVarsAndProfileProperty() { when(serviceModel.getMetadata()).thenReturn(serviceMetadata); diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/ClientTestModels.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/ClientTestModels.java index afcc888e77d9..262f3e74de2b 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/ClientTestModels.java +++
b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/ClientTestModels.java @@ -97,6 +97,22 @@ public static IntermediateModel bearerAuthServiceModels() { return new IntermediateModelBuilder(models).build(); } + public static IntermediateModel envBearerTokenServiceModels() { + File serviceModel = new File(ClientTestModels.class.getResource( + "client/c2j/json-bearer-auth/service-2.json").getFile()); + File customizationModel = new File(ClientTestModels.class.getResource( + "client/c2j/json-bearer-auth/customization-env-bearer-token.config").getFile()); + File paginatorsModel = new File(ClientTestModels.class.getResource( + "client/c2j/json-bearer-auth/paginators.json").getFile()); + C2jModels models = C2jModels.builder() + .serviceModel(getServiceModel(serviceModel)) + .customizationConfig(getCustomizationConfig(customizationModel)) + .paginatorsModel(getPaginatorsModel(paginatorsModel)) + .build(); + + return new IntermediateModelBuilder(models).build(); + } + public static IntermediateModel restJsonServiceModels() { File serviceModel = new File(ClientTestModels.class.getResource("client/c2j/rest-json/service-2.json").getFile()); File customizationModel = new File(ClientTestModels.class.getResource("client/c2j/rest-json/customization.config").getFile()); diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecTest.java index dba6bca98c74..3e2807600928 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecTest.java @@ -66,6 +66,12 @@ static List parameters() { .caseName("query") .outputFileSuffix("default-params") .build(), + TestCase.builder() + .modelProvider(ClientTestModels::queryServiceModels) + .classSpecProvider(PreferredAuthSchemeProviderSpec::new) + .caseName("query") + .outputFileSuffix("preferred-provider") + .build(), // query-endpoint-auth-params TestCase.builder() .modelProvider(ClientTestModels::queryServiceModelsEndpointAuthParamsWithAllowList) @@ -214,6 +220,13 @@ static List parameters() { .classSpecProvider(AuthSchemeInterceptorSpec::new) .caseName("ops-auth-sigv4a-value") .outputFileSuffix("interceptor") + .build(), + // service with environment bearer token enabled + TestCase.builder() + .modelProvider(ClientTestModels::envBearerTokenServiceModels) + .classSpecProvider(AuthSchemeInterceptorSpec::new) + .caseName("env-bearer-token") + .outputFileSuffix("interceptor") .build() ); } diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClassTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClassTest.java index a09271f4001a..423ae5aba59a 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClassTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClassTest.java @@ -17,6 +17,7 @@ import static software.amazon.awssdk.codegen.poet.ClientTestModels.bearerAuthServiceModels; import static software.amazon.awssdk.codegen.poet.ClientTestModels.composedClientJsonServiceModels; +import static software.amazon.awssdk.codegen.poet.ClientTestModels.envBearerTokenServiceModels; import static software.amazon.awssdk.codegen.poet.ClientTestModels.internalConfigModels; import static 
software.amazon.awssdk.codegen.poet.ClientTestModels.operationWithNoAuth; import static software.amazon.awssdk.codegen.poet.ClientTestModels.opsWithSigv4a; @@ -87,6 +88,13 @@ void baseClientBuilderClassWithBearerAuth_sra() { validateBaseClientBuilderClassGeneration(bearerAuthServiceModels(), "test-bearer-auth-client-builder-class.java", true); } + @Test + void baseClientBuilderClassWithEnvBearerToken_sra() { + validateBaseClientBuilderClassGeneration(envBearerTokenServiceModels(), + "test-env-bearer-token-client-builder-class.java", + true); + } + @Test void baseClientBuilderClassWithNoAuthOperation_sra() { validateBaseClientBuilderClassGeneration(operationWithNoAuth(), "test-no-auth-ops-client-builder-class.java", true); diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/client/EnvironmentTokenSystemSettingsClassTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/client/EnvironmentTokenSystemSettingsClassTest.java new file mode 100644 index 000000000000..3946483248a6 --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/client/EnvironmentTokenSystemSettingsClassTest.java @@ -0,0 +1,31 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.client; + +import static org.hamcrest.MatcherAssert.assertThat; +import static software.amazon.awssdk.codegen.poet.PoetMatchers.generatesTo; + +import org.junit.Test; +import software.amazon.awssdk.codegen.poet.ClassSpec; +import software.amazon.awssdk.codegen.poet.ClientTestModels; + +public class EnvironmentTokenSystemSettingsClassTest { + @Test + public void testEnvironmentTokenSystemSettingsClass() { + ClassSpec classSpec = new EnvironmentTokenSystemSettingsClass(ClientTestModels.restJsonServiceModels()); + assertThat(classSpec, generatesTo("test-environment-token-system-settings-class.java")); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/utils/AuthUtilsTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/utils/AuthUtilsTest.java index 66e2311978ee..f93f0172fbc9 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/utils/AuthUtilsTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/utils/AuthUtilsTest.java @@ -18,6 +18,7 @@ import static org.assertj.core.api.Assertions.assertThat; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -30,15 +31,17 @@ import software.amazon.awssdk.codegen.model.intermediate.Metadata; import software.amazon.awssdk.codegen.model.intermediate.OperationModel; import software.amazon.awssdk.codegen.model.service.AuthType; +import software.amazon.awssdk.utils.CollectionUtils; public class AuthUtilsTest { @ParameterizedTest @MethodSource("serviceValues") public void testIfServiceHasBearerAuth(AuthType serviceAuthType, + List serviceAuthTypes, List opAuthTypes, Boolean expectedResult) { - IntermediateModel model = modelWith(serviceAuthType); + 
IntermediateModel model = modelWith(serviceAuthType, serviceAuthTypes); model.setOperations(createOperations(opAuthTypes)); assertThat(AuthUtils.usesBearerAuth(model)).isEqualTo(expectedResult); } @@ -47,10 +50,11 @@ private static Stream serviceValues() { List oneBearerOp = Arrays.asList(AuthType.BEARER, AuthType.S3V4, AuthType.NONE); List noBearerOp = Arrays.asList(AuthType.S3V4, AuthType.S3V4, AuthType.NONE); - return Stream.of(Arguments.of(AuthType.BEARER, noBearerOp, true), - Arguments.of(AuthType.BEARER, oneBearerOp, true), - Arguments.of(AuthType.S3V4, noBearerOp, false), - Arguments.of(AuthType.S3V4, oneBearerOp, true)); + return Stream.of(Arguments.of(AuthType.BEARER, Collections.emptyList(), noBearerOp, true), + Arguments.of(AuthType.BEARER, Collections.emptyList(), oneBearerOp, true), + Arguments.of(AuthType.S3V4, Collections.emptyList(), noBearerOp, false), + Arguments.of(AuthType.S3V4, Collections.emptyList(), oneBearerOp, true), + Arguments.of(AuthType.S3V4, oneBearerOp, noBearerOp, true)); } @ParameterizedTest @@ -106,6 +110,12 @@ private static IntermediateModel modelWith(AuthType authType) { return model; } + private static IntermediateModel modelWith(AuthType authType, List authTypes) { + IntermediateModel model = modelWith(authType); + model.getMetadata().setAuth(authTypes); + return model; + } + private static Map createOperations(List opAuthTypes) { return IntStream.range(0, opAuthTypes.size()) .boxed() diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/utils/ProtocolUtilsTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/utils/ProtocolUtilsTest.java new file mode 100644 index 000000000000..9d714de98e4d --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/utils/ProtocolUtilsTest.java @@ -0,0 +1,79 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.utils; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.stream.Stream; +import org.junit.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.codegen.model.service.ServiceMetadata; + +public class ProtocolUtilsTest { + + @ParameterizedTest + @MethodSource("protocolsValues") + public void protocolSelection(List protocols, String expectedProtocol) { + ServiceMetadata serviceMetadata = serviceMetadata(protocols); + String selectedProtocol = ProtocolUtils.resolveProtocol(serviceMetadata); + assertThat(selectedProtocol).isEqualTo(expectedProtocol); + } + + @Test + public void emptyProtocolsWithPresentProtocol() { + ServiceMetadata serviceMetadata = new ServiceMetadata(); + serviceMetadata.setProtocol("json"); + String selectedProtocol = ProtocolUtils.resolveProtocol(serviceMetadata); + assertThat(selectedProtocol).isEqualTo("json"); + } + + @Test + public void protocolsWithJson_protocolCborV2_selectsJson() { + ServiceMetadata serviceMetadata = new ServiceMetadata(); + serviceMetadata.setProtocols(Collections.singletonList("json")); + serviceMetadata.setProtocol("smithy-rpc-v2-cbor"); + String selectedProtocol = ProtocolUtils.resolveProtocol(serviceMetadata); + assertThat(selectedProtocol).isEqualTo("json"); + } + + @Test + public void protocolsWithCborV1_protocolJson_selectsCborV1() { + ServiceMetadata serviceMetadata = new ServiceMetadata(); + serviceMetadata.setProtocols(Collections.singletonList("cbor")); + serviceMetadata.setProtocol("json"); + String selectedProtocol = ProtocolUtils.resolveProtocol(serviceMetadata); + assertThat(selectedProtocol).isEqualTo("cbor"); + } + + private static Stream protocolsValues() { + return Stream.of(Arguments.of(Arrays.asList("smithy-rpc-v2-cbor", "json"), "smithy-rpc-v2-cbor"), + Arguments.of(Collections.singletonList("smithy-rpc-v2-cbor"), "smithy-rpc-v2-cbor"), + Arguments.of(Arrays.asList("smithy-rpc-v2-cbor", "json", "query"), "smithy-rpc-v2-cbor"), + Arguments.of(Arrays.asList("json", "query"), "json"), + Arguments.of(Collections.singletonList("query"), "query")); + } + + private static ServiceMetadata serviceMetadata(List protocols) { + ServiceMetadata serviceMetadata = new ServiceMetadata(); + serviceMetadata.setProtocols(protocols); + return serviceMetadata; + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/env-bearer-token-auth-scheme-interceptor.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/env-bearer-token-auth-scheme-interceptor.java new file mode 100644 index 000000000000..a7f8a8e8d336 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/env-bearer-token-auth-scheme-interceptor.java @@ -0,0 +1,164 @@ +package software.amazon.awssdk.services.json.auth.scheme.internal; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.core.SelectedAuthScheme; +import 
software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.internal.util.MetricUtils; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.core.useragent.BusinessMetricFeatureId; +import software.amazon.awssdk.http.auth.scheme.BearerAuthScheme; +import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme; +import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; +import software.amazon.awssdk.http.auth.spi.signer.HttpSigner; +import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; +import software.amazon.awssdk.identity.spi.Identity; +import software.amazon.awssdk.identity.spi.IdentityProvider; +import software.amazon.awssdk.identity.spi.IdentityProviders; +import software.amazon.awssdk.identity.spi.ResolveIdentityRequest; +import software.amazon.awssdk.identity.spi.TokenIdentity; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.services.json.auth.scheme.JsonAuthSchemeParams; +import software.amazon.awssdk.services.json.auth.scheme.JsonAuthSchemeProvider; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; + +@Generated("software.amazon.awssdk:codegen") +@SdkInternalApi +public final class JsonAuthSchemeInterceptor implements ExecutionInterceptor { + private static Logger LOG = Logger.loggerFor(JsonAuthSchemeInterceptor.class); + + @Override + public void beforeExecution(Context.BeforeExecution context, ExecutionAttributes executionAttributes) { + List authOptions = resolveAuthOptions(context, executionAttributes); + SelectedAuthScheme selectedAuthScheme = selectAuthScheme(authOptions, executionAttributes); + putSelectedAuthScheme(executionAttributes, selectedAuthScheme); + recordEnvironmentTokenBusinessMetric(selectedAuthScheme, executionAttributes); + } + + private List resolveAuthOptions(Context.BeforeExecution context, ExecutionAttributes executionAttributes) { + JsonAuthSchemeProvider authSchemeProvider = Validate.isInstanceOf(JsonAuthSchemeProvider.class, + executionAttributes.getAttribute(SdkInternalExecutionAttribute.AUTH_SCHEME_RESOLVER), + "Expected an instance of JsonAuthSchemeProvider"); + JsonAuthSchemeParams params = authSchemeParams(context.request(), executionAttributes); + return authSchemeProvider.resolveAuthScheme(params); + } + + private SelectedAuthScheme selectAuthScheme(List authOptions, + ExecutionAttributes executionAttributes) { + MetricCollector metricCollector = executionAttributes.getAttribute(SdkExecutionAttribute.API_CALL_METRIC_COLLECTOR); + Map> authSchemes = executionAttributes.getAttribute(SdkInternalExecutionAttribute.AUTH_SCHEMES); + IdentityProviders identityProviders = executionAttributes.getAttribute(SdkInternalExecutionAttribute.IDENTITY_PROVIDERS); + List> discardedReasons = new ArrayList<>(); + for (AuthSchemeOption authOption : authOptions) { + AuthScheme authScheme = authSchemes.get(authOption.schemeId()); + SelectedAuthScheme selectedAuthScheme = trySelectAuthScheme(authOption, authScheme, + identityProviders, discardedReasons, metricCollector, executionAttributes); + if (selectedAuthScheme != 
null) { + if (!discardedReasons.isEmpty()) { + LOG.debug(() -> String.format("%s auth will be used, discarded: '%s'", authOption.schemeId(), + discardedReasons.stream().map(Supplier::get).collect(Collectors.joining(", ")))); + } + return selectedAuthScheme; + } + } + throw SdkException + .builder() + .message( + "Failed to determine how to authenticate the user: " + + discardedReasons.stream().map(Supplier::get).collect(Collectors.joining(", "))).build(); + } + + private JsonAuthSchemeParams authSchemeParams(SdkRequest request, ExecutionAttributes executionAttributes) { + String operation = executionAttributes.getAttribute(SdkExecutionAttribute.OPERATION_NAME); + JsonAuthSchemeParams.Builder builder = JsonAuthSchemeParams.builder().operation(operation); + return builder.build(); + } + + private SelectedAuthScheme trySelectAuthScheme(AuthSchemeOption authOption, AuthScheme authScheme, + IdentityProviders identityProviders, List> discardedReasons, MetricCollector metricCollector, + ExecutionAttributes executionAttributes) { + if (authScheme == null) { + discardedReasons.add(() -> String.format("'%s' is not enabled for this request.", authOption.schemeId())); + return null; + } + IdentityProvider identityProvider = authScheme.identityProvider(identityProviders); + if (identityProvider == null) { + discardedReasons + .add(() -> String.format("'%s' does not have an identity provider configured.", authOption.schemeId())); + return null; + } + HttpSigner signer; + try { + signer = authScheme.signer(); + } catch (RuntimeException e) { + discardedReasons.add(() -> String.format("'%s' signer could not be retrieved: %s", authOption.schemeId(), + e.getMessage())); + return null; + } + ResolveIdentityRequest.Builder identityRequestBuilder = ResolveIdentityRequest.builder(); + authOption.forEachIdentityProperty(identityRequestBuilder::putProperty); + CompletableFuture identity; + SdkMetric metric = getIdentityMetric(identityProvider); + if (metric == null) { + identity = identityProvider.resolveIdentity(identityRequestBuilder.build()); + } else { + identity = MetricUtils.reportDuration(() -> identityProvider.resolveIdentity(identityRequestBuilder.build()), + metricCollector, metric); + } + return new SelectedAuthScheme<>(identity, signer, authOption); + } + + private SdkMetric getIdentityMetric(IdentityProvider identityProvider) { + Class identityType = identityProvider.identityType(); + if (identityType == AwsCredentialsIdentity.class) { + return CoreMetric.CREDENTIALS_FETCH_DURATION; + } + if (identityType == TokenIdentity.class) { + return CoreMetric.TOKEN_FETCH_DURATION; + } + return null; + } + + private void putSelectedAuthScheme(ExecutionAttributes attributes, + SelectedAuthScheme selectedAuthScheme) { + SelectedAuthScheme existingAuthScheme = attributes.getAttribute(SdkInternalExecutionAttribute.SELECTED_AUTH_SCHEME); + if (existingAuthScheme != null) { + AuthSchemeOption.Builder selectedOption = selectedAuthScheme.authSchemeOption().toBuilder(); + existingAuthScheme.authSchemeOption().forEachIdentityProperty(selectedOption::putIdentityPropertyIfAbsent); + existingAuthScheme.authSchemeOption().forEachSignerProperty(selectedOption::putSignerPropertyIfAbsent); + selectedAuthScheme = new SelectedAuthScheme<>(selectedAuthScheme.identity(), selectedAuthScheme.signer(), + selectedOption.build()); + } + attributes.putAttribute(SdkInternalExecutionAttribute.SELECTED_AUTH_SCHEME, selectedAuthScheme); + } + + private void recordEnvironmentTokenBusinessMetric(SelectedAuthScheme selectedAuthScheme, + 
+            ExecutionAttributes executionAttributes) {
+        String tokenFromEnv = executionAttributes.getAttribute(SdkInternalExecutionAttribute.TOKEN_CONFIGURED_FROM_ENV);
+        if (selectedAuthScheme != null && selectedAuthScheme.authSchemeOption().schemeId().equals(BearerAuthScheme.SCHEME_ID)
+                && selectedAuthScheme.identity().isDone()) {
+            if (selectedAuthScheme.identity().getNow(null) instanceof TokenIdentity) {
+                TokenIdentity configuredToken = (TokenIdentity) selectedAuthScheme.identity().getNow(null);
+                if (configuredToken.token().equals(tokenFromEnv)) {
+                    executionAttributes.getAttribute(SdkInternalExecutionAttribute.BUSINESS_METRICS).addMetric(
+                            BusinessMetricFeatureId.BEARER_SERVICE_ENV_VARS.value());
+                }
+            }
+        }
+    }
+}
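Note on the selection flow in JsonAuthSchemeInterceptor above: the interceptor walks the resolved AuthSchemeOptions in order and selects the first scheme that is registered on the client, yields an identity provider, and can produce a signer; every rejected option contributes a reason to the SdkException thrown when nothing matches. Callers can steer that input by supplying their own provider. A minimal sketch against the generated Json test client (the scheme id shown is the standard SDK bearer-auth id, and region/credentials setup is omitted; treat the exact values as illustrative):

    import java.util.Collections;
    import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption;
    import software.amazon.awssdk.services.json.JsonClient;
    import software.amazon.awssdk.services.json.auth.scheme.JsonAuthSchemeProvider;

    public class PinnedAuthSchemeExample {
        public static void main(String[] args) {
            // JsonAuthSchemeProvider has a single abstract method, so a lambda works:
            // offer exactly one option and let beforeExecution() validate it.
            JsonAuthSchemeProvider pinned = params -> Collections.singletonList(
                    AuthSchemeOption.builder()
                                    .schemeId("smithy.api#httpBearerAuth") // illustrative scheme id
                                    .build());

            JsonClient client = JsonClient.builder()
                                          .authSchemeProvider(pinned)
                                          .build();
        }
    }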
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-preferred-provider.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-preferred-provider.java
new file mode 100644
index 000000000000..279142374e6b
--- /dev/null
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-preferred-provider.java
@@ -0,0 +1,51 @@
+package software.amazon.awssdk.services.query.auth.scheme.internal;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import software.amazon.awssdk.annotations.Generated;
+import software.amazon.awssdk.annotations.SdkInternalApi;
+import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption;
+import software.amazon.awssdk.services.query.auth.scheme.QueryAuthSchemeParams;
+import software.amazon.awssdk.services.query.auth.scheme.QueryAuthSchemeProvider;
+import software.amazon.awssdk.utils.CollectionUtils;
+
+@Generated("software.amazon.awssdk:codegen")
+@SdkInternalApi
+public final class PreferredQueryAuthSchemeProvider implements QueryAuthSchemeProvider {
+    private final QueryAuthSchemeProvider delegate;
+
+    private final List<String> authSchemePreference;
+
+    public PreferredQueryAuthSchemeProvider(QueryAuthSchemeProvider delegate, List<String> authSchemePreference) {
+        this.delegate = delegate;
+        this.authSchemePreference = authSchemePreference != null ? authSchemePreference : Collections.emptyList();
+    }
+
+    /**
+     * Resolve the auth schemes based on the given set of parameters.
+     */
+    @Override
+    public List<AuthSchemeOption> resolveAuthScheme(QueryAuthSchemeParams params) {
+        List<AuthSchemeOption> candidateAuthSchemes = delegate.resolveAuthScheme(params);
+        if (CollectionUtils.isNullOrEmpty(authSchemePreference)) {
+            return candidateAuthSchemes;
+        }
+        List<AuthSchemeOption> authSchemes = new ArrayList<>();
+        authSchemePreference.forEach(preferredSchemeId -> {
+            candidateAuthSchemes
+                .stream()
+                .filter(candidate -> {
+                    String candidateSchemeName = candidate.schemeId().contains("#") ? candidate.schemeId().split("#")[1]
+                                                                                    : candidate.schemeId();
+                    return candidateSchemeName.equals(preferredSchemeId);
+                }).findFirst().ifPresent(authSchemes::add);
+        });
+        candidateAuthSchemes.forEach(candidate -> {
+            if (!authSchemes.contains(candidate)) {
+                authSchemes.add(candidate);
+            }
+        });
+        return authSchemes;
+    }
+}
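The reordering contract of PreferredQueryAuthSchemeProvider in a worked example: preference entries are matched against the scheme name after the '#', matched candidates are emitted first in preference order, and all remaining candidates keep their original order. A sketch using the defaultProvider(List<String>) factory that the next diff adds to QueryAuthSchemeProvider (the params builder shape mirrors the generated Json variant shown earlier; scheme ids follow the standard SDK values):

    import java.util.Arrays;
    import java.util.List;
    import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption;
    import software.amazon.awssdk.services.query.auth.scheme.QueryAuthSchemeParams;
    import software.amazon.awssdk.services.query.auth.scheme.QueryAuthSchemeProvider;

    public class PreferenceOrderingExample {
        public static void main(String[] args) {
            // If the delegate resolves ["aws.auth#sigv4", "smithy.api#httpBearerAuth"],
            // a preference of ["httpBearerAuth"] yields
            // ["smithy.api#httpBearerAuth", "aws.auth#sigv4"].
            QueryAuthSchemeProvider provider =
                    QueryAuthSchemeProvider.defaultProvider(Arrays.asList("httpBearerAuth"));

            List<AuthSchemeOption> options = provider.resolveAuthScheme(
                    QueryAuthSchemeParams.builder().operation("SomeOperation").build());
            options.forEach(option -> System.out.println(option.schemeId()));
        }
    }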
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-provider.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-provider.java
index a4f84dc2665a..cdbc7c4c24d0 100644
--- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-provider.java
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-provider.java
@@ -1,18 +1,3 @@
-/*
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- *  http://aws.amazon.com/apache2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
 package software.amazon.awssdk.services.query.auth.scheme;
 
 import java.util.List;
@@ -22,6 +7,7 @@
 import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption;
 import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeProvider;
 import software.amazon.awssdk.services.query.auth.scheme.internal.DefaultQueryAuthSchemeProvider;
+import software.amazon.awssdk.services.query.auth.scheme.internal.PreferredQueryAuthSchemeProvider;
 
 /**
  * An auth scheme provider for Query service. The auth scheme provider takes a set of parameters using
@@ -50,4 +36,11 @@ default List<AuthSchemeOption> resolveAuthScheme(Consumer<QueryAuthSchemeParams.Builder>
+
+    static QueryAuthSchemeProvider defaultProvider(List<String> authSchemePreference) {
+        return new PreferredQueryAuthSchemeProvider(defaultProvider(), authSchemePreference);
+    }
 }
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-auth-scheme-provider.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-auth-scheme-provider.java
index a4f84dc2665a..cdbc7c4c24d0 100644
--- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-auth-scheme-provider.java
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-auth-scheme-provider.java
@@ -1,18 +1,3 @@
-/*
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- *  http://aws.amazon.com/apache2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
 package software.amazon.awssdk.services.query.auth.scheme;
 
 import java.util.List;
@@ -22,6 +7,7 @@
 import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption;
 import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeProvider;
 import software.amazon.awssdk.services.query.auth.scheme.internal.DefaultQueryAuthSchemeProvider;
+import software.amazon.awssdk.services.query.auth.scheme.internal.PreferredQueryAuthSchemeProvider;
 
 /**
  * An auth scheme provider for Query service. The auth scheme provider takes a set of parameters using
@@ -50,4 +36,11 @@ default List<AuthSchemeOption> resolveAuthScheme(Consumer<QueryAuthSchemeParams.Builder>
+
+    static QueryAuthSchemeProvider defaultProvider(List<String> authSchemePreference) {
+        return new PreferredQueryAuthSchemeProvider(defaultProvider(), authSchemePreference);
+    }
 }
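Both provider interfaces above gain the same defaultProvider(List<String>) overload, and the builder diffs below wire it to the new AuthSchemePreferenceResolver from aws-core, passing the client's profile file and profile name. A sketch of resolving a preference list directly, using only the calls that appear in these diffs (the profile name is hypothetical):

    import java.util.List;
    import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver;

    public class PreferenceResolutionExample {
        public static void main(String[] args) {
            // Reads the auth scheme preference from system settings or the named profile;
            // a configured value like "sigv4a,sigv4" resolves to ["sigv4a", "sigv4"].
            AuthSchemePreferenceResolver resolver = AuthSchemePreferenceResolver.builder()
                                                                                .profileName("dev") // hypothetical profile
                                                                                .build();
            List<String> preferences = resolver.resolveAuthSchemePreference();
            System.out.println(preferences); // empty when nothing is configured
        }
    }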
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-bearer-auth-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-bearer-auth-client-builder-class.java
index 4dee534fba41..ee8f9a73d3e5 100644
--- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-bearer-auth-client-builder-class.java
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-bearer-auth-client-builder-class.java
@@ -10,6 +10,7 @@
 import software.amazon.awssdk.annotations.SdkInternalApi;
 import software.amazon.awssdk.auth.credentials.TokenUtils;
 import software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider;
+import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver;
 import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder;
 import software.amazon.awssdk.awscore.client.config.AwsClientOption;
 import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider;
@@ -59,14 +60,15 @@ protected final String serviceName() {
 
     @Override
     protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) {
-        return config.merge(c -> c
-            .option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider())
-            .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider())
-            .option(SdkClientOption.AUTH_SCHEMES, authSchemes())
-            .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)
-            .lazyOption(AwsClientOption.TOKEN_PROVIDER,
-                        p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER)))
-            .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()));
+        return config.merge(c -> {
+            c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider())
+             .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config))
+             .option(SdkClientOption.AUTH_SCHEMES, authSchemes())
+             .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)
+             .lazyOption(AwsClientOption.TOKEN_PROVIDER,
+                         p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER)))
+             .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider());
+        });
     }
 
     @Override
@@ -77,7 +79,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon
         endpointInterceptors.add(new JsonRequestSetEndpointInterceptor());
         ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory();
         List<ExecutionInterceptor> interceptors = interceptorFactory
-            .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors");
+                .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors");
         List<ExecutionInterceptor> additionalInterceptors = new ArrayList<>();
         interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors);
         interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors);
@@ -93,21 +95,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon
         });
         builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors);
         builder.lazyOptionIfAbsent(
-            SdkClientOption.CLIENT_ENDPOINT_PROVIDER,
-            c -> AwsClientEndpointProvider
-                .builder()
-                .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE")
-                .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson")
-                .serviceProfileProperty("json_service")
-                .serviceEndpointPrefix(serviceEndpointPrefix())
-                .defaultProtocol("https")
-                .region(c.get(AwsClientOption.AWS_REGION))
-                .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER))
-                .profileName(c.get(SdkClientOption.PROFILE_NAME))
-                .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT,
-                                   c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT))
-                .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED))
-                .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build());
+                SdkClientOption.CLIENT_ENDPOINT_PROVIDER,
+                c -> AwsClientEndpointProvider
+                        .builder()
+                        .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE")
+                        .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson")
+                        .serviceProfileProperty("json_service")
+                        .serviceEndpointPrefix(serviceEndpointPrefix())
+                        .defaultProtocol("https")
+                        .region(c.get(AwsClientOption.AWS_REGION))
+                        .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER))
+                        .profileName(c.get(SdkClientOption.PROFILE_NAME))
+                        .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT,
+                                           c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT))
+                        .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED))
+                        .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build());
         builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true);
         return builder.build();
     }
@@ -126,7 +128,14 @@ public B authSchemeProvider(JsonAuthSchemeProvider authSchemeProvider) {
         return thisBuilder();
     }
 
-    private JsonAuthSchemeProvider defaultAuthSchemeProvider() {
+    private JsonAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) {
+        AuthSchemePreferenceResolver authSchemePreferenceProvider = AuthSchemePreferenceResolver.builder()
+            .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER))
+            .profileName(config.option(SdkClientOption.PROFILE_NAME)).build();
+        List<String> preferences = authSchemePreferenceProvider.resolveAuthSchemePreference();
+        if (!preferences.isEmpty()) {
+            return JsonAuthSchemeProvider.defaultProvider(preferences);
+        }
         return JsonAuthSchemeProvider.defaultProvider();
     }
 
@@ -196,6 +205,6 @@ private List<SdkPlugin> internalPlugins(SdkClientConfiguration config) {
 
     protected static void validateClientOptions(SdkClientConfiguration c) {
         Validate.notNull(c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER),
-            "The 'tokenProvider' must be configured in the client builder.");
+                         "The 'tokenProvider' must be configured in the client builder.");
     }
 }
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-class.java
index 83b6266466fd..a0bdac67d04d 100644
---
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-class.java @@ -12,6 +12,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.credentials.TokenUtils; import software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -70,15 +71,16 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c - .option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider()) - .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) - .option(SdkClientOption.SERVICE_CONFIGURATION, ServiceConfiguration.builder().build()) - .lazyOption(AwsClientOption.TOKEN_PROVIDER, - p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) - .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider())); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config)) + .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .option(SdkClientOption.SERVICE_CONFIGURATION, ServiceConfiguration.builder().build()) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()); + }); } @Override @@ -89,82 +91,82 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new JsonRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); List additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); interceptors = CollectionUtils.mergeLists(interceptors, config.option(SdkClientOption.EXECUTION_INTERCEPTORS)); ServiceConfiguration.Builder serviceConfigBuilder = ((ServiceConfiguration) config - .option(SdkClientOption.SERVICE_CONFIGURATION)).toBuilder(); + .option(SdkClientOption.SERVICE_CONFIGURATION)).toBuilder(); serviceConfigBuilder.profileFile(serviceConfigBuilder.profileFileSupplier() != null ? serviceConfigBuilder - .profileFileSupplier() : config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)); + .profileFileSupplier() : config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)); serviceConfigBuilder.profileName(serviceConfigBuilder.profileName() != null ? 
serviceConfigBuilder.profileName() : config - .option(SdkClientOption.PROFILE_NAME)); + .option(SdkClientOption.PROFILE_NAME)); if (serviceConfigBuilder.dualstackEnabled() != null) { Validate.validState( - config.option(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED) == null, - "Dualstack has been configured on both ServiceConfiguration and the client/global level. Please limit dualstack configuration to one location."); + config.option(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED) == null, + "Dualstack has been configured on both ServiceConfiguration and the client/global level. Please limit dualstack configuration to one location."); } else { serviceConfigBuilder.dualstackEnabled(config.option(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)); } if (serviceConfigBuilder.fipsModeEnabled() != null) { Validate.validState( - config.option(AwsClientOption.FIPS_ENDPOINT_ENABLED) == null, - "Fips has been configured on both ServiceConfiguration and the client/global level. Please limit fips configuration to one location."); + config.option(AwsClientOption.FIPS_ENDPOINT_ENABLED) == null, + "Fips has been configured on both ServiceConfiguration and the client/global level. Please limit fips configuration to one location."); } else { serviceConfigBuilder.fipsModeEnabled(config.option(AwsClientOption.FIPS_ENDPOINT_ENABLED)); } if (serviceConfigBuilder.useArnRegionEnabled() != null) { Validate.validState( - clientContextParams.get(JsonClientContextParams.USE_ARN_REGION) == null, - "UseArnRegion has been configured on both ServiceConfiguration and the client/global level. Please limit UseArnRegion configuration to one location."); + clientContextParams.get(JsonClientContextParams.USE_ARN_REGION) == null, + "UseArnRegion has been configured on both ServiceConfiguration and the client/global level. Please limit UseArnRegion configuration to one location."); } else { serviceConfigBuilder.useArnRegionEnabled(clientContextParams.get(JsonClientContextParams.USE_ARN_REGION)); } if (serviceConfigBuilder.multiRegionEnabled() != null) { Validate.validState( - clientContextParams.get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS) == null, - "DisableMultiRegionAccessPoints has been configured on both ServiceConfiguration and the client/global level. Please limit DisableMultiRegionAccessPoints configuration to one location."); + clientContextParams.get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS) == null, + "DisableMultiRegionAccessPoints has been configured on both ServiceConfiguration and the client/global level. Please limit DisableMultiRegionAccessPoints configuration to one location."); } else if (clientContextParams.get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS) != null) { serviceConfigBuilder.multiRegionEnabled(!clientContextParams - .get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS)); + .get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS)); } if (serviceConfigBuilder.pathStyleAccessEnabled() != null) { Validate.validState( - clientContextParams.get(JsonClientContextParams.FORCE_PATH_STYLE) == null, - "ForcePathStyle has been configured on both ServiceConfiguration and the client/global level. Please limit ForcePathStyle configuration to one location."); + clientContextParams.get(JsonClientContextParams.FORCE_PATH_STYLE) == null, + "ForcePathStyle has been configured on both ServiceConfiguration and the client/global level. 
Please limit ForcePathStyle configuration to one location."); } else { serviceConfigBuilder.pathStyleAccessEnabled(clientContextParams.get(JsonClientContextParams.FORCE_PATH_STYLE)); } if (serviceConfigBuilder.accelerateModeEnabled() != null) { Validate.validState( - clientContextParams.get(JsonClientContextParams.ACCELERATE) == null, - "Accelerate has been configured on both ServiceConfiguration and the client/global level. Please limit Accelerate configuration to one location."); + clientContextParams.get(JsonClientContextParams.ACCELERATE) == null, + "Accelerate has been configured on both ServiceConfiguration and the client/global level. Please limit Accelerate configuration to one location."); } else { serviceConfigBuilder.accelerateModeEnabled(clientContextParams.get(JsonClientContextParams.ACCELERATE)); } Boolean checksumValidationEnabled = serviceConfigBuilder.checksumValidationEnabled(); if (checksumValidationEnabled != null) { Validate.validState( - config.option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION) == null, - "Checksum behavior has been configured on both ServiceConfiguration and the client/global level. Please limit checksum behavior configuration to one location."); + config.option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION) == null, + "Checksum behavior has been configured on both ServiceConfiguration and the client/global level. Please limit checksum behavior configuration to one location."); Validate.validState( - config.option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION) == null, - "Checksum behavior has been configured on both ServiceConfiguration and the client/global level. Please limit checksum behavior configuration to one location."); + config.option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION) == null, + "Checksum behavior has been configured on both ServiceConfiguration and the client/global level. 
Please limit checksum behavior configuration to one location."); if (checksumValidationEnabled) { config = config.toBuilder() - .option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, RequestChecksumCalculation.WHEN_SUPPORTED) - .option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION, ResponseChecksumValidation.WHEN_SUPPORTED).build(); + .option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, RequestChecksumCalculation.WHEN_SUPPORTED) + .option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION, ResponseChecksumValidation.WHEN_SUPPORTED).build(); } else { config = config.toBuilder() - .option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, RequestChecksumCalculation.WHEN_REQUIRED) - .option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION, ResponseChecksumValidation.WHEN_REQUIRED).build(); + .option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, RequestChecksumCalculation.WHEN_REQUIRED) + .option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION, ResponseChecksumValidation.WHEN_REQUIRED).build(); } } ServiceConfiguration finalServiceConfig = serviceConfigBuilder.build(); clientContextParams.put(JsonClientContextParams.USE_ARN_REGION, finalServiceConfig.useArnRegionEnabled()); clientContextParams.put(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS, - !finalServiceConfig.multiRegionEnabled()); + !finalServiceConfig.multiRegionEnabled()); clientContextParams.put(JsonClientContextParams.FORCE_PATH_STYLE, finalServiceConfig.pathStyleAccessEnabled()); clientContextParams.put(JsonClientContextParams.ACCELERATE, finalServiceConfig.accelerateModeEnabled()); SdkClientConfiguration.Builder builder = config.toBuilder(); @@ -189,21 +191,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon } builder.option(SdkClientOption.SERVICE_CONFIGURATION, finalServiceConfig); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") - .serviceProfileProperty("json_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") + .serviceProfileProperty("json_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, 
true); SdkClientConfiguration clientConfig = config; builder.lazyOption(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, c -> resolveRequestChecksumCalculation(clientConfig)); @@ -225,7 +227,14 @@ public B authSchemeProvider(JsonAuthSchemeProvider authSchemeProvider) { return thisBuilder(); } - private JsonAuthSchemeProvider defaultAuthSchemeProvider() { + private JsonAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) { + AuthSchemePreferenceResolver authSchemePreferenceProvider = AuthSchemePreferenceResolver.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)).build(); + List preferences = authSchemePreferenceProvider.resolveAuthSchemePreference(); + if (!preferences.isEmpty()) { + return JsonAuthSchemeProvider.defaultProvider(preferences); + } return JsonAuthSchemeProvider.defaultProvider(); } @@ -327,9 +336,9 @@ private RequestChecksumCalculation resolveRequestChecksumCalculation(SdkClientCo RequestChecksumCalculation configuredChecksumCalculation = config.option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION); if (configuredChecksumCalculation == null) { configuredChecksumCalculation = RequestChecksumCalculationResolver.create() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultChecksumCalculation(RequestChecksumCalculation.WHEN_SUPPORTED).resolve(); + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultChecksumCalculation(RequestChecksumCalculation.WHEN_SUPPORTED).resolve(); } return configuredChecksumCalculation; } @@ -338,15 +347,15 @@ private ResponseChecksumValidation resolveResponseChecksumValidation(SdkClientCo ResponseChecksumValidation configuredChecksumValidation = config.option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION); if (configuredChecksumValidation == null) { configuredChecksumValidation = ResponseChecksumValidationResolver.create() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultChecksumValidation(ResponseChecksumValidation.WHEN_SUPPORTED).resolve(); + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultChecksumValidation(ResponseChecksumValidation.WHEN_SUPPORTED).resolve(); } return configuredChecksumValidation; } protected static void validateClientOptions(SdkClientConfiguration c) { Validate.notNull(c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER), - "The 'tokenProvider' must be configured in the client builder."); + "The 'tokenProvider' must be configured in the client builder."); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-endpoints-auth-params.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-endpoints-auth-params.java index 80511b9556ce..360d3664eaad 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-endpoints-auth-params.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-endpoints-auth-params.java @@ -10,6 +10,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.credentials.TokenUtils; import 
software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -68,14 +69,15 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c - .option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider()) - .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) - .lazyOption(AwsClientOption.TOKEN_PROVIDER, - p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) - .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider())); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config)) + .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()); + }); } @Override @@ -143,7 +145,14 @@ public B authSchemeProvider(QueryAuthSchemeProvider authSchemeProvider) { return thisBuilder(); } - private QueryAuthSchemeProvider defaultAuthSchemeProvider() { + private QueryAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) { + AuthSchemePreferenceResolver authSchemePreferenceProvider = AuthSchemePreferenceResolver.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)).build(); + List preferences = authSchemePreferenceProvider.resolveAuthSchemePreference(); + if (!preferences.isEmpty()) { + return QueryAuthSchemeProvider.defaultProvider(preferences); + } return QueryAuthSchemeProvider.defaultProvider(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-internal-defaults-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-internal-defaults-class.java index e7a2428380aa..9b143b9ccd69 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-internal-defaults-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-internal-defaults-class.java @@ -8,6 +8,7 @@ import java.util.function.Consumer; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -55,10 +56,12 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> 
c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider()) - .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config)) + .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false); + }); } @Override @@ -77,7 +80,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new JsonRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); List additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); @@ -93,21 +96,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon }); builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") - .serviceProfileProperty("json_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") + .serviceProfileProperty("json_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); return builder.build(); } @@ -126,7 +129,14 @@ public B authSchemeProvider(JsonAuthSchemeProvider authSchemeProvider) { return thisBuilder(); } - private JsonAuthSchemeProvider defaultAuthSchemeProvider() { + private JsonAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) { + AuthSchemePreferenceResolver 
authSchemePreferenceProvider = AuthSchemePreferenceResolver.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)).build(); + List preferences = authSchemePreferenceProvider.resolveAuthSchemePreference(); + if (!preferences.isEmpty()) { + return JsonAuthSchemeProvider.defaultProvider(preferences); + } return JsonAuthSchemeProvider.defaultProvider(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-composed-sync-default-client-builder.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-composed-sync-default-client-builder.java index 6baf26fa580e..117e19038881 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-composed-sync-default-client-builder.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-composed-sync-default-client-builder.java @@ -10,6 +10,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.credentials.TokenUtils; import software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -65,15 +66,16 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c - .option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider()) - .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) - .option(SdkClientOption.SERVICE_CONFIGURATION, ServiceConfiguration.builder().build()) - .lazyOption(AwsClientOption.TOKEN_PROVIDER, - p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) - .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider())); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config)) + .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .option(SdkClientOption.SERVICE_CONFIGURATION, ServiceConfiguration.builder().build()) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()); + }); } @Override @@ -84,17 +86,17 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new JsonRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); List additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, 
additionalInterceptors); interceptors = CollectionUtils.mergeLists(interceptors, config.option(SdkClientOption.EXECUTION_INTERCEPTORS)); ServiceConfiguration.Builder serviceConfigBuilder = ((ServiceConfiguration) config - .option(SdkClientOption.SERVICE_CONFIGURATION)).toBuilder(); + .option(SdkClientOption.SERVICE_CONFIGURATION)).toBuilder(); serviceConfigBuilder.profileFile(serviceConfigBuilder.profileFileSupplier() != null ? serviceConfigBuilder - .profileFileSupplier() : config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)); + .profileFileSupplier() : config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)); serviceConfigBuilder.profileName(serviceConfigBuilder.profileName() != null ? serviceConfigBuilder.profileName() : config - .option(SdkClientOption.PROFILE_NAME)); + .option(SdkClientOption.PROFILE_NAME)); ServiceConfiguration finalServiceConfig = serviceConfigBuilder.build(); SdkClientConfiguration.Builder builder = config.toBuilder(); builder.lazyOption(SdkClientOption.IDENTITY_PROVIDERS, c -> { @@ -112,21 +114,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.option(SdkClientOption.SERVICE_CONFIGURATION, finalServiceConfig); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") - .serviceProfileProperty("json_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") + .serviceProfileProperty("json_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); SdkClientConfiguration clientConfig = config; builder.lazyOption(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, c -> resolveRequestChecksumCalculation(clientConfig)); @@ -148,7 +150,14 @@ public B authSchemeProvider(JsonAuthSchemeProvider authSchemeProvider) { return thisBuilder(); } - private JsonAuthSchemeProvider defaultAuthSchemeProvider() { + private JsonAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) { + AuthSchemePreferenceResolver authSchemePreferenceProvider = 
AuthSchemePreferenceResolver.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)).build(); + List preferences = authSchemePreferenceProvider.resolveAuthSchemePreference(); + if (!preferences.isEmpty()) { + return JsonAuthSchemeProvider.defaultProvider(preferences); + } return JsonAuthSchemeProvider.defaultProvider(); } @@ -246,9 +255,9 @@ private RequestChecksumCalculation resolveRequestChecksumCalculation(SdkClientCo RequestChecksumCalculation configuredChecksumCalculation = config.option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION); if (configuredChecksumCalculation == null) { configuredChecksumCalculation = RequestChecksumCalculationResolver.create() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultChecksumCalculation(RequestChecksumCalculation.WHEN_SUPPORTED).resolve(); + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultChecksumCalculation(RequestChecksumCalculation.WHEN_SUPPORTED).resolve(); } return configuredChecksumCalculation; } @@ -257,15 +266,15 @@ private ResponseChecksumValidation resolveResponseChecksumValidation(SdkClientCo ResponseChecksumValidation configuredChecksumValidation = config.option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION); if (configuredChecksumValidation == null) { configuredChecksumValidation = ResponseChecksumValidationResolver.create() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultChecksumValidation(ResponseChecksumValidation.WHEN_SUPPORTED).resolve(); + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultChecksumValidation(ResponseChecksumValidation.WHEN_SUPPORTED).resolve(); } return configuredChecksumValidation; } protected static void validateClientOptions(SdkClientConfiguration c) { Validate.notNull(c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER), - "The 'tokenProvider' must be configured in the client builder."); + "The 'tokenProvider' must be configured in the client builder."); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-env-bearer-token-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-env-bearer-token-client-builder-class.java new file mode 100644 index 000000000000..48ecf08535fa --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-env-bearer-token-client-builder-class.java @@ -0,0 +1,227 @@ +package software.amazon.awssdk.services.json; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.credentials.TokenUtils; +import software.amazon.awssdk.auth.token.credentials.StaticTokenProvider; +import software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; +import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; +import 
software.amazon.awssdk.awscore.client.config.AwsClientOption;
+import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider;
+import software.amazon.awssdk.awscore.retry.AwsRetryStrategy;
+import software.amazon.awssdk.core.SdkPlugin;
+import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
+import software.amazon.awssdk.core.client.config.SdkClientConfiguration;
+import software.amazon.awssdk.core.client.config.SdkClientOption;
+import software.amazon.awssdk.core.interceptor.ClasspathInterceptorChainFactory;
+import software.amazon.awssdk.core.interceptor.ExecutionAttributes;
+import software.amazon.awssdk.core.interceptor.ExecutionInterceptor;
+import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute;
+import software.amazon.awssdk.core.retry.RetryMode;
+import software.amazon.awssdk.http.auth.scheme.BearerAuthScheme;
+import software.amazon.awssdk.http.auth.scheme.NoAuthAuthScheme;
+import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme;
+import software.amazon.awssdk.identity.spi.IdentityProvider;
+import software.amazon.awssdk.identity.spi.IdentityProviders;
+import software.amazon.awssdk.identity.spi.TokenIdentity;
+import software.amazon.awssdk.protocols.json.internal.unmarshall.SdkClientJsonProtocolAdvancedOption;
+import software.amazon.awssdk.regions.ServiceMetadataAdvancedOption;
+import software.amazon.awssdk.retries.api.RetryStrategy;
+import software.amazon.awssdk.services.json.auth.scheme.JsonAuthSchemeProvider;
+import software.amazon.awssdk.services.json.auth.scheme.internal.JsonAuthSchemeInterceptor;
+import software.amazon.awssdk.services.json.endpoints.JsonEndpointProvider;
+import software.amazon.awssdk.services.json.endpoints.internal.JsonRequestSetEndpointInterceptor;
+import software.amazon.awssdk.services.json.endpoints.internal.JsonResolveEndpointInterceptor;
+import software.amazon.awssdk.services.json.internal.EnvironmentTokenSystemSettings;
+import software.amazon.awssdk.services.json.internal.JsonServiceClientConfigurationBuilder;
+import software.amazon.awssdk.utils.CollectionUtils;
+import software.amazon.awssdk.utils.Validate;
+
+/**
+ * Internal base class for {@link DefaultJsonClientBuilder} and {@link DefaultJsonAsyncClientBuilder}.
+ */
+@Generated("software.amazon.awssdk:codegen")
+@SdkInternalApi
+abstract class DefaultJsonBaseClientBuilder<B extends DefaultJsonBaseClientBuilder<B, C>, C> extends
+        AwsDefaultClientBuilder<B, C> {
+    private final Map<String, AuthScheme<?>> additionalAuthSchemes = new HashMap<>();
+
+    @Override
+    protected final String serviceEndpointPrefix() {
+        return "json-service-endpoint";
+    }
+
+    @Override
+    protected final String serviceName() {
+        return "Json";
+    }
+
+    @Override
+    protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) {
+        return config.merge(c -> {
+            c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider())
+             .option(SdkClientOption.AUTH_SCHEMES, authSchemes())
+             .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)
+             .lazyOption(AwsClientOption.TOKEN_PROVIDER,
+                         p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER)))
+             .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider());
+            Optional<String> tokenFromEnv = new EnvironmentTokenSystemSettings().getStringValue();
+            if (tokenFromEnv.isPresent() && config.option(SdkClientOption.AUTH_SCHEME_PROVIDER) == null
+                && config.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER) == null) {
+                c.option(SdkClientOption.AUTH_SCHEME_PROVIDER,
+                         JsonAuthSchemeProvider.defaultProvider(Collections.singletonList("httpBearerAuth")));
+                c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, StaticTokenProvider.create(tokenFromEnv::get));
+                c.option(
+                    SdkClientOption.EXECUTION_ATTRIBUTES,
+                    ExecutionAttributes.builder()
+                                       .put(SdkInternalExecutionAttribute.TOKEN_CONFIGURED_FROM_ENV, tokenFromEnv.get()).build());
+            } else {
+                c.option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config));
+            }
+        });
+    }
+
+    @Override
+    protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientConfiguration config) {
+        List<ExecutionInterceptor> endpointInterceptors = new ArrayList<>();
+        endpointInterceptors.add(new JsonAuthSchemeInterceptor());
+        endpointInterceptors.add(new JsonResolveEndpointInterceptor());
+        endpointInterceptors.add(new JsonRequestSetEndpointInterceptor());
+        ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory();
+        List<ExecutionInterceptor> interceptors = interceptorFactory
+            .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors");
+        List<ExecutionInterceptor> additionalInterceptors = new ArrayList<>();
+        interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors);
+        interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors);
+        interceptors = CollectionUtils.mergeLists(interceptors, config.option(SdkClientOption.EXECUTION_INTERCEPTORS));
+        SdkClientConfiguration.Builder builder = config.toBuilder();
+        builder.lazyOption(SdkClientOption.IDENTITY_PROVIDERS, c -> {
+            IdentityProviders.Builder result = IdentityProviders.builder();
+            IdentityProvider<? extends TokenIdentity> tokenIdentityProvider = c.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER);
+            if (tokenIdentityProvider != null) {
+                result.putIdentityProvider(tokenIdentityProvider);
+            }
+            return result.build();
+        });
+        builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors);
+        builder.lazyOptionIfAbsent(
+            SdkClientOption.CLIENT_ENDPOINT_PROVIDER,
+            c -> AwsClientEndpointProvider
+                .builder()
+                .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE")
+                .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson")
+                .serviceProfileProperty("json_service")
+                .serviceEndpointPrefix(serviceEndpointPrefix())
+                .defaultProtocol("https")
+                .region(c.get(AwsClientOption.AWS_REGION))
+                .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER))
+                .profileName(c.get(SdkClientOption.PROFILE_NAME))
+                .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT,
+                                   c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT))
+                .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED))
+                .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build());
+        builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true);
+        return builder.build();
+    }
+
+    @Override
+    protected final String signingName() {
+        return "json-service";
+    }
+
+    private JsonEndpointProvider defaultEndpointProvider() {
+        return JsonEndpointProvider.defaultProvider();
+    }
+
+    public B authSchemeProvider(JsonAuthSchemeProvider authSchemeProvider) {
+        clientConfiguration.option(SdkClientOption.AUTH_SCHEME_PROVIDER, authSchemeProvider);
+        return thisBuilder();
+    }
+
+    private JsonAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) {
+        AuthSchemePreferenceResolver authSchemePreferenceProvider = AuthSchemePreferenceResolver.builder()
+            .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER))
+            .profileName(config.option(SdkClientOption.PROFILE_NAME)).build();
+        List<String> preferences = authSchemePreferenceProvider.resolveAuthSchemePreference();
+        if (!preferences.isEmpty()) {
+            return JsonAuthSchemeProvider.defaultProvider(preferences);
+        }
+        return JsonAuthSchemeProvider.defaultProvider();
+    }
+
+    @Override
+    public B putAuthScheme(AuthScheme<?> authScheme) {
+        additionalAuthSchemes.put(authScheme.schemeId(), authScheme);
+        return thisBuilder();
+    }
+
+    private Map<String, AuthScheme<?>> authSchemes() {
+        Map<String, AuthScheme<?>> schemes = new HashMap<>(2 + this.additionalAuthSchemes.size());
+        BearerAuthScheme bearerAuthScheme = BearerAuthScheme.create();
+        schemes.put(bearerAuthScheme.schemeId(), bearerAuthScheme);
+        NoAuthAuthScheme noAuthAuthScheme = NoAuthAuthScheme.create();
+        schemes.put(noAuthAuthScheme.schemeId(), noAuthAuthScheme);
+        schemes.putAll(this.additionalAuthSchemes);
+        return schemes;
+    }
+
+    private IdentityProvider<? extends TokenIdentity> defaultTokenProvider() {
+        return DefaultAwsTokenProvider.create();
+    }
+
+    @Override
+    protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) {
+        List<SdkPlugin> internalPlugins = internalPlugins(config);
+        List<SdkPlugin> externalPlugins = plugins();
+        if (internalPlugins.isEmpty() && externalPlugins.isEmpty()) {
+            return config;
+        }
+        List<SdkPlugin> plugins = CollectionUtils.mergeLists(internalPlugins, externalPlugins);
+        SdkClientConfiguration.Builder configuration = config.toBuilder();
+        JsonServiceClientConfigurationBuilder serviceConfigBuilder = new JsonServiceClientConfigurationBuilder(configuration);
+        for (SdkPlugin plugin : plugins) {
+            plugin.configureClient(serviceConfigBuilder);
+        }
+        updateRetryStrategyClientConfiguration(configuration);
+        return configuration.build();
+    }
+
+    private void updateRetryStrategyClientConfiguration(SdkClientConfiguration.Builder configuration) {
+        ClientOverrideConfiguration.Builder builder = configuration.asOverrideConfigurationBuilder();
+        RetryMode retryMode = builder.retryMode();
+        if (retryMode != null) {
+            configuration.option(SdkClientOption.RETRY_STRATEGY, AwsRetryStrategy.forRetryMode(retryMode));
+        } else {
+            Consumer<RetryStrategy.Builder<?, ?>> configurator = builder.retryStrategyConfigurator();
+            if (configurator != null) {
+                RetryStrategy.Builder<?, ?> defaultBuilder = AwsRetryStrategy.defaultRetryStrategy().toBuilder();
+                configurator.accept(defaultBuilder);
+                configuration.option(SdkClientOption.RETRY_STRATEGY, defaultBuilder.build());
+            } else {
+                RetryStrategy retryStrategy = builder.retryStrategy();
+                if (retryStrategy != null) {
+                    configuration.option(SdkClientOption.RETRY_STRATEGY, retryStrategy);
+                }
+            }
+        }
+        configuration.option(SdkClientOption.CONFIGURED_RETRY_MODE, null);
+        configuration.option(SdkClientOption.CONFIGURED_RETRY_STRATEGY, null);
+        configuration.option(SdkClientOption.CONFIGURED_RETRY_CONFIGURATOR, null);
+    }
+
+    private List<SdkPlugin> internalPlugins(SdkClientConfiguration config) {
+        return Collections.emptyList();
+    }
+
+    protected static void validateClientOptions(SdkClientConfiguration c) {
+        Validate.notNull(c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER),
+                         "The 'tokenProvider' must be configured in the client builder.");
+    }
+}
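The environment bearer token flow in the new DefaultJsonBaseClientBuilder above, end to end: mergeServiceDefaults() consults EnvironmentTokenSystemSettings, and only when a token is present and the caller configured neither an auth scheme provider nor a token identity provider does it pin the auth scheme provider to prefer httpBearerAuth, wrap the raw value in a StaticTokenProvider, and stash the token under TOKEN_CONFIGURED_FROM_ENV so JsonAuthSchemeInterceptor can emit the BEARER_SERVICE_ENV_VARS business metric. A sketch of the expected behavior (the environment variable name is hypothetical, the real key comes from EnvironmentTokenSystemSettings, and the tokenProvider method name follows the generated bearer-auth builders):

    import software.amazon.awssdk.auth.token.credentials.StaticTokenProvider;
    import software.amazon.awssdk.services.json.JsonClient;

    public class EnvironmentBearerTokenExample {
        public static void main(String[] args) {
            // With e.g. AWS_BEARER_TOKEN_JSON_SERVICE=env-token exported (hypothetical name)
            // and nothing else configured, the builder prefers httpBearerAuth and signs
            // requests with the environment token:
            JsonClient fromEnv = JsonClient.builder().build();

            // Any explicit token provider (or auth scheme provider) opts out of the env
            // path, so no business metric is recorded; the lambda implements SdkToken.token():
            JsonClient explicit = JsonClient.builder()
                                            .tokenProvider(StaticTokenProvider.create(() -> "explicit-token"))
                                            .build();
        }
    }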
additionalInterceptors); @@ -88,21 +91,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon }); builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase") - .serviceProfileProperty("database_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase") + .serviceProfileProperty("database_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); return builder.build(); } @@ -121,7 +124,14 @@ public B authSchemeProvider(DatabaseAuthSchemeProvider authSchemeProvider) { return thisBuilder(); } - private DatabaseAuthSchemeProvider defaultAuthSchemeProvider() { + private DatabaseAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) { + AuthSchemePreferenceResolver authSchemePreferenceProvider = AuthSchemePreferenceResolver.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)).build(); + List<String> preferences = authSchemePreferenceProvider.resolveAuthSchemePreference(); + if (!preferences.isEmpty()) { + return DatabaseAuthSchemeProvider.defaultProvider(preferences); + } return DatabaseAuthSchemeProvider.defaultProvider(); } @@ -153,7 +163,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { List<SdkPlugin> plugins = CollectionUtils.mergeLists(internalPlugins, externalPlugins); SdkClientConfiguration.Builder configuration = config.toBuilder(); DatabaseServiceClientConfigurationBuilder serviceConfigBuilder = new DatabaseServiceClientConfigurationBuilder( - configuration); + configuration); for (SdkPlugin plugin : plugins) { plugin.configureClient(serviceConfigBuilder); } @@ -193,7 +203,7 @@ protected static void validateClientOptions(SdkClientConfiguration c) { public B sigv4aSigningRegionSet(RegionSet sigv4aSigningRegionSet) { clientConfiguration.option(AwsClientOption.AWS_SIGV4A_SIGNING_REGION_SET, - sigv4aSigningRegionSet == null ?
Collections.emptySet() : sigv4aSigningRegionSet.asSet()); + sigv4aSigningRegionSet == null ? Collections.emptySet() : sigv4aSigningRegionSet.asSet()); return thisBuilder(); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-ops-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-ops-client-builder-class.java index 74e0b6ce2709..72d4f526bfb3 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-ops-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-ops-client-builder-class.java @@ -8,6 +8,9 @@ import java.util.function.Consumer; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.credentials.TokenUtils; +import software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -25,6 +28,7 @@ import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme; import software.amazon.awssdk.identity.spi.IdentityProvider; import software.amazon.awssdk.identity.spi.IdentityProviders; +import software.amazon.awssdk.identity.spi.TokenIdentity; import software.amazon.awssdk.protocols.json.internal.unmarshall.SdkClientJsonProtocolAdvancedOption; import software.amazon.awssdk.regions.ServiceMetadataAdvancedOption; import software.amazon.awssdk.retries.api.RetryStrategy; @@ -35,6 +39,7 @@ import software.amazon.awssdk.services.database.endpoints.internal.DatabaseResolveEndpointInterceptor; import software.amazon.awssdk.services.database.internal.DatabaseServiceClientConfigurationBuilder; import software.amazon.awssdk.utils.CollectionUtils; +import software.amazon.awssdk.utils.Validate; /** * Internal base class for {@link DefaultDatabaseClientBuilder} and {@link DefaultDatabaseAsyncClientBuilder}. 
@@ -42,7 +47,7 @@ @Generated("software.amazon.awssdk:codegen") @SdkInternalApi abstract class DefaultDatabaseBaseClientBuilder<B extends DefaultDatabaseBaseClientBuilder<B, C>, C> extends - AwsDefaultClientBuilder<B, C> { + AwsDefaultClientBuilder<B, C> { private final Map<String, AuthScheme<?>> additionalAuthSchemes = new HashMap<>(); @Override @@ -57,10 +62,15 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider()) - .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config)) + .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()); + }); } @Override @@ -71,7 +81,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new DatabaseRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List<ExecutionInterceptor> interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/database/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/database/execution.interceptors"); List<ExecutionInterceptor> additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); @@ -79,6 +89,10 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon SdkClientConfiguration.Builder builder = config.toBuilder(); builder.lazyOption(SdkClientOption.IDENTITY_PROVIDERS, c -> { IdentityProviders.Builder result = IdentityProviders.builder(); + IdentityProvider<? extends TokenIdentity> tokenIdentityProvider = c.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER); + if (tokenIdentityProvider != null) { + result.putIdentityProvider(tokenIdentityProvider); + } IdentityProvider<? extends AwsCredentialsIdentity> credentialsIdentityProvider = c.get(AwsClientOption.CREDENTIALS_IDENTITY_PROVIDER); if (credentialsIdentityProvider != null) { result.putIdentityProvider(credentialsIdentityProvider); @@ -87,21 +101,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon }); builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase") - .serviceProfileProperty("database_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) -
.dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase") + .serviceProfileProperty("database_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); return builder.build(); } @@ -120,7 +134,14 @@ public B authSchemeProvider(DatabaseAuthSchemeProvider authSchemeProvider) { return thisBuilder(); } - private DatabaseAuthSchemeProvider defaultAuthSchemeProvider() { + private DatabaseAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) { + AuthSchemePreferenceResolver authSchemePreferenceProvider = AuthSchemePreferenceResolver.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)).build(); + List<String> preferences = authSchemePreferenceProvider.resolveAuthSchemePreference(); + if (!preferences.isEmpty()) { + return DatabaseAuthSchemeProvider.defaultProvider(preferences); + } return DatabaseAuthSchemeProvider.defaultProvider(); } @@ -142,6 +163,10 @@ private Map<String, AuthScheme<?>> authSchemes() { return schemes; } + private IdentityProvider<? extends TokenIdentity> defaultTokenProvider() { + return DefaultAwsTokenProvider.create(); + } + @Override protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { List<SdkPlugin> internalPlugins = internalPlugins(config); @@ -152,7 +177,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { List<SdkPlugin> plugins = CollectionUtils.mergeLists(internalPlugins, externalPlugins); SdkClientConfiguration.Builder configuration = config.toBuilder(); DatabaseServiceClientConfigurationBuilder serviceConfigBuilder = new DatabaseServiceClientConfigurationBuilder( - configuration); + configuration); for (SdkPlugin plugin : plugins) { plugin.configureClient(serviceConfigBuilder); } @@ -188,5 +213,7 @@ private List<SdkPlugin> internalPlugins(SdkClientConfiguration config) { } protected static void validateClientOptions(SdkClientConfiguration c) { + Validate.notNull(c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER), + "The 'tokenProvider' must be configured in the client builder."); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-service-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-service-client-builder-class.java index 8a42dcba4138..0be9c031d828 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-service-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-service-client-builder-class.java @@ -8,6 +8,7 @@ import java.util.function.Consumer; import
software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -39,7 +40,7 @@ @Generated("software.amazon.awssdk:codegen") @SdkInternalApi abstract class DefaultDatabaseBaseClientBuilder<B extends DefaultDatabaseBaseClientBuilder<B, C>, C> extends - AwsDefaultClientBuilder<B, C> { + AwsDefaultClientBuilder<B, C> { private final Map<String, AuthScheme<?>> additionalAuthSchemes = new HashMap<>(); @Override @@ -54,10 +55,12 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider()) - .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config)) + .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false); + }); } @Override @@ -68,7 +71,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new DatabaseRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List<ExecutionInterceptor> interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/database/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/database/execution.interceptors"); List<ExecutionInterceptor> additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); @@ -80,21 +83,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon }); builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase") - .serviceProfileProperty("database_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase") + .serviceProfileProperty("database_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") +
.region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); return builder.build(); } @@ -113,7 +116,14 @@ public B authSchemeProvider(DatabaseAuthSchemeProvider authSchemeProvider) { return thisBuilder(); } - private DatabaseAuthSchemeProvider defaultAuthSchemeProvider() { + private DatabaseAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) { + AuthSchemePreferenceResolver authSchemePreferenceProvider = AuthSchemePreferenceResolver.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)).build(); + List<String> preferences = authSchemePreferenceProvider.resolveAuthSchemePreference(); + if (!preferences.isEmpty()) { + return DatabaseAuthSchemeProvider.defaultProvider(preferences); + } return DatabaseAuthSchemeProvider.defaultProvider(); } @@ -141,7 +151,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { List<SdkPlugin> plugins = CollectionUtils.mergeLists(internalPlugins, externalPlugins); SdkClientConfiguration.Builder configuration = config.toBuilder(); DatabaseServiceClientConfigurationBuilder serviceConfigBuilder = new DatabaseServiceClientConfigurationBuilder( - configuration); + configuration); for (SdkPlugin plugin : plugins) { plugin.configureClient(serviceConfigBuilder); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-query-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-query-client-builder-class.java index 724eb838439d..19b8d5abbae1 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-query-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-query-client-builder-class.java @@ -10,6 +10,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.credentials.TokenUtils; import software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -66,14 +67,15 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c - .option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider()) - .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) - .lazyOption(AwsClientOption.TOKEN_PROVIDER, - p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) - .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider())); + return config.merge(c -> {
c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config)) + .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()); + }); } @Override @@ -141,7 +143,14 @@ public B authSchemeProvider(QueryAuthSchemeProvider authSchemeProvider) { return thisBuilder(); } - private QueryAuthSchemeProvider defaultAuthSchemeProvider() { + private QueryAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) { + AuthSchemePreferenceResolver authSchemePreferenceProvider = AuthSchemePreferenceResolver.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)).build(); + List<String> preferences = authSchemePreferenceProvider.resolveAuthSchemePreference(); + if (!preferences.isEmpty()) { + return QueryAuthSchemeProvider.defaultProvider(preferences); + } return QueryAuthSchemeProvider.defaultProvider(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-bearer-auth-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-bearer-auth-client-builder-class.java index 9895ae765031..c7932ab9f7c6 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-bearer-auth-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-bearer-auth-client-builder-class.java @@ -53,13 +53,14 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c - .option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) - .lazyOption(AwsClientOption.TOKEN_PROVIDER, - p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) - .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) - .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner())); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) + .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner()); + }); } @Override @@ -69,7 +70,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new JsonRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List<ExecutionInterceptor> interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); List<ExecutionInterceptor> additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); @@ -85,21 +86,21 @@
protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon }); builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") - .serviceProfileProperty("json_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") + .serviceProfileProperty("json_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); return builder.build(); } @@ -167,8 +168,8 @@ private List<SdkPlugin> internalPlugins(SdkClientConfiguration config) { protected static void validateClientOptions(SdkClientConfiguration c) { Validate.notNull(c.option(SdkAdvancedClientOption.TOKEN_SIGNER), - "The 'overrideConfiguration.advancedOption[TOKEN_SIGNER]' must be configured in the client builder."); + "The 'overrideConfiguration.advancedOption[TOKEN_SIGNER]' must be configured in the client builder."); Validate.notNull(c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER), - "The 'tokenProvider' must be configured in the client builder."); + "The 'tokenProvider' must be configured in the client builder."); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-class.java index 6013da8b2ea5..2eca7dfdcf2c 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-class.java @@ -64,15 +64,16 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c - .option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) - .option(SdkClientOption.SERVICE_CONFIGURATION, ServiceConfiguration.builder().build()) -
.lazyOption(AwsClientOption.TOKEN_PROVIDER, - p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) - .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) - .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner())); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .option(SdkClientOption.SERVICE_CONFIGURATION, ServiceConfiguration.builder().build()) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) + .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner()); + }); } @Override @@ -82,82 +83,82 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new JsonRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List<ExecutionInterceptor> interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); List<ExecutionInterceptor> additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); interceptors = CollectionUtils.mergeLists(interceptors, config.option(SdkClientOption.EXECUTION_INTERCEPTORS)); ServiceConfiguration.Builder serviceConfigBuilder = ((ServiceConfiguration) config - .option(SdkClientOption.SERVICE_CONFIGURATION)).toBuilder(); + .option(SdkClientOption.SERVICE_CONFIGURATION)).toBuilder(); serviceConfigBuilder.profileFile(serviceConfigBuilder.profileFileSupplier() != null ? serviceConfigBuilder - .profileFileSupplier() : config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)); + .profileFileSupplier() : config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)); serviceConfigBuilder.profileName(serviceConfigBuilder.profileName() != null ? serviceConfigBuilder.profileName() : config - .option(SdkClientOption.PROFILE_NAME)); + .option(SdkClientOption.PROFILE_NAME)); if (serviceConfigBuilder.dualstackEnabled() != null) { Validate.validState( - config.option(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED) == null, - "Dualstack has been configured on both ServiceConfiguration and the client/global level. Please limit dualstack configuration to one location."); + config.option(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED) == null, + "Dualstack has been configured on both ServiceConfiguration and the client/global level. Please limit dualstack configuration to one location."); } else { serviceConfigBuilder.dualstackEnabled(config.option(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)); } if (serviceConfigBuilder.fipsModeEnabled() != null) { Validate.validState( - config.option(AwsClientOption.FIPS_ENDPOINT_ENABLED) == null, - "Fips has been configured on both ServiceConfiguration and the client/global level.
Please limit fips configuration to one location."); } else { serviceConfigBuilder.fipsModeEnabled(config.option(AwsClientOption.FIPS_ENDPOINT_ENABLED)); } if (serviceConfigBuilder.useArnRegionEnabled() != null) { Validate.validState( - clientContextParams.get(JsonClientContextParams.USE_ARN_REGION) == null, - "UseArnRegion has been configured on both ServiceConfiguration and the client/global level. Please limit UseArnRegion configuration to one location."); + clientContextParams.get(JsonClientContextParams.USE_ARN_REGION) == null, + "UseArnRegion has been configured on both ServiceConfiguration and the client/global level. Please limit UseArnRegion configuration to one location."); } else { serviceConfigBuilder.useArnRegionEnabled(clientContextParams.get(JsonClientContextParams.USE_ARN_REGION)); } if (serviceConfigBuilder.multiRegionEnabled() != null) { Validate.validState( - clientContextParams.get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS) == null, - "DisableMultiRegionAccessPoints has been configured on both ServiceConfiguration and the client/global level. Please limit DisableMultiRegionAccessPoints configuration to one location."); + clientContextParams.get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS) == null, + "DisableMultiRegionAccessPoints has been configured on both ServiceConfiguration and the client/global level. Please limit DisableMultiRegionAccessPoints configuration to one location."); } else if (clientContextParams.get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS) != null) { serviceConfigBuilder.multiRegionEnabled(!clientContextParams - .get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS)); + .get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS)); } if (serviceConfigBuilder.pathStyleAccessEnabled() != null) { Validate.validState( - clientContextParams.get(JsonClientContextParams.FORCE_PATH_STYLE) == null, - "ForcePathStyle has been configured on both ServiceConfiguration and the client/global level. Please limit ForcePathStyle configuration to one location."); + clientContextParams.get(JsonClientContextParams.FORCE_PATH_STYLE) == null, + "ForcePathStyle has been configured on both ServiceConfiguration and the client/global level. Please limit ForcePathStyle configuration to one location."); } else { serviceConfigBuilder.pathStyleAccessEnabled(clientContextParams.get(JsonClientContextParams.FORCE_PATH_STYLE)); } if (serviceConfigBuilder.accelerateModeEnabled() != null) { Validate.validState( - clientContextParams.get(JsonClientContextParams.ACCELERATE) == null, - "Accelerate has been configured on both ServiceConfiguration and the client/global level. Please limit Accelerate configuration to one location."); + clientContextParams.get(JsonClientContextParams.ACCELERATE) == null, + "Accelerate has been configured on both ServiceConfiguration and the client/global level. Please limit Accelerate configuration to one location."); } else { serviceConfigBuilder.accelerateModeEnabled(clientContextParams.get(JsonClientContextParams.ACCELERATE)); } Boolean checksumValidationEnabled = serviceConfigBuilder.checksumValidationEnabled(); if (checksumValidationEnabled != null) { Validate.validState( - config.option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION) == null, - "Checksum behavior has been configured on both ServiceConfiguration and the client/global level. 
Please limit checksum behavior configuration to one location."); + config.option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION) == null, + "Checksum behavior has been configured on both ServiceConfiguration and the client/global level. Please limit checksum behavior configuration to one location."); Validate.validState( - config.option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION) == null, - "Checksum behavior has been configured on both ServiceConfiguration and the client/global level. Please limit checksum behavior configuration to one location."); + config.option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION) == null, + "Checksum behavior has been configured on both ServiceConfiguration and the client/global level. Please limit checksum behavior configuration to one location."); if (checksumValidationEnabled) { config = config.toBuilder() - .option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, RequestChecksumCalculation.WHEN_SUPPORTED) - .option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION, ResponseChecksumValidation.WHEN_SUPPORTED).build(); + .option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, RequestChecksumCalculation.WHEN_SUPPORTED) + .option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION, ResponseChecksumValidation.WHEN_SUPPORTED).build(); } else { config = config.toBuilder() - .option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, RequestChecksumCalculation.WHEN_REQUIRED) - .option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION, ResponseChecksumValidation.WHEN_REQUIRED).build(); + .option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, RequestChecksumCalculation.WHEN_REQUIRED) + .option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION, ResponseChecksumValidation.WHEN_REQUIRED).build(); } } ServiceConfiguration finalServiceConfig = serviceConfigBuilder.build(); clientContextParams.put(JsonClientContextParams.USE_ARN_REGION, finalServiceConfig.useArnRegionEnabled()); clientContextParams.put(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS, - !finalServiceConfig.multiRegionEnabled()); + !finalServiceConfig.multiRegionEnabled()); clientContextParams.put(JsonClientContextParams.FORCE_PATH_STYLE, finalServiceConfig.pathStyleAccessEnabled()); clientContextParams.put(JsonClientContextParams.ACCELERATE, finalServiceConfig.accelerateModeEnabled()); SdkClientConfiguration.Builder builder = config.toBuilder(); @@ -182,21 +183,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon } builder.option(SdkClientOption.SERVICE_CONFIGURATION, finalServiceConfig); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") - .serviceProfileProperty("json_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") + 
.serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") + .serviceProfileProperty("json_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); SdkClientConfiguration clientConfig = config; builder.lazyOption(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, c -> resolveRequestChecksumCalculation(clientConfig)); @@ -301,9 +302,9 @@ private RequestChecksumCalculation resolveRequestChecksumCalculation(SdkClientCo RequestChecksumCalculation configuredChecksumCalculation = config.option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION); if (configuredChecksumCalculation == null) { configuredChecksumCalculation = RequestChecksumCalculationResolver.create() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultChecksumCalculation(RequestChecksumCalculation.WHEN_SUPPORTED).resolve(); + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultChecksumCalculation(RequestChecksumCalculation.WHEN_SUPPORTED).resolve(); } return configuredChecksumCalculation; } @@ -312,19 +313,19 @@ private ResponseChecksumValidation resolveResponseChecksumValidation(SdkClientCo ResponseChecksumValidation configuredChecksumValidation = config.option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION); if (configuredChecksumValidation == null) { configuredChecksumValidation = ResponseChecksumValidationResolver.create() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultChecksumValidation(ResponseChecksumValidation.WHEN_SUPPORTED).resolve(); + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultChecksumValidation(ResponseChecksumValidation.WHEN_SUPPORTED).resolve(); } return configuredChecksumValidation; } protected static void validateClientOptions(SdkClientConfiguration c) { Validate.notNull(c.option(SdkAdvancedClientOption.SIGNER), - "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder."); + "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder."); Validate.notNull(c.option(SdkAdvancedClientOption.TOKEN_SIGNER), - "The 'overrideConfiguration.advancedOption[TOKEN_SIGNER]' must be configured in the client builder."); + "The 'overrideConfiguration.advancedOption[TOKEN_SIGNER]' must be configured in the client builder."); Validate.notNull(c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER), - "The 'tokenProvider' must be configured in the client builder."); + "The 'tokenProvider' must be configured in the client builder."); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-endpoints-auth-params.java 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-endpoints-auth-params.java index 52c27dfcc8ac..4a8a346f1c76 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-endpoints-auth-params.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-endpoints-auth-params.java @@ -61,14 +61,15 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c - .option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) - .lazyOption(AwsClientOption.TOKEN_PROVIDER, - p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) - .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) - .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner())); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) + .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner()); + }); } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-internal-defaults-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-internal-defaults-class.java index 12dec5b9986d..4f1e5bbd8e0d 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-internal-defaults-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-internal-defaults-class.java @@ -50,9 +50,11 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false); + }); } @Override @@ -70,7 +72,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new JsonRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List<ExecutionInterceptor> interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); List<ExecutionInterceptor> additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); @@ -86,21 +88,21 @@ protected final SdkClientConfiguration
finalizeServiceConfiguration(SdkClientCon }); builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") - .serviceProfileProperty("json_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") + .serviceProfileProperty("json_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); return builder.build(); } @@ -164,6 +166,6 @@ private List<SdkPlugin> internalPlugins(SdkClientConfiguration config) { protected static void validateClientOptions(SdkClientConfiguration c) { Validate.notNull(c.option(SdkAdvancedClientOption.SIGNER), - "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder."); + "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder."); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-composed-sync-default-client-builder.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-composed-sync-default-client-builder.java index 52f9d10e821f..778b676c4975 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-composed-sync-default-client-builder.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-composed-sync-default-client-builder.java @@ -59,15 +59,16 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c - .option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) - .option(SdkClientOption.SERVICE_CONFIGURATION, ServiceConfiguration.builder().build()) - .lazyOption(AwsClientOption.TOKEN_PROVIDER, - p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) - .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) -
.option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner())); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .option(SdkClientOption.SERVICE_CONFIGURATION, ServiceConfiguration.builder().build()) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) + .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner()); + }); } @Override @@ -77,17 +78,17 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new JsonRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List<ExecutionInterceptor> interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); List<ExecutionInterceptor> additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); interceptors = CollectionUtils.mergeLists(interceptors, config.option(SdkClientOption.EXECUTION_INTERCEPTORS)); ServiceConfiguration.Builder serviceConfigBuilder = ((ServiceConfiguration) config - .option(SdkClientOption.SERVICE_CONFIGURATION)).toBuilder(); + .option(SdkClientOption.SERVICE_CONFIGURATION)).toBuilder(); serviceConfigBuilder.profileFile(serviceConfigBuilder.profileFileSupplier() != null ? serviceConfigBuilder - .profileFileSupplier() : config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)); + .profileFileSupplier() : config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)); serviceConfigBuilder.profileName(serviceConfigBuilder.profileName() != null ?
serviceConfigBuilder.profileName() : config - .option(SdkClientOption.PROFILE_NAME)); + .option(SdkClientOption.PROFILE_NAME)); ServiceConfiguration finalServiceConfig = serviceConfigBuilder.build(); SdkClientConfiguration.Builder builder = config.toBuilder(); builder.lazyOption(SdkClientOption.IDENTITY_PROVIDERS, c -> { @@ -105,21 +106,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.option(SdkClientOption.SERVICE_CONFIGURATION, finalServiceConfig); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") - .serviceProfileProperty("json_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") + .serviceProfileProperty("json_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); SdkClientConfiguration clientConfig = config; builder.lazyOption(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, c -> resolveRequestChecksumCalculation(clientConfig)); @@ -220,9 +221,9 @@ private RequestChecksumCalculation resolveRequestChecksumCalculation(SdkClientCo RequestChecksumCalculation configuredChecksumCalculation = config.option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION); if (configuredChecksumCalculation == null) { configuredChecksumCalculation = RequestChecksumCalculationResolver.create() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultChecksumCalculation(RequestChecksumCalculation.WHEN_SUPPORTED).resolve(); + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultChecksumCalculation(RequestChecksumCalculation.WHEN_SUPPORTED).resolve(); } return configuredChecksumCalculation; } @@ -231,19 +232,19 @@ private ResponseChecksumValidation resolveResponseChecksumValidation(SdkClientCo ResponseChecksumValidation configuredChecksumValidation = config.option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION); if 
(configuredChecksumValidation == null) { configuredChecksumValidation = ResponseChecksumValidationResolver.create() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultChecksumValidation(ResponseChecksumValidation.WHEN_SUPPORTED).resolve(); + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultChecksumValidation(ResponseChecksumValidation.WHEN_SUPPORTED).resolve(); } return configuredChecksumValidation; } protected static void validateClientOptions(SdkClientConfiguration c) { Validate.notNull(c.option(SdkAdvancedClientOption.SIGNER), - "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder."); + "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder."); Validate.notNull(c.option(SdkAdvancedClientOption.TOKEN_SIGNER), - "The 'overrideConfiguration.advancedOption[TOKEN_SIGNER]' must be configured in the client builder."); + "The 'overrideConfiguration.advancedOption[TOKEN_SIGNER]' must be configured in the client builder."); Validate.notNull(c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER), - "The 'tokenProvider' must be configured in the client builder."); + "The 'tokenProvider' must be configured in the client builder."); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-service-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-service-client-builder-class.java index ddd2b3427e71..eb724ba82245 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-service-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-service-client-builder-class.java @@ -54,9 +54,11 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false); + }); } @Override @@ -66,7 +68,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new H2RequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List<ExecutionInterceptor> interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/h2/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/h2/execution.interceptors"); List<ExecutionInterceptor> additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); @@ -82,21 +84,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon }); builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() -
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-service-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-service-client-builder-class.java
index ddd2b3427e71..eb724ba82245 100644
--- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-service-client-builder-class.java
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-service-client-builder-class.java
@@ -54,9 +54,11 @@ protected final String serviceName() {
 
     @Override
     protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) {
-        return config.merge(c -> c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider())
-                                  .option(SdkAdvancedClientOption.SIGNER, defaultSigner())
-                                  .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false));
+        return config.merge(c -> {
+            c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider())
+             .option(SdkAdvancedClientOption.SIGNER, defaultSigner())
+             .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false);
+        });
     }
 
     @Override
@@ -66,7 +68,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon
         endpointInterceptors.add(new H2RequestSetEndpointInterceptor());
         ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory();
         List interceptors = interceptorFactory
-                .getInterceptors("software/amazon/awssdk/services/h2/execution.interceptors");
+            .getInterceptors("software/amazon/awssdk/services/h2/execution.interceptors");
         List additionalInterceptors = new ArrayList<>();
         interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors);
         interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors);
@@ -82,21 +84,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon
         });
         builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors);
         builder.lazyOptionIfAbsent(
-                SdkClientOption.CLIENT_ENDPOINT_PROVIDER,
-                c -> AwsClientEndpointProvider
-                        .builder()
-                        .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_H2_SERVICE")
-                        .serviceEndpointOverrideSystemProperty("aws.endpointUrlH2")
-                        .serviceProfileProperty("h2_service")
-                        .serviceEndpointPrefix(serviceEndpointPrefix())
-                        .defaultProtocol("https")
-                        .region(c.get(AwsClientOption.AWS_REGION))
-                        .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER))
-                        .profileName(c.get(SdkClientOption.PROFILE_NAME))
-                        .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT,
-                                c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT))
-                        .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED))
-                        .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build());
+            SdkClientOption.CLIENT_ENDPOINT_PROVIDER,
+            c -> AwsClientEndpointProvider
+                .builder()
+                .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_H2_SERVICE")
+                .serviceEndpointOverrideSystemProperty("aws.endpointUrlH2")
+                .serviceProfileProperty("h2_service")
+                .serviceEndpointPrefix(serviceEndpointPrefix())
+                .defaultProtocol("https")
+                .region(c.get(AwsClientOption.AWS_REGION))
+                .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER))
+                .profileName(c.get(SdkClientOption.PROFILE_NAME))
+                .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT,
+                    c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT))
+                .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED))
+                .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build());
         builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true);
         return builder.build();
     }
@@ -118,7 +120,7 @@ private H2EndpointProvider defaultEndpointProvider() {
     protected final AttributeMap serviceHttpConfig() {
         AttributeMap result = AttributeMap.empty();
         return result.merge(AttributeMap.builder().put(SdkHttpConfigurationOption.PROTOCOL, Protocol.HTTP2)
-                .put(SdkHttpConfigurationOption.PROTOCOL_NEGOTIATION, ProtocolNegotiation.ALPN).build());
+            .put(SdkHttpConfigurationOption.PROTOCOL_NEGOTIATION, ProtocolNegotiation.ALPN).build());
     }
 
     @Override
@@ -167,6 +169,6 @@ private List internalPlugins(SdkClientConfiguration config) {
     protected static void validateClientOptions(SdkClientConfiguration c) {
         Validate.notNull(c.option(SdkAdvancedClientOption.SIGNER),
-                "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder.");
+            "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder.");
     }
 }
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-usePriorKnowledgeForH2-service-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-usePriorKnowledgeForH2-service-client-builder-class.java
index a296652d9b41..a9dcec96847e 100644
--- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-usePriorKnowledgeForH2-service-client-builder-class.java
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-usePriorKnowledgeForH2-service-client-builder-class.java
@@ -53,9 +53,11 @@ protected final String serviceName() {
 
     @Override
     protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) {
-        return config.merge(c -> c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider())
-                                  .option(SdkAdvancedClientOption.SIGNER, defaultSigner())
-                                  .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false));
+        return config.merge(c -> {
+            c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider())
+             .option(SdkAdvancedClientOption.SIGNER, defaultSigner())
+             .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false);
+        });
     }
 
     @Override
@@ -65,7 +67,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon
         endpointInterceptors.add(new H2RequestSetEndpointInterceptor());
         ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory();
         List interceptors = interceptorFactory
-                .getInterceptors("software/amazon/awssdk/services/h2/execution.interceptors");
+            .getInterceptors("software/amazon/awssdk/services/h2/execution.interceptors");
         List additionalInterceptors = new ArrayList<>();
         interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors);
         interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors);
@@ -81,21 +83,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon
         });
         builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors);
         builder.lazyOptionIfAbsent(
-                SdkClientOption.CLIENT_ENDPOINT_PROVIDER,
-                c -> AwsClientEndpointProvider
-                        .builder()
-                        .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_H2_SERVICE")
-                        .serviceEndpointOverrideSystemProperty("aws.endpointUrlH2")
-                        .serviceProfileProperty("h2_service")
-                        .serviceEndpointPrefix(serviceEndpointPrefix())
-                        .defaultProtocol("https")
-                        .region(c.get(AwsClientOption.AWS_REGION))
-                        .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER))
-                        .profileName(c.get(SdkClientOption.PROFILE_NAME))
-                        .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT,
-                                c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT))
-                        .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED))
-                        .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build());
+            SdkClientOption.CLIENT_ENDPOINT_PROVIDER,
+            c -> AwsClientEndpointProvider
+                .builder()
+                .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_H2_SERVICE")
+                .serviceEndpointOverrideSystemProperty("aws.endpointUrlH2")
+                .serviceProfileProperty("h2_service")
+                .serviceEndpointPrefix(serviceEndpointPrefix())
+                .defaultProtocol("https")
+                .region(c.get(AwsClientOption.AWS_REGION))
+                .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER))
+                .profileName(c.get(SdkClientOption.PROFILE_NAME))
+                .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT,
+                    c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT))
+                .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED))
+                .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build());
         builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true);
         return builder.build();
     }
@@ -165,6 +167,6 @@ private List internalPlugins(SdkClientConfiguration config) {
     protected static void validateClientOptions(SdkClientConfiguration c) {
         Validate.notNull(c.option(SdkAdvancedClientOption.SIGNER),
-                "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder.");
+            "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder.");
     }
 }
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-ops-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-ops-client-builder-class.java
index 4c7699ac4c6d..5ec8c0facf9a 100644
--- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-ops-client-builder-class.java
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-ops-client-builder-class.java
@@ -6,7 +6,10 @@
 import java.util.function.Consumer;
 import software.amazon.awssdk.annotations.Generated;
 import software.amazon.awssdk.annotations.SdkInternalApi;
+import software.amazon.awssdk.auth.credentials.TokenUtils;
 import software.amazon.awssdk.auth.signer.Aws4Signer;
+import software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider;
+import software.amazon.awssdk.auth.token.signer.aws.BearerTokenSigner;
 import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder;
 import software.amazon.awssdk.awscore.client.config.AwsClientOption;
 import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider;
@@ -22,6 +25,7 @@
 import software.amazon.awssdk.core.signer.Signer;
 import software.amazon.awssdk.identity.spi.IdentityProvider;
 import software.amazon.awssdk.identity.spi.IdentityProviders;
+import software.amazon.awssdk.identity.spi.TokenIdentity;
 import software.amazon.awssdk.protocols.json.internal.unmarshall.SdkClientJsonProtocolAdvancedOption;
 import software.amazon.awssdk.regions.ServiceMetadataAdvancedOption;
 import software.amazon.awssdk.retries.api.RetryStrategy;
@@ -38,7 +42,7 @@
 @Generated("software.amazon.awssdk:codegen")
 @SdkInternalApi
 abstract class DefaultDatabaseBaseClientBuilder, C> extends
-        AwsDefaultClientBuilder {
+    AwsDefaultClientBuilder {
     @Override
     protected final String serviceEndpointPrefix() {
         return "database-service-endpoint";
@@ -51,9 +55,15 @@ protected final String serviceName() {
 
     @Override
     protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) {
-        return config.merge(c -> c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider())
-                                  .option(SdkAdvancedClientOption.SIGNER, defaultSigner())
-                                  .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false));
+        return config.merge(c -> {
+            c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider())
+             .option(SdkAdvancedClientOption.SIGNER, defaultSigner())
+             .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)
+             .lazyOption(AwsClientOption.TOKEN_PROVIDER,
+                 p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER)))
+             .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider())
+             .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner());
+        });
     }
 
     @Override
@@ -63,7 +73,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon
         endpointInterceptors.add(new DatabaseRequestSetEndpointInterceptor());
         ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory();
         List interceptors = interceptorFactory
-                .getInterceptors("software/amazon/awssdk/services/database/execution.interceptors");
+            .getInterceptors("software/amazon/awssdk/services/database/execution.interceptors");
         List additionalInterceptors = new ArrayList<>();
         interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors);
         interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors);
@@ -71,6 +81,10 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon
         SdkClientConfiguration.Builder builder = config.toBuilder();
         builder.lazyOption(SdkClientOption.IDENTITY_PROVIDERS, c -> {
             IdentityProviders.Builder result = IdentityProviders.builder();
+            IdentityProvider tokenIdentityProvider = c.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER);
+            if (tokenIdentityProvider != null) {
+                result.putIdentityProvider(tokenIdentityProvider);
+            }
             IdentityProvider credentialsIdentityProvider = c.get(AwsClientOption.CREDENTIALS_IDENTITY_PROVIDER);
             if (credentialsIdentityProvider != null) {
                 result.putIdentityProvider(credentialsIdentityProvider);
@@ -79,21 +93,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon
         });
         builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors);
         builder.lazyOptionIfAbsent(
-                SdkClientOption.CLIENT_ENDPOINT_PROVIDER,
-                c -> AwsClientEndpointProvider
-                        .builder()
-                        .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE")
-                        .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase")
-                        .serviceProfileProperty("database_service")
-                        .serviceEndpointPrefix(serviceEndpointPrefix())
-                        .defaultProtocol("https")
-                        .region(c.get(AwsClientOption.AWS_REGION))
-                        .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER))
-                        .profileName(c.get(SdkClientOption.PROFILE_NAME))
-                        .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT,
-                                c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT))
-                        .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED))
-                        .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build());
+            SdkClientOption.CLIENT_ENDPOINT_PROVIDER,
+            c -> AwsClientEndpointProvider
+                .builder()
+                .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE")
+                .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase")
+                .serviceProfileProperty("database_service")
+                .serviceEndpointPrefix(serviceEndpointPrefix())
+                .defaultProtocol("https")
+                .region(c.get(AwsClientOption.AWS_REGION))
+                .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER))
+                .profileName(c.get(SdkClientOption.PROFILE_NAME))
+                .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT,
+                    c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT))
+                .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED))
+                .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build());
         builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true);
         return builder.build();
     }
@@ -111,6 +125,14 @@ private DatabaseEndpointProvider defaultEndpointProvider() {
         return DatabaseEndpointProvider.defaultProvider();
     }
 
+    private IdentityProvider defaultTokenProvider() {
+        return DefaultAwsTokenProvider.create();
+    }
+
+    private Signer defaultTokenSigner() {
+        return BearerTokenSigner.create();
+    }
+
     @Override
     protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) {
         List internalPlugins = internalPlugins(config);
@@ -121,7 +143,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) {
         List plugins = CollectionUtils.mergeLists(internalPlugins, externalPlugins);
         SdkClientConfiguration.Builder configuration = config.toBuilder();
         DatabaseServiceClientConfigurationBuilder serviceConfigBuilder = new DatabaseServiceClientConfigurationBuilder(
-                configuration);
+            configuration);
         for (SdkPlugin plugin : plugins) {
             plugin.configureClient(serviceConfigBuilder);
         }
@@ -158,6 +180,10 @@ private List internalPlugins(SdkClientConfiguration config) {
     protected static void validateClientOptions(SdkClientConfiguration c) {
         Validate.notNull(c.option(SdkAdvancedClientOption.SIGNER),
-                "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder.");
+            "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder.");
+        Validate.notNull(c.option(SdkAdvancedClientOption.TOKEN_SIGNER),
+            "The 'overrideConfiguration.advancedOption[TOKEN_SIGNER]' must be configured in the client builder.");
+        Validate.notNull(c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER),
+            "The 'tokenProvider' must be configured in the client builder.");
     }
 }
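The test-no-auth-ops hunks above add bearer-token defaults to the generated builder: a default token identity provider, a bearer signer, and a lazy adaptation from the identity provider to the older token-provider option. A sketch of those pieces in isolation follows. The import paths are the ones the diff itself adds, except SdkTokenProvider; that type and the generic parameters are inferred here, since the patch elides them, so treat this as an assumption rather than the fixture's exact code.

import software.amazon.awssdk.auth.credentials.TokenUtils;
import software.amazon.awssdk.auth.token.credentials.SdkTokenProvider;
import software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider;
import software.amazon.awssdk.auth.token.signer.aws.BearerTokenSigner;
import software.amazon.awssdk.core.signer.Signer;
import software.amazon.awssdk.identity.spi.IdentityProvider;
import software.amazon.awssdk.identity.spi.TokenIdentity;

class BearerDefaultsSketch {
    public static void main(String[] args) {
        // Defaults the generated builder installs, per the hunks above.
        IdentityProvider<? extends TokenIdentity> tokenProvider = DefaultAwsTokenProvider.create();
        Signer tokenSigner = BearerTokenSigner.create();
        // The builder's lazyOption performs this adaptation when the legacy
        // TOKEN_PROVIDER option is first read.
        SdkTokenProvider legacyProvider = TokenUtils.toSdkTokenProvider(tokenProvider);
        System.out.println(tokenSigner + " / " + legacyProvider);
    }
}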
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-service-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-service-client-builder-class.java
index a5aedee94c63..6a8e2290d918 100644
--- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-service-client-builder-class.java
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-service-client-builder-class.java
@@ -33,7 +33,7 @@
 @Generated("software.amazon.awssdk:codegen")
 @SdkInternalApi
 abstract class DefaultDatabaseBaseClientBuilder, C> extends
-        AwsDefaultClientBuilder {
+    AwsDefaultClientBuilder {
     @Override
     protected final String serviceEndpointPrefix() {
         return "database-service-endpoint";
@@ -46,8 +46,10 @@ protected final String serviceName() {
 
     @Override
     protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) {
-        return config.merge(c -> c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()).option(
-                SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false));
+        return config.merge(c -> {
+            c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()).option(
+                SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false);
+        });
     }
 
     @Override
@@ -57,7 +59,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon
         endpointInterceptors.add(new DatabaseRequestSetEndpointInterceptor());
         ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory();
         List interceptors = interceptorFactory
-                .getInterceptors("software/amazon/awssdk/services/database/execution.interceptors");
+            .getInterceptors("software/amazon/awssdk/services/database/execution.interceptors");
         List additionalInterceptors = new ArrayList<>();
         interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors);
         interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors);
@@ -69,21 +71,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon
         });
         builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors);
         builder.lazyOptionIfAbsent(
-                SdkClientOption.CLIENT_ENDPOINT_PROVIDER,
-                c -> AwsClientEndpointProvider
-                        .builder()
-                        .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE")
-                        .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase")
-                        .serviceProfileProperty("database_service")
-                        .serviceEndpointPrefix(serviceEndpointPrefix())
-                        .defaultProtocol("https")
-                        .region(c.get(AwsClientOption.AWS_REGION))
-                        .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER))
-                        .profileName(c.get(SdkClientOption.PROFILE_NAME))
-                        .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT,
-                                c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT))
-                        .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED))
-                        .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build());
+            SdkClientOption.CLIENT_ENDPOINT_PROVIDER,
+            c -> AwsClientEndpointProvider
+                .builder()
+                .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE")
+                .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase")
+                .serviceProfileProperty("database_service")
+                .serviceEndpointPrefix(serviceEndpointPrefix())
+                .defaultProtocol("https")
+                .region(c.get(AwsClientOption.AWS_REGION))
+                .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER))
+                .profileName(c.get(SdkClientOption.PROFILE_NAME))
+                .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT,
+                    c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT))
+                .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED))
+                .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build());
         builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true);
         return builder.build();
     }
@@ -107,7 +109,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) {
         List plugins = CollectionUtils.mergeLists(internalPlugins, externalPlugins);
         SdkClientConfiguration.Builder configuration = config.toBuilder();
         DatabaseServiceClientConfigurationBuilder serviceConfigBuilder = new DatabaseServiceClientConfigurationBuilder(
-                configuration);
+            configuration);
         for (SdkPlugin plugin : plugins) {
             plugin.configureClient(serviceConfigBuilder);
         }
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java
index df04abcb8bef..2ef919b3671f 100644
--- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java
@@ -60,14 +60,15 @@ protected final String serviceName() {
 
     @Override
     protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) {
-        return config.merge(c -> c
-                .option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider())
-                .option(SdkAdvancedClientOption.SIGNER, defaultSigner())
-                .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)
-                .lazyOption(AwsClientOption.TOKEN_PROVIDER,
-                        p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER)))
-                .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider())
-                .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner()));
+        return config.merge(c -> {
+            c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider())
+             .option(SdkAdvancedClientOption.SIGNER, defaultSigner())
+             .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)
+             .lazyOption(AwsClientOption.TOKEN_PROVIDER,
+                 p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER)))
+             .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider())
+             .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner());
+        });
     }
 
     @Override
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json-bearer-auth/customization-env-bearer-token.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json-bearer-auth/customization-env-bearer-token.config
new file mode 100644
index 000000000000..2edb12c857bc
--- /dev/null
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json-bearer-auth/customization-env-bearer-token.config
@@ -0,0 +1,3 @@
+{
+  "enableEnvironmentBearerToken": true
+}
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rpcv2/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rpcv2/service-2.json
index 7120326e77ec..d9c2b68b4b6e 100644
--- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rpcv2/service-2.json
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rpcv2/service-2.json
@@ -4,8 +4,7 @@
     "apiVersion":"2023-03-10",
     "auth":["aws.auth#sigv4"],
     "endpointPrefix":"smithyrpcv2protocol",
-    "protocol":"smithy-rpc-v2-cbor",
-    "protocols":["smithy-rpc-v2-cbor"],
+    "protocols":["smithy-rpc-v2-cbor", "json", "query"],
    "serviceFullName":"RpcV2 Protocol Service",
    "serviceId":"SmithyRpcV2Protocol",
    "signatureVersion":"v4",
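The service-2.json change above moves the test model from a single `protocol` field to a `protocols` priority list, the way newer service models advertise every protocol they support, with the generator expected to pick the highest-priority protocol it can handle. A minimal stand-alone sketch of that selection under that assumption; the helper and names below are hypothetical, not the codegen's actual API:

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

class ProtocolSelectionSketch {
    // Hypothetical helper: pick the first modeled protocol the generator supports.
    static String resolveProtocol(List<String> modeled, Set<String> supported) {
        return modeled.stream()
                      .filter(supported::contains)
                      .findFirst()
                      .orElseThrow(() -> new IllegalArgumentException("No supported protocol in " + modeled));
    }

    public static void main(String[] args) {
        List<String> modeled = Arrays.asList("smithy-rpc-v2-cbor", "json", "query");
        Set<String> supported = new LinkedHashSet<>(Arrays.asList("json", "query"));
        System.out.println(resolveProtocol(modeled, supported)); // prints "json"
    }
}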
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-aws-json-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-aws-json-async-client-class.java
index e4a6f0f5d2ab..948b39851836 100644
--- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-aws-json-async-client-class.java
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-aws-json-async-client-class.java
@@ -128,7 +128,7 @@ final class DefaultJsonAsyncClient implements JsonAsyncClient {
     private static final Logger log = LoggerFactory.getLogger(DefaultJsonAsyncClient.class);
 
     private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder()
-            .serviceProtocol(AwsServiceProtocol.AWS_JSON).build();
+        .serviceProtocol(AwsServiceProtocol.AWS_JSON).build();
 
     private final AsyncClientHandler clientHandler;
 
@@ -178,46 +178,46 @@ public JsonUtilities utilities() {
     public CompletableFuture aPostOperation(APostOperationRequest aPostOperationRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest,
                 this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, APostOperationResponse::builder);
+                operationMetadata, APostOperationResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             String hostPrefix = "{StringMember}-foo.";
             HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember",
-                    "aPostOperationRequest");
+                "aPostOperationRequest");
             String resolvedHostExpression = String.format("%s-foo.", aPostOperationRequest.stringMember());
             CompletableFuture executeFuture = clientHandler
-                    .execute(new ClientExecutionParams()
-                            .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest));
+                .execute(new ClientExecutionParams()
+                    .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest));
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -256,46 +256,46 @@ public CompletableFuture aPostOperation(APostOperationRe
      */
     @Override
     public CompletableFuture aPostOperationWithOutput(
-            APostOperationWithOutputRequest aPostOperationWithOutputRequest) {
+        APostOperationWithOutputRequest aPostOperationWithOutputRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, APostOperationWithOutputResponse::builder);
+                operationMetadata, APostOperationWithOutputResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture executeFuture = clientHandler
-                    .execute(new ClientExecutionParams()
-                            .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withInput(aPostOperationWithOutputRequest));
+                .execute(new ClientExecutionParams()
+                    .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withInput(aPostOperationWithOutputRequest));
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -329,81 +329,81 @@ public CompletableFuture aPostOperationWithOut
      */
     @Override
     public CompletableFuture eventStreamOperation(EventStreamOperationRequest eventStreamOperationRequest,
-            Publisher requestStream, EventStreamOperationResponseHandler asyncResponseHandler) {
+        Publisher requestStream, EventStreamOperationResponseHandler asyncResponseHandler) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration, eventStreamOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperation");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = new AttachHttpMetadataResponseHandler(
-                    protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationResponse::builder));
+                protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationResponse::builder));
             HttpResponseHandler voidResponseHandler = protocolFactory.createResponseHandler(JsonOperationMetadata
-                    .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder);
+                .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder);
             HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler(
-                    JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(),
-                    EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder)
-                            .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder)
-                            .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder)
-                            .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder)
-                            .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build());
+                JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(),
+                EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder)
+                    .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder)
+                    .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder)
+                    .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder)
+                    .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build());
             Function> eventstreamExceptionMetadataMapper = errorCode -> {
                 switch (errorCode) {
-                default:
-                    return Optional.empty();
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, eventstreamExceptionMetadataMapper);
+                operationMetadata, eventstreamExceptionMetadataMapper);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder()
-                    .putMarshaller(DefaultInputEvent.class, new InputEventMarshaller(protocolFactory)).build();
+                .putMarshaller(DefaultInputEvent.class, new InputEventMarshaller(protocolFactory)).build();
             SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream);
             Publisher adapted = eventPublisher.map(event -> eventMarshaller.marshall(event)).map(
-                    AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer);
+                AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer);
             CompletableFuture future = new CompletableFuture<>();
             EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer
-                    . builder().eventStreamResponseHandler(asyncResponseHandler)
-                    .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler)
-                    .exceptionResponseHandler(errorEventResponseHandler).future(future).executor(executor)
-                    .serviceName(serviceName()).build();
+                . builder().eventStreamResponseHandler(asyncResponseHandler)
+                .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler)
+                .exceptionResponseHandler(errorEventResponseHandler).future(future).executor(executor)
+                .serviceName(serviceName()).build();
             CompletableFuture executeFuture = clientHandler.execute(
-                    new ClientExecutionParams()
-                            .withOperationName("EventStreamOperation").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory))
-                            .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withFullDuplex(true)
-                            .withInitialRequestEvent(true).withResponseHandler(voidResponseHandler)
-                            .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration)
-                            .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationRequest),
-                    asyncResponseTransformer);
+                new ClientExecutionParams()
+                    .withOperationName("EventStreamOperation").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory))
+                    .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withFullDuplex(true)
+                    .withInitialRequestEvent(true).withResponseHandler(voidResponseHandler)
+                    .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration)
+                    .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationRequest),
+                asyncResponseTransformer);
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 if (e != null) {
                     try {
@@ -418,7 +418,7 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest
             return CompletableFutureUtils.forwardExceptionTo(future, executeFuture);
         } catch (Throwable t) {
             runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                    () -> asyncResponseHandler.exceptionOccurred(t));
+                () -> asyncResponseHandler.exceptionOccurred(t));
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             return CompletableFutureUtils.failedFuture(t);
         }
@@ -447,54 +447,54 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest
      */
     @Override
     public CompletableFuture eventStreamOperationWithOnlyInput(
-            EventStreamOperationWithOnlyInputRequest eventStreamOperationWithOnlyInputRequest,
-            Publisher requestStream) {
+        EventStreamOperationWithOnlyInputRequest eventStreamOperationWithOnlyInputRequest,
+        Publisher requestStream) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationWithOnlyInputRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration,
-                eventStreamOperationWithOnlyInputRequest.overrideConfiguration().orElse(null));
+            eventStreamOperationWithOnlyInputRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperationWithOnlyInput");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = protocolFactory
-                    .createResponseHandler(operationMetadata, EventStreamOperationWithOnlyInputResponse::builder);
+                .createResponseHandler(operationMetadata, EventStreamOperationWithOnlyInputResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder()
-                    .putMarshaller(DefaultInputEventOne.class, new InputEventMarshaller(protocolFactory))
-                    .putMarshaller(DefaultInputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build();
+                .putMarshaller(DefaultInputEventOne.class, new InputEventMarshaller(protocolFactory))
+                .putMarshaller(DefaultInputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build();
             SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream);
             Publisher adapted = eventPublisher.map(event -> eventMarshaller.marshall(event)).map(
-                    AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer);
+                AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer);
             CompletableFuture executeFuture = clientHandler
-                    .execute(new ClientExecutionParams()
-                            .withOperationName("EventStreamOperationWithOnlyInput").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new EventStreamOperationWithOnlyInputRequestMarshaller(protocolFactory))
-                            .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withInitialRequestEvent(true)
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withInput(eventStreamOperationWithOnlyInputRequest));
+                .execute(new ClientExecutionParams()
+                    .withOperationName("EventStreamOperationWithOnlyInput").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new EventStreamOperationWithOnlyInputRequestMarshaller(protocolFactory))
+                    .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withInitialRequestEvent(true)
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withInput(eventStreamOperationWithOnlyInputRequest));
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -529,75 +529,75 @@ public CompletableFuture eventStreamO
      */
     @Override
     public CompletableFuture eventStreamOperationWithOnlyOutput(
-            EventStreamOperationWithOnlyOutputRequest eventStreamOperationWithOnlyOutputRequest,
-            EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) {
+        EventStreamOperationWithOnlyOutputRequest eventStreamOperationWithOnlyOutputRequest,
+        EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationWithOnlyOutputRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration,
-                eventStreamOperationWithOnlyOutputRequest.overrideConfiguration().orElse(null));
+            eventStreamOperationWithOnlyOutputRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperationWithOnlyOutput");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = new AttachHttpMetadataResponseHandler(
-                    protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationWithOnlyOutputResponse::builder));
+                protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationWithOnlyOutputResponse::builder));
             HttpResponseHandler voidResponseHandler = protocolFactory.createResponseHandler(JsonOperationMetadata
-                    .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder);
+                .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder);
             HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler(
-                    JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(),
-                    EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder)
-                            .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder)
-                            .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder)
-                            .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder)
-                            .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build());
+                JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(),
+                EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder)
+                    .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder)
+                    .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder)
+                    .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder)
+                    .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build());
             Function> eventstreamExceptionMetadataMapper = errorCode -> {
                 switch (errorCode) {
-                default:
-                    return Optional.empty();
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, eventstreamExceptionMetadataMapper);
+                operationMetadata, eventstreamExceptionMetadataMapper);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture future = new CompletableFuture<>();
             EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer
-                    . builder()
-                    .eventStreamResponseHandler(asyncResponseHandler).eventResponseHandler(eventResponseHandler)
-                    .initialResponseHandler(responseHandler).exceptionResponseHandler(errorEventResponseHandler).future(future)
-                    .executor(executor).serviceName(serviceName()).build();
+                . builder()
+                .eventStreamResponseHandler(asyncResponseHandler).eventResponseHandler(eventResponseHandler)
+                .initialResponseHandler(responseHandler).exceptionResponseHandler(errorEventResponseHandler).future(future)
+                .executor(executor).serviceName(serviceName()).build();
             CompletableFuture executeFuture = clientHandler.execute(
-                    new ClientExecutionParams()
-                            .withOperationName("EventStreamOperationWithOnlyOutput").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new EventStreamOperationWithOnlyOutputRequestMarshaller(protocolFactory))
-                            .withResponseHandler(voidResponseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withInput(eventStreamOperationWithOnlyOutputRequest), asyncResponseTransformer);
+                new ClientExecutionParams()
+                    .withOperationName("EventStreamOperationWithOnlyOutput").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new EventStreamOperationWithOnlyOutputRequestMarshaller(protocolFactory))
+                    .withResponseHandler(voidResponseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withInput(eventStreamOperationWithOnlyOutputRequest), asyncResponseTransformer);
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 if (e != null) {
                     try {
@@ -612,7 +612,7 @@ public CompletableFuture eventStreamOperationWithOnlyOutput(
             return CompletableFutureUtils.forwardExceptionTo(future, executeFuture);
         } catch (Throwable t) {
             runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                    () -> asyncResponseHandler.exceptionOccurred(t));
+                () -> asyncResponseHandler.exceptionOccurred(t));
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             return CompletableFutureUtils.failedFuture(t);
         }
@@ -644,46 +644,46 @@ public CompletableFuture eventStreamOperationWithOnlyOutput(
      */
     @Override
     public CompletableFuture getWithoutRequiredMembers(
-            GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) {
+        GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getWithoutRequiredMembersRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration, getWithoutRequiredMembersRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetWithoutRequiredMembers");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, GetWithoutRequiredMembersResponse::builder);
+                operationMetadata, GetWithoutRequiredMembersResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture executeFuture = clientHandler
-                    .execute(new ClientExecutionParams()
-                            .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withInput(getWithoutRequiredMembersRequest));
+                .execute(new ClientExecutionParams()
+                    .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withInput(getWithoutRequiredMembersRequest));
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -718,50 +718,50 @@ public CompletableFuture getWithoutRequiredMe
      */
     @Override
     public CompletableFuture operationWithChecksumRequired(
-            OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) {
+        OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration,
-                operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null));
+            operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, OperationWithChecksumRequiredResponse::builder);
+                operationMetadata, OperationWithChecksumRequiredResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture executeFuture = clientHandler
-                    .execute(new ClientExecutionParams()
-                            .withOperationName("OperationWithChecksumRequired")
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler)
-                            .withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED,
-                                    HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest));
+                .execute(new ClientExecutionParams()
+                    .withOperationName("OperationWithChecksumRequired")
+                    .withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler)
+                    .withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration)
+                    .withMetricCollector(apiCallMetricCollector)
+                    .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED,
+                        HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest));
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -795,46 +795,46 @@ public CompletableFuture operationWithChe
      */
     @Override
     public CompletableFuture operationWithNoneAuthType(
-            OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) {
+        OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, OperationWithNoneAuthTypeResponse::builder);
+                operationMetadata, OperationWithNoneAuthTypeResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture executeFuture = clientHandler
-                    .execute(new ClientExecutionParams()
-                            .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withInput(operationWithNoneAuthTypeRequest));
+                .execute(new ClientExecutionParams()
+                    .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withInput(operationWithNoneAuthTypeRequest));
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -869,51 +869,51 @@ public CompletableFuture operationWithNoneAut
      */
     @Override
     public CompletableFuture operationWithRequestCompression(
-            OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) {
+        OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration,
-                operationWithRequestCompressionRequest.overrideConfiguration().orElse(null));
+            operationWithRequestCompressionRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, OperationWithRequestCompressionResponse::builder);
+                operationMetadata, OperationWithRequestCompressionResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture executeFuture = clientHandler
-                    .execute(new ClientExecutionParams()
-                            .withOperationName("OperationWithRequestCompression")
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler)
-                            .withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION,
-                                    RequestCompression.builder().encodings("gzip").isStreaming(false).build())
-                            .withInput(operationWithRequestCompressionRequest));
+                .execute(new ClientExecutionParams()
+                    .withOperationName("OperationWithRequestCompression")
+                    .withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler)
+                    .withErrorResponseHandler(errorResponseHandler)
+
.withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withInput(operationWithRequestCompressionRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -948,46 +948,46 @@ public CompletableFuture operationWithR */ @Override public CompletableFuture paginatedOperationWithResultKey( - PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) { + PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithResultKeyRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); + paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithResultKey"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PaginatedOperationWithResultKeyResponse::builder); + operationMetadata, PaginatedOperationWithResultKeyResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata) - .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - 
.withInput(paginatedOperationWithResultKeyRequest)); + .execute(new ClientExecutionParams() + .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata) + .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(paginatedOperationWithResultKeyRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -1022,46 +1022,46 @@ public CompletableFuture paginatedOpera */ @Override public CompletableFuture paginatedOperationWithoutResultKey( - PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) { + PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithoutResultKeyRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); + paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithoutResultKey"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); + .createResponseHandler(operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata) - 
.withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(paginatedOperationWithoutResultKeyRequest)); + .execute(new ClientExecutionParams() + .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata) + .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(paginatedOperationWithoutResultKeyRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -1100,50 +1100,50 @@ public CompletableFuture paginatedOp */ @Override public CompletableFuture streamingInputOperation( - StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { + StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOperationResponse::builder); + operationMetadata, StreamingInputOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture 
executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) - .withInput(streamingInputOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) + .withInput(streamingInputOperationRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -1187,63 +1187,63 @@ public CompletableFuture streamingInputOperatio */ @Override public CompletableFuture streamingInputOutputOperation( - StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, AsyncRequestBody requestBody, - AsyncResponseTransformer asyncResponseTransformer) { + StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, AsyncRequestBody requestBody, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); + streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOutputOperation"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOutputOperationResponse::builder); + operationMetadata, StreamingInputOutputOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingInputOutputOperation") - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller - .builder() - .delegateMarshaller( - new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).transferEncoding(true).build()) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withAsyncRequestBody(requestBody).withInput(streamingInputOutputOperationRequest), - asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingInputOutputOperation") + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller + .builder() + .delegateMarshaller( + new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).transferEncoding(true).build()) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withAsyncRequestBody(requestBody).withAsyncResponseTransformer(asyncResponseTransformer) + .withInput(streamingInputOutputOperationRequest), asyncResponseTransformer); 
AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -1254,7 +1254,7 @@ public CompletableFuture streamingInputOutputOperation( } catch (Throwable t) { AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -1287,56 +1287,57 @@ public CompletableFuture streamingInputOutputOperation( */ @Override public CompletableFuture streamingOutputOperation( - StreamingOutputOperationRequest streamingOutputOperationRequest, - AsyncResponseTransformer asyncResponseTransformer) { + StreamingOutputOperationRequest streamingOutputOperationRequest, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingOutputOperationResponse::builder); + operationMetadata, StreamingOutputOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(streamingOutputOperationRequest), asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } 
                endOfStreamFuture.whenComplete((r2, e2) -> {
                    metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -1347,7 +1348,7 @@ public CompletableFuture streamingOutputOperation(
         } catch (Throwable t) {
             AsyncResponseTransformer<StreamingOutputOperationResponse, ReturnT> finalAsyncResponseTransformer = asyncResponseTransformer;
             runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                () -> finalAsyncResponseTransformer.exceptionOccurred(t));
+                    () -> finalAsyncResponseTransformer.exceptionOccurred(t));
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             return CompletableFutureUtils.failedFuture(t);
         }
@@ -1365,11 +1366,11 @@ public final String serviceName() {
     private <T extends BaseAwsJsonProtocolFactory.Builder<T>> T init(T builder) {
         return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(JsonException::builder)
-            .protocol(AwsJsonProtocol.AWS_JSON).protocolVersion("1.1");
+                .protocol(AwsJsonProtocol.AWS_JSON).protocolVersion("1.1");
     }
 
     private static List<MetricPublisher> resolveMetricPublishers(SdkClientConfiguration clientConfiguration,
-        RequestOverrideConfiguration requestOverrideConfiguration) {
+            RequestOverrideConfiguration requestOverrideConfiguration) {
         List<MetricPublisher> publishers = null;
         if (requestOverrideConfiguration != null) {
             publishers = requestOverrideConfiguration.metricPublishers();
         }
@@ -1421,7 +1422,7 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request,
     }
 
     private HttpResponseHandler<AwsServiceException> createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory,
-        JsonOperationMetadata operationMetadata, Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper) {
+            JsonOperationMetadata operationMetadata, Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper) {
         return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper);
     }
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-cbor-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-cbor-async-client-class.java
index 235d8c308f2e..179cda47f8d4 100644
--- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-cbor-async-client-class.java
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-cbor-async-client-class.java
@@ -129,7 +129,7 @@ final class DefaultJsonAsyncClient implements JsonAsyncClient {
     private static final Logger log = LoggerFactory.getLogger(DefaultJsonAsyncClient.class);
 
     private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder()
-        .serviceProtocol(AwsServiceProtocol.CBOR).build();
+            .serviceProtocol(AwsServiceProtocol.CBOR).build();
 
     private final AsyncClientHandler clientHandler;
 
@@ -182,46 +182,46 @@ public JsonUtilities utilities() {
     public CompletableFuture<APostOperationResponse> aPostOperation(APostOperationRequest aPostOperationRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest
-            .overrideConfiguration().orElse(null));
+                .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ?
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, APostOperationResponse::builder); + operationMetadata, APostOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); String hostPrefix = "{StringMember}-foo."; HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember", - "aPostOperationRequest"); + "aPostOperationRequest"); String resolvedHostExpression = String.format("%s-foo.", aPostOperationRequest.stringMember()); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -260,46 +260,46 @@ public CompletableFuture aPostOperation(APostOperationRe */ @Override public CompletableFuture aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) { SdkClientConfiguration clientConfiguration = 
updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, APostOperationWithOutputResponse::builder); + operationMetadata, APostOperationWithOutputResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(aPostOperationWithOutputRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(aPostOperationWithOutputRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -333,81 +333,81 @@ public CompletableFuture aPostOperationWithOut */ @Override public CompletableFuture eventStreamOperation(EventStreamOperationRequest eventStreamOperationRequest, - Publisher requestStream, EventStreamOperationResponseHandler 
asyncResponseHandler) { + Publisher requestStream, EventStreamOperationResponseHandler asyncResponseHandler) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, eventStreamOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperation"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = new AttachHttpMetadataResponseHandler( - protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationResponse::builder)); + protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationResponse::builder)); HttpResponseHandler voidResponseHandler = protocolFactory.createResponseHandler(JsonOperationMetadata - .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder); + .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder); HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler( - JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), - EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder) - .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder) - .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder) - .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder) - .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build()); + JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), + EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder) + .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder) + .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder) + .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder) + .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build()); Function> eventstreamExceptionMetadataMapper = errorCode -> { switch (errorCode) { - default: - return Optional.empty(); + default: + return Optional.empty(); } }; HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, eventstreamExceptionMetadataMapper); + operationMetadata, eventstreamExceptionMetadataMapper); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - 
default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() - .putMarshaller(DefaultInputEvent.class, new InputEventMarshaller(protocolFactory)).build(); + .putMarshaller(DefaultInputEvent.class, new InputEventMarshaller(protocolFactory)).build(); SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream); Publisher adapted = eventPublisher.map(event -> eventMarshaller.marshall(event)).map( - AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer); + AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer); CompletableFuture future = new CompletableFuture<>(); EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer - . builder().eventStreamResponseHandler(asyncResponseHandler) - .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler) - .exceptionResponseHandler(errorEventResponseHandler).future(future).executor(executor) - .serviceName(serviceName()).build(); + . builder().eventStreamResponseHandler(asyncResponseHandler) + .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler) + .exceptionResponseHandler(errorEventResponseHandler).future(future).executor(executor) + .serviceName(serviceName()).build(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("EventStreamOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory)) - .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withFullDuplex(true) - .withInitialRequestEvent(true).withResponseHandler(voidResponseHandler) - .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationRequest), - asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("EventStreamOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory)) + .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withFullDuplex(true) + .withInitialRequestEvent(true).withResponseHandler(voidResponseHandler) + .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationRequest), + asyncResponseTransformer); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { try { @@ -422,7 +422,7 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest return CompletableFutureUtils.forwardExceptionTo(future, executeFuture); } catch (Throwable t) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseHandler.exceptionOccurred(t)); + 
() -> asyncResponseHandler.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -451,54 +451,54 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest */ @Override public CompletableFuture eventStreamOperationWithOnlyInput( - EventStreamOperationWithOnlyInputRequest eventStreamOperationWithOnlyInputRequest, - Publisher requestStream) { + EventStreamOperationWithOnlyInputRequest eventStreamOperationWithOnlyInputRequest, + Publisher requestStream) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationWithOnlyInputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - eventStreamOperationWithOnlyInputRequest.overrideConfiguration().orElse(null)); + eventStreamOperationWithOnlyInputRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperationWithOnlyInput"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(operationMetadata, EventStreamOperationWithOnlyInputResponse::builder); + .createResponseHandler(operationMetadata, EventStreamOperationWithOnlyInputResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() - .putMarshaller(DefaultInputEventOne.class, new InputEventMarshaller(protocolFactory)) - .putMarshaller(DefaultInputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build(); + .putMarshaller(DefaultInputEventOne.class, new InputEventMarshaller(protocolFactory)) + .putMarshaller(DefaultInputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build(); SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream); Publisher adapted = eventPublisher.map(event -> 
eventMarshaller.marshall(event)).map( - AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer); + AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("EventStreamOperationWithOnlyInput").withProtocolMetadata(protocolMetadata) - .withMarshaller(new EventStreamOperationWithOnlyInputRequestMarshaller(protocolFactory)) - .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withInitialRequestEvent(true) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(eventStreamOperationWithOnlyInputRequest)); + .execute(new ClientExecutionParams() + .withOperationName("EventStreamOperationWithOnlyInput").withProtocolMetadata(protocolMetadata) + .withMarshaller(new EventStreamOperationWithOnlyInputRequestMarshaller(protocolFactory)) + .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withInitialRequestEvent(true) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(eventStreamOperationWithOnlyInputRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -533,75 +533,75 @@ public CompletableFuture eventStreamO */ @Override public CompletableFuture eventStreamOperationWithOnlyOutput( - EventStreamOperationWithOnlyOutputRequest eventStreamOperationWithOnlyOutputRequest, - EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) { + EventStreamOperationWithOnlyOutputRequest eventStreamOperationWithOnlyOutputRequest, + EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationWithOnlyOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - eventStreamOperationWithOnlyOutputRequest.overrideConfiguration().orElse(null)); + eventStreamOperationWithOnlyOutputRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperationWithOnlyOutput");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = new AttachHttpMetadataResponseHandler(
-                    protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationWithOnlyOutputResponse::builder));
+                protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationWithOnlyOutputResponse::builder));
             HttpResponseHandler voidResponseHandler = protocolFactory.createResponseHandler(JsonOperationMetadata
-                    .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder);
+                .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder);
             HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler(
-                    JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(),
-                    EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder)
-                            .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder)
-                            .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder)
-                            .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder)
-                            .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build());
+                JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(),
+                EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder)
+                    .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder)
+                    .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder)
+                    .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder)
+                    .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build());
             Function> eventstreamExceptionMetadataMapper = errorCode -> {
                 switch (errorCode) {
-                default:
-                    return Optional.empty();
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, eventstreamExceptionMetadataMapper);
+                operationMetadata, eventstreamExceptionMetadataMapper);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture future = new CompletableFuture<>();
             EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer
-                    . builder()
-                    .eventStreamResponseHandler(asyncResponseHandler).eventResponseHandler(eventResponseHandler)
-                    .initialResponseHandler(responseHandler).exceptionResponseHandler(errorEventResponseHandler).future(future)
-                    .executor(executor).serviceName(serviceName()).build();
+                . builder()
+                .eventStreamResponseHandler(asyncResponseHandler).eventResponseHandler(eventResponseHandler)
+                .initialResponseHandler(responseHandler).exceptionResponseHandler(errorEventResponseHandler).future(future)
+                .executor(executor).serviceName(serviceName()).build();
             CompletableFuture executeFuture = clientHandler.execute(
-                    new ClientExecutionParams()
-                            .withOperationName("EventStreamOperationWithOnlyOutput").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new EventStreamOperationWithOnlyOutputRequestMarshaller(protocolFactory))
-                            .withResponseHandler(voidResponseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withInput(eventStreamOperationWithOnlyOutputRequest), asyncResponseTransformer);
+                new ClientExecutionParams()
+                    .withOperationName("EventStreamOperationWithOnlyOutput").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new EventStreamOperationWithOnlyOutputRequestMarshaller(protocolFactory))
+                    .withResponseHandler(voidResponseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withInput(eventStreamOperationWithOnlyOutputRequest), asyncResponseTransformer);
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 if (e != null) {
                     try {
@@ -616,7 +616,7 @@ public CompletableFuture eventStreamOperationWithOnlyOutput(
             return CompletableFutureUtils.forwardExceptionTo(future, executeFuture);
         } catch (Throwable t) {
             runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                    () -> asyncResponseHandler.exceptionOccurred(t));
+                () -> asyncResponseHandler.exceptionOccurred(t));
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             return CompletableFutureUtils.failedFuture(t);
         }
@@ -648,46 +648,46 @@ public CompletableFuture eventStreamOperationWithOnlyOutput(
      */
     @Override
     public CompletableFuture getWithoutRequiredMembers(
-            GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) {
+        GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getWithoutRequiredMembersRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration, getWithoutRequiredMembersRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetWithoutRequiredMembers");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, GetWithoutRequiredMembersResponse::builder);
+                operationMetadata, GetWithoutRequiredMembersResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture executeFuture = clientHandler
-                    .execute(new ClientExecutionParams()
-                            .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withInput(getWithoutRequiredMembersRequest));
+                .execute(new ClientExecutionParams()
+                    .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withInput(getWithoutRequiredMembersRequest));
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -722,50 +722,50 @@ public CompletableFuture getWithoutRequiredMe
      */
     @Override
     public CompletableFuture operationWithChecksumRequired(
-            OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) {
+        OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration,
-                operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null));
+            operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, OperationWithChecksumRequiredResponse::builder);
+                operationMetadata, OperationWithChecksumRequiredResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture executeFuture = clientHandler
-                    .execute(new ClientExecutionParams()
-                            .withOperationName("OperationWithChecksumRequired")
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler)
-                            .withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED,
-                                    HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest));
+                .execute(new ClientExecutionParams()
+                    .withOperationName("OperationWithChecksumRequired")
+                    .withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler)
+                    .withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration)
+                    .withMetricCollector(apiCallMetricCollector)
+                    .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED,
+                        HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest));
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -799,46 +799,46 @@ public CompletableFuture operationWithChe
      */
     @Override
     public CompletableFuture operationWithNoneAuthType(
-            OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) {
+        OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, OperationWithNoneAuthTypeResponse::builder);
+                operationMetadata, OperationWithNoneAuthTypeResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture executeFuture = clientHandler
-                    .execute(new ClientExecutionParams()
-                            .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withInput(operationWithNoneAuthTypeRequest));
+                .execute(new ClientExecutionParams()
+                    .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withInput(operationWithNoneAuthTypeRequest));
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -873,51 +873,51 @@ public CompletableFuture operationWithNoneAut
      */
     @Override
     public CompletableFuture operationWithRequestCompression(
-            OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) {
+        OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration,
-                operationWithRequestCompressionRequest.overrideConfiguration().orElse(null));
+            operationWithRequestCompressionRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, OperationWithRequestCompressionResponse::builder);
+                operationMetadata, OperationWithRequestCompressionResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture executeFuture = clientHandler
-                    .execute(new ClientExecutionParams()
-                            .withOperationName("OperationWithRequestCompression")
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler)
-                            .withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION,
-                                    RequestCompression.builder().encodings("gzip").isStreaming(false).build())
-                            .withInput(operationWithRequestCompressionRequest));
+                .execute(new ClientExecutionParams()
+                    .withOperationName("OperationWithRequestCompression")
+                    .withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler)
+                    .withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration)
+                    .withMetricCollector(apiCallMetricCollector)
+                    .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION,
+                        RequestCompression.builder().encodings("gzip").isStreaming(false).build())
+                    .withInput(operationWithRequestCompressionRequest));
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -952,46 +952,46 @@ public CompletableFuture operationWithR
      */
     @Override
     public CompletableFuture paginatedOperationWithResultKey(
-            PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) {
+        PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithResultKeyRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration,
-                paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null));
+            paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithResultKey");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, PaginatedOperationWithResultKeyResponse::builder);
+                operationMetadata, PaginatedOperationWithResultKeyResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture executeFuture = clientHandler
-                    .execute(new ClientExecutionParams()
-                            .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withInput(paginatedOperationWithResultKeyRequest));
+                .execute(new ClientExecutionParams()
+                    .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withInput(paginatedOperationWithResultKeyRequest));
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -1026,46 +1026,46 @@ public CompletableFuture paginatedOpera
      */
     @Override
     public CompletableFuture paginatedOperationWithoutResultKey(
-            PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) {
+        PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithoutResultKeyRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration,
-                paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null));
+            paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithoutResultKey");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = protocolFactory
-                    .createResponseHandler(operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder);
+                .createResponseHandler(operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture executeFuture = clientHandler
-                    .execute(new ClientExecutionParams()
-                            .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withInput(paginatedOperationWithoutResultKeyRequest));
+                .execute(new ClientExecutionParams()
+                    .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withInput(paginatedOperationWithoutResultKeyRequest));
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -1104,50 +1104,50 @@ public CompletableFuture paginatedOp
      */
     @Override
     public CompletableFuture streamingInputOperation(
-            StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) {
+        StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, StreamingInputOperationResponse::builder);
+                operationMetadata, StreamingInputOperationResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture executeFuture = clientHandler
-                    .execute(new ClientExecutionParams()
-                            .withOperationName("StreamingInputOperation")
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(
-                                    AsyncStreamingRequestMarshaller.builder()
-                                            .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory))
-                                            .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler)
-                            .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration)
-                            .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody)
-                            .withInput(streamingInputOperationRequest));
+                .execute(new ClientExecutionParams()
+                    .withOperationName("StreamingInputOperation")
+                    .withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(
+                        AsyncStreamingRequestMarshaller.builder()
+                            .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory))
+                            .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler)
+                    .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration)
+                    .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody)
+                    .withInput(streamingInputOperationRequest));
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -1191,63 +1191,63 @@ public CompletableFuture streamingInputOperatio
      */
     @Override
     public CompletableFuture streamingInputOutputOperation(
-            StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, AsyncRequestBody requestBody,
-            AsyncResponseTransformer asyncResponseTransformer) {
+        StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, AsyncRequestBody requestBody,
+        AsyncResponseTransformer asyncResponseTransformer) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOutputOperationRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration,
-                streamingInputOutputOperationRequest.overrideConfiguration().orElse(null));
+            streamingInputOutputOperationRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOutputOperation");
             Pair, CompletableFuture> pair = AsyncResponseTransformerUtils
-                    .wrapWithEndOfStreamFuture(asyncResponseTransformer);
+                .wrapWithEndOfStreamFuture(asyncResponseTransformer);
             asyncResponseTransformer = pair.left();
             CompletableFuture endOfStreamFuture = pair.right();
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true)
-                    .isPayloadJson(false).build();
+                .isPayloadJson(false).build();
             HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, StreamingInputOutputOperationResponse::builder);
+                operationMetadata, StreamingInputOutputOperationResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture executeFuture = clientHandler.execute(
-                    new ClientExecutionParams()
-                            .withOperationName("StreamingInputOutputOperation")
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(
-                                    AsyncStreamingRequestMarshaller
-                                            .builder()
-                                            .delegateMarshaller(
-                                                    new StreamingInputOutputOperationRequestMarshaller(protocolFactory))
-                                            .asyncRequestBody(requestBody).transferEncoding(true).build())
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withAsyncRequestBody(requestBody).withInput(streamingInputOutputOperationRequest),
-                    asyncResponseTransformer);
+                new ClientExecutionParams()
+                    .withOperationName("StreamingInputOutputOperation")
+                    .withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(
+                        AsyncStreamingRequestMarshaller
+                            .builder()
+                            .delegateMarshaller(
+                                new StreamingInputOutputOperationRequestMarshaller(protocolFactory))
+                            .asyncRequestBody(requestBody).transferEncoding(true).build())
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withAsyncRequestBody(requestBody).withAsyncResponseTransformer(asyncResponseTransformer)
+                    .withInput(streamingInputOutputOperationRequest), asyncResponseTransformer);
             AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer;
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 if (e != null) {
                     runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                            () -> finalAsyncResponseTransformer.exceptionOccurred(e));
+                        () -> finalAsyncResponseTransformer.exceptionOccurred(e));
                 }
                 endOfStreamFuture.whenComplete((r2, e2) -> {
                     metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -1258,7 +1258,7 @@ public CompletableFuture streamingInputOutputOperation(
         } catch (Throwable t) {
             AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer;
             runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                    () -> finalAsyncResponseTransformer.exceptionOccurred(t));
+                () -> finalAsyncResponseTransformer.exceptionOccurred(t));
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             return CompletableFutureUtils.failedFuture(t);
         }
@@ -1291,56 +1291,57 @@ public CompletableFuture streamingInputOutputOperation(
      */
     @Override
     public CompletableFuture streamingOutputOperation(
-            StreamingOutputOperationRequest streamingOutputOperationRequest,
-            AsyncResponseTransformer asyncResponseTransformer) {
+        StreamingOutputOperationRequest streamingOutputOperationRequest,
+        AsyncResponseTransformer asyncResponseTransformer) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation");
             Pair, CompletableFuture> pair = AsyncResponseTransformerUtils
-                    .wrapWithEndOfStreamFuture(asyncResponseTransformer);
+                .wrapWithEndOfStreamFuture(asyncResponseTransformer);
            asyncResponseTransformer = pair.left();
             CompletableFuture endOfStreamFuture = pair.right();
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true)
-                    .isPayloadJson(false).build();
+                .isPayloadJson(false).build();
             HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, StreamingOutputOperationResponse::builder);
+                operationMetadata, StreamingOutputOperationResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInputException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "ServiceFaultException":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
-                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInputException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "ServiceFaultException":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500)
+                            .exceptionBuilderSupplier(ServiceFaultException::builder).build());
+                    default:
+                        return Optional.empty();
                }
            };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture executeFuture = clientHandler.execute(
-                    new ClientExecutionParams()
-                            .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withInput(streamingOutputOperationRequest), asyncResponseTransformer);
+                new ClientExecutionParams()
+                    .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest),
+                asyncResponseTransformer);
             AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer;
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 if (e != null) {
                     runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                            () -> finalAsyncResponseTransformer.exceptionOccurred(e));
+                        () -> finalAsyncResponseTransformer.exceptionOccurred(e));
                 }
                 endOfStreamFuture.whenComplete((r2, e2) -> {
                     metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -1351,7 +1352,7 @@ public CompletableFuture streamingOutputOperation(
         } catch (Throwable t) {
             AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer;
             runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                    () -> finalAsyncResponseTransformer.exceptionOccurred(t));
+                () -> finalAsyncResponseTransformer.exceptionOccurred(t));
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             return CompletableFutureUtils.failedFuture(t);
         }
@@ -1369,11 +1370,11 @@ public final String serviceName() {
     private > T init(T builder) {
         return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(JsonException::builder)
-                .protocol(AwsJsonProtocol.AWS_JSON).protocolVersion("1.1");
+            .protocol(AwsJsonProtocol.AWS_JSON).protocolVersion("1.1");
     }

     private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration,
-            RequestOverrideConfiguration requestOverrideConfiguration) {
+        RequestOverrideConfiguration requestOverrideConfiguration) {
         List publishers = null;
         if (requestOverrideConfiguration != null) {
             publishers = requestOverrideConfiguration.metricPublishers();
@@ -1425,7 +1426,7 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request,
     }

     private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory,
-            JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) {
+        JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) {
         return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper);
     }

diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-json-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-json-async-client-class.java
index 48a6d4ca2de0..ca1e1d24abd1 100644
--- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-json-async-client-class.java
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-json-async-client-class.java
@@ -139,7 +139,7 @@ final class DefaultJsonAsyncClient implements JsonAsyncClient {
     private static final Logger log = LoggerFactory.getLogger(DefaultJsonAsyncClient.class);

     private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder()
-            .serviceProtocol(AwsServiceProtocol.REST_JSON).build();
+        .serviceProtocol(AwsServiceProtocol.REST_JSON).build();

     private final AsyncClientHandler clientHandler;

@@ -192,43 +192,43 @@ public JsonUtilities utilities() {
     public CompletableFuture aPostOperation(APostOperationRequest aPostOperationRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest,
                 this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, APostOperationResponse::builder);
+                operationMetadata, APostOperationResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInput":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInput":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             String hostPrefix = "{StringMember}-foo.";
             HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember",
-                    "aPostOperationRequest");
+                "aPostOperationRequest");
             String resolvedHostExpression = String.format("%s-foo.", aPostOperationRequest.stringMember());
             CompletableFuture executeFuture = clientHandler
-                    .execute(new ClientExecutionParams()
-                            .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest));
+                .execute(new ClientExecutionParams()
+                    .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest));
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -266,43 +266,43 @@ public CompletableFuture aPostOperation(APostOperationRe
      */
     @Override
     public CompletableFuture aPostOperationWithOutput(
-            APostOperationWithOutputRequest aPostOperationWithOutputRequest) {
+        APostOperationWithOutputRequest aPostOperationWithOutputRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, APostOperationWithOutputResponse::builder);
+                operationMetadata, APostOperationWithOutputResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInput":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInput":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture executeFuture = clientHandler
-                    .execute(new ClientExecutionParams()
-                            .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withInput(aPostOperationWithOutputRequest));
+                .execute(new ClientExecutionParams()
+                    .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withInput(aPostOperationWithOutputRequest));
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -336,43 +336,43 @@ public CompletableFuture aPostOperationWithOut
      */
     @Override
     public CompletableFuture bearerAuthOperation(
-            BearerAuthOperationRequest bearerAuthOperationRequest) {
+        BearerAuthOperationRequest bearerAuthOperationRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, BearerAuthOperationResponse::builder);
+                operationMetadata, BearerAuthOperationResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInput":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInput":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture executeFuture = clientHandler
-                    .execute(new ClientExecutionParams()
-                            .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .credentialType(CredentialType.TOKEN).withInput(bearerAuthOperationRequest));
+                .execute(new ClientExecutionParams()
+                    .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .credentialType(CredentialType.TOKEN).withInput(bearerAuthOperationRequest));
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -406,87 +406,87 @@ public CompletableFuture bearerAuthOperation(
      */
     @Override
     public CompletableFuture eventStreamOperation(EventStreamOperationRequest eventStreamOperationRequest,
-            Publisher requestStream, EventStreamOperationResponseHandler asyncResponseHandler) {
+        Publisher requestStream, EventStreamOperationResponseHandler asyncResponseHandler) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration, eventStreamOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperation");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = new AttachHttpMetadataResponseHandler(
-                    protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationResponse::builder));
+                protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationResponse::builder));
             HttpResponseHandler voidResponseHandler = protocolFactory.createResponseHandler(JsonOperationMetadata
-                    .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder);
+                .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder);
             HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler(
-                    JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(),
-                    EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder)
-                            .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder)
-                            .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder)
-                            .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder)
-                            .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build());
+                JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(),
+                EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder)
+                    .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder)
+                    .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder)
+                    .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder)
+                    .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build());
             Function> eventstreamExceptionMetadataMapper = errorCode -> {
                 switch (errorCode) {
-                case "errorOne":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "errorTwo":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "errorOne":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    case "errorTwo":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, eventstreamExceptionMetadataMapper);
+                operationMetadata, eventstreamExceptionMetadataMapper);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInput":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInput":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder()
-                    .putMarshaller(DefaultInputEvent.class, new InputEventMarshaller(protocolFactory)).build();
+                .putMarshaller(DefaultInputEvent.class, new InputEventMarshaller(protocolFactory)).build();
             SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream);
             Publisher adapted = eventPublisher.map(event -> eventMarshaller.marshall(event)).map(
-                    AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer);
+                AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer);
             CompletableFuture future = new CompletableFuture<>();
             EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer
-                    . builder().eventStreamResponseHandler(asyncResponseHandler)
-                    .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler)
-                    .exceptionResponseHandler(errorEventResponseHandler).future(future).executor(executor)
-                    .serviceName(serviceName()).build();
+                . builder().eventStreamResponseHandler(asyncResponseHandler)
+                .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler)
+                .exceptionResponseHandler(errorEventResponseHandler).future(future).executor(executor)
+                .serviceName(serviceName()).build();
             RestEventStreamAsyncResponseTransformer restAsyncResponseTransformer = RestEventStreamAsyncResponseTransformer
-                    . builder()
-                    .eventStreamAsyncResponseTransformer(asyncResponseTransformer)
-                    .eventStreamResponseHandler(asyncResponseHandler).build();
+                . builder()
+                .eventStreamAsyncResponseTransformer(asyncResponseTransformer)
+                .eventStreamResponseHandler(asyncResponseHandler).build();
             CompletableFuture executeFuture = clientHandler.execute(
-                    new ClientExecutionParams()
-                            .withOperationName("EventStreamOperation").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory))
-                            .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withFullDuplex(true)
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withInput(eventStreamOperationRequest), restAsyncResponseTransformer);
+                new ClientExecutionParams()
+                    .withOperationName("EventStreamOperation").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory))
+                    .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withFullDuplex(true)
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withInput(eventStreamOperationRequest), restAsyncResponseTransformer);
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 if (e != null) {
                     try {
@@ -501,7 +501,7 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest
             return CompletableFutureUtils.forwardExceptionTo(future, executeFuture);
         } catch (Throwable t) {
             runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                    () -> asyncResponseHandler.exceptionOccurred(t));
+                () -> asyncResponseHandler.exceptionOccurred(t));
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             return CompletableFutureUtils.failedFuture(t);
         }
@@ -530,50 +530,50 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest
      */
     @Override
     public CompletableFuture eventStreamOperationWithOnlyInput(
-            EventStreamOperationWithOnlyInputRequest eventStreamOperationWithOnlyInputRequest,
-            Publisher requestStream) {
+        EventStreamOperationWithOnlyInputRequest eventStreamOperationWithOnlyInputRequest,
+        Publisher requestStream) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationWithOnlyInputRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration,
-                eventStreamOperationWithOnlyInputRequest.overrideConfiguration().orElse(null));
+            eventStreamOperationWithOnlyInputRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperationWithOnlyInput");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = protocolFactory
-                    .createResponseHandler(operationMetadata, EventStreamOperationWithOnlyInputResponse::builder);
+                .createResponseHandler(operationMetadata, EventStreamOperationWithOnlyInputResponse::builder);
             Function> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInput":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInput":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder()
-                    .putMarshaller(DefaultInputEventOne.class, new InputEventMarshaller(protocolFactory))
-                    .putMarshaller(DefaultInputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build();
+                .putMarshaller(DefaultInputEventOne.class, new InputEventMarshaller(protocolFactory))
+                .putMarshaller(DefaultInputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build();
             SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream);
             Publisher adapted = eventPublisher.map(event -> eventMarshaller.marshall(event)).map(
-                    AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer);
+                AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer);
             CompletableFuture executeFuture = clientHandler
-                    .execute(new ClientExecutionParams()
-                            .withOperationName("EventStreamOperationWithOnlyInput").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new EventStreamOperationWithOnlyInputRequestMarshaller(protocolFactory))
-                            .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withResponseHandler(responseHandler)
-                            .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration)
-                            .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationWithOnlyInputRequest));
+                .execute(new ClientExecutionParams()
+                    .withOperationName("EventStreamOperationWithOnlyInput").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new EventStreamOperationWithOnlyInputRequestMarshaller(protocolFactory))
+                    .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withResponseHandler(responseHandler)
+                    .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration)
+                    .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationWithOnlyInputRequest));
             CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -608,84 +608,84 @@ public CompletableFuture eventStreamO
      */
     @Override
     public CompletableFuture eventStreamOperationWithOnlyOutput(
-            EventStreamOperationWithOnlyOutputRequest eventStreamOperationWithOnlyOutputRequest,
-            EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) {
+        EventStreamOperationWithOnlyOutputRequest eventStreamOperationWithOnlyOutputRequest,
+        EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationWithOnlyOutputRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List metricPublishers = resolveMetricPublishers(clientConfiguration,
-                eventStreamOperationWithOnlyOutputRequest.overrideConfiguration().orElse(null));
+            eventStreamOperationWithOnlyOutputRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperationWithOnlyOutput");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler responseHandler = new AttachHttpMetadataResponseHandler(
-                    protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationWithOnlyOutputResponse::builder));
+                protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationWithOnlyOutputResponse::builder));
             HttpResponseHandler voidResponseHandler = protocolFactory.createResponseHandler(JsonOperationMetadata
-                    .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder);
+                .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder);
             HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler(
-                    JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(),
-                    EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder)
-                            .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder)
-                            .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder)
-                            .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder)
-                            .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build());
+                JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(),
+                EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder)
+                    .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder)
+                    .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder)
+                    .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder)
+                    .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build());
             Function> eventstreamExceptionMetadataMapper = errorCode -> {
                 switch (errorCode) {
-                case "errorOne":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                case "errorTwo":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "errorOne":
+                        return
Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "errorTwo": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, eventstreamExceptionMetadataMapper); + operationMetadata, eventstreamExceptionMetadataMapper); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture future = new CompletableFuture<>(); EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer - . builder() - .eventStreamResponseHandler(asyncResponseHandler).eventResponseHandler(eventResponseHandler) - .initialResponseHandler(responseHandler).exceptionResponseHandler(errorEventResponseHandler).future(future) - .executor(executor).serviceName(serviceName()).build(); + . builder() + .eventStreamResponseHandler(asyncResponseHandler).eventResponseHandler(eventResponseHandler) + .initialResponseHandler(responseHandler).exceptionResponseHandler(errorEventResponseHandler).future(future) + .executor(executor).serviceName(serviceName()).build(); RestEventStreamAsyncResponseTransformer restAsyncResponseTransformer = RestEventStreamAsyncResponseTransformer - . builder() - .eventStreamAsyncResponseTransformer(asyncResponseTransformer) - .eventStreamResponseHandler(asyncResponseHandler).build(); + . 
builder() + .eventStreamAsyncResponseTransformer(asyncResponseTransformer) + .eventStreamResponseHandler(asyncResponseHandler).build(); CompletableFuture executeFuture = clientHandler - .execute( - new ClientExecutionParams() - .withOperationName("EventStreamOperationWithOnlyOutput") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new EventStreamOperationWithOnlyOutputRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(eventStreamOperationWithOnlyOutputRequest), restAsyncResponseTransformer); + .execute( + new ClientExecutionParams() + .withOperationName("EventStreamOperationWithOnlyOutput") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new EventStreamOperationWithOnlyOutputRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(eventStreamOperationWithOnlyOutputRequest), restAsyncResponseTransformer); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { try { @@ -700,7 +700,7 @@ public CompletableFuture eventStreamOperationWithOnlyOutput( return CompletableFutureUtils.forwardExceptionTo(future, executeFuture); } catch (Throwable t) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseHandler.exceptionOccurred(t)); + () -> asyncResponseHandler.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -728,51 +728,51 @@ public CompletableFuture eventStreamOperationWithOnlyOutput( */ @Override public CompletableFuture getOperationWithChecksum( - GetOperationWithChecksumRequest getOperationWithChecksumRequest) { + GetOperationWithChecksumRequest getOperationWithChecksumRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, GetOperationWithChecksumResponse::builder); + operationMetadata, GetOperationWithChecksumResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) - .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) - .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) - .withInput(getOperationWithChecksumRequest)); + .execute(new ClientExecutionParams() + .withOperationName("GetOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) + .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) + .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) + .withInput(getOperationWithChecksumRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -810,43 +810,43 @@ public CompletableFuture getOperationWithCheck */ @Override public CompletableFuture getWithoutRequiredMembers( - GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) { + GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getWithoutRequiredMembersRequest, - this.clientConfiguration); + this.clientConfiguration); List 
metricPublishers = resolveMetricPublishers(clientConfiguration, getWithoutRequiredMembersRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetWithoutRequiredMembers"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, GetWithoutRequiredMembersResponse::builder); + operationMetadata, GetWithoutRequiredMembersResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata) - .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(getWithoutRequiredMembersRequest)); + .execute(new ClientExecutionParams() + .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata) + .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(getWithoutRequiredMembersRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -881,47 +881,47 @@ public CompletableFuture getWithoutRequiredMe */ @Override public CompletableFuture operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, OperationWithChecksumRequiredResponse::builder); + operationMetadata, OperationWithChecksumRequiredResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -956,48 +956,48 @@ public CompletableFuture operationWithChe */ @Override public CompletableFuture operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, OperationWithRequestCompressionResponse::builder); + operationMetadata, OperationWithRequestCompressionResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithRequestCompression") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withInput(operationWithRequestCompressionRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withInput(operationWithRequestCompressionRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -1032,43 +1032,43 @@ public CompletableFuture operationWithR */ @Override public CompletableFuture paginatedOperationWithResultKey( - PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) { + PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithResultKeyRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); + 
paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithResultKey"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PaginatedOperationWithResultKeyResponse::builder); + operationMetadata, PaginatedOperationWithResultKeyResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata) - .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(paginatedOperationWithResultKeyRequest)); + .execute(new ClientExecutionParams() + .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata) + .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(paginatedOperationWithResultKeyRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -1103,43 +1103,43 @@ public CompletableFuture paginatedOpera */ @Override public CompletableFuture paginatedOperationWithoutResultKey( - PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) { + PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithoutResultKeyRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); + paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithoutResultKey"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); + .createResponseHandler(operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata) - .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(paginatedOperationWithoutResultKeyRequest)); + .execute(new ClientExecutionParams() + .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata) + .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(paginatedOperationWithoutResultKeyRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -1191,70 +1191,71 @@ public CompletableFuture paginatedOp */ @Override public CompletableFuture putOperationWithChecksum( - PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody, - AsyncResponseTransformer asyncResponseTransformer) { + PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PutOperationWithChecksumResponse::builder); + operationMetadata, PutOperationWithChecksumResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("PutOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .withAsyncRequestBody(requestBody) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum - .builder() - .requestChecksumRequired(false) - .isRequestStreaming(true) - .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) - .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, - DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, - DefaultChecksumAlgorithm.SHA256).build()) - .withInput(putOperationWithChecksumRequest), asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("PutOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .withAsyncRequestBody(requestBody) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum + .builder() + .requestChecksumRequired(false) + .isRequestStreaming(true) + .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) + 
.responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, + DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, + DefaultChecksumAlgorithm.SHA256).build()) + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(putOperationWithChecksumRequest), + asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -1265,7 +1266,7 @@ public CompletableFuture putOperationWithChecksum( } catch (Throwable t) { AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -1298,47 +1299,47 @@ public CompletableFuture putOperationWithChecksum( */ @Override public CompletableFuture streamingInputOperation( - StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { + StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOperationResponse::builder); + operationMetadata, StreamingInputOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) - .withInput(streamingInputOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) + .withInput(streamingInputOperationRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -1382,60 +1383,60 @@ public CompletableFuture streamingInputOperatio */ @Override public CompletableFuture streamingInputOutputOperation( - StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, AsyncRequestBody requestBody, - AsyncResponseTransformer asyncResponseTransformer) { + StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, AsyncRequestBody requestBody, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); + 
streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOutputOperation"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOutputOperationResponse::builder); + operationMetadata, StreamingInputOutputOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingInputOutputOperation") - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller - .builder() - .delegateMarshaller( - new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).transferEncoding(true).build()) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withAsyncRequestBody(requestBody).withInput(streamingInputOutputOperationRequest), - asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingInputOutputOperation") + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller + .builder() + .delegateMarshaller( + new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).transferEncoding(true).build()) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withAsyncRequestBody(requestBody).withAsyncResponseTransformer(asyncResponseTransformer) + .withInput(streamingInputOutputOperationRequest), asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); 
+ () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -1446,7 +1447,7 @@ public CompletableFuture streamingInputOutputOperation( } catch (Throwable t) { AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -1479,53 +1480,54 @@ public CompletableFuture streamingInputOutputOperation( */ @Override public CompletableFuture streamingOutputOperation( - StreamingOutputOperationRequest streamingOutputOperationRequest, - AsyncResponseTransformer asyncResponseTransformer) { + StreamingOutputOperationRequest streamingOutputOperationRequest, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingOutputOperationResponse::builder); + operationMetadata, StreamingOutputOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - 
.withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(streamingOutputOperationRequest), asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -1536,7 +1538,7 @@ public CompletableFuture streamingOutputOperation( } catch (Throwable t) { AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -1559,11 +1561,11 @@ public final String serviceName() { private > T init(T builder) { return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(JsonException::builder) - .protocol(AwsJsonProtocol.REST_JSON).protocolVersion("1.1"); + .protocol(AwsJsonProtocol.REST_JSON).protocolVersion("1.1"); } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -1615,7 +1617,7 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, } private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { + JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-json-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-json-client-class.java index 746f4f882f01..8c5920371062 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-json-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-json-client-class.java @@ -98,7 +98,7 @@ final class DefaultJsonClient implements JsonClient { private static final Logger log = Logger.loggerFor(DefaultJsonClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - 
.serviceProtocol(AwsServiceProtocol.REST_JSON).build(); + .serviceProtocol(AwsServiceProtocol.REST_JSON).build(); private final SyncClientHandler clientHandler; @@ -134,45 +134,45 @@ protected DefaultJsonClient(SdkClientConfiguration clientConfiguration) { */ @Override public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException, - AwsServiceException, SdkClientException, JsonException { + AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, - APostOperationResponse::builder); + APostOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); String hostPrefix = "{StringMember}-foo."; HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember", - "aPostOperationRequest"); + "aPostOperationRequest"); String resolvedHostExpression = String.format("%s-foo.", aPostOperationRequest.stringMember()); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) - .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) + .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -200,44 +200,44 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio */ @Override public APostOperationWithOutputResponse aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, JsonException { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, APostOperationWithOutputResponse::builder); + operationMetadata, APostOperationWithOutputResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = 
metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); return clientHandler - .execute(new ClientExecutionParams<APostOperationWithOutputRequest, APostOperationWithOutputResponse>() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams<APostOperationWithOutputRequest, APostOperationWithOutputResponse>() + .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -261,42 +261,42 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( */ @Override public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationRequest bearerAuthOperationRequest) - throws AwsServiceException, SdkClientException, JsonException { + throws AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler<BearerAuthOperationResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, BearerAuthOperationResponse::builder); + operationMetadata, BearerAuthOperationResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector =
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation"); return clientHandler.execute(new ClientExecutionParams<BearerAuthOperationRequest, BearerAuthOperationResponse>() - .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) - .withInput(bearerAuthOperationRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); + .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) + .withInput(bearerAuthOperationRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -320,52 +320,52 @@ public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationReques */ @Override public GetOperationWithChecksumResponse getOperationWithChecksum( - GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, - JsonException { + GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, + JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler<GetOperationWithChecksumResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, GetOperationWithChecksumResponse::builder); + operationMetadata, GetOperationWithChecksumResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ?
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum"); return clientHandler - .execute(new ClientExecutionParams<GetOperationWithChecksumRequest, GetOperationWithChecksumResponse>() - .withOperationName("GetOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(getOperationWithChecksumRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) - .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) - .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) - .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams<GetOperationWithChecksumRequest, GetOperationWithChecksumResponse>() + .withOperationName("GetOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(getOperationWithChecksumRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) + .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) + .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) + .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -393,44 +393,44 @@ public GetOperationWithChecksumResponse getOperationWithChecksum( */ @Override public GetWithoutRequiredMembersResponse getWithoutRequiredMembers( - GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, JsonException { + GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler<GetWithoutRequiredMembersResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, GetWithoutRequiredMembersResponse::builder); + operationMetadata, GetWithoutRequiredMembersResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration =
updateSdkClientConfiguration(getWithoutRequiredMembersRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getWithoutRequiredMembersRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetWithoutRequiredMembers"); return clientHandler - .execute(new ClientExecutionParams<GetWithoutRequiredMembersRequest, GetWithoutRequiredMembersResponse>() - .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(getWithoutRequiredMembersRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams<GetWithoutRequiredMembersRequest, GetWithoutRequiredMembersResponse>() + .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(getWithoutRequiredMembersRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -454,49 +454,49 @@ public GetWithoutRequiredMembersResponse getWithoutRequiredMembers( */ @Override public OperationWithChecksumRequiredResponse operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, - SdkClientException, JsonException { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler<OperationWithChecksumRequiredResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, OperationWithChecksumRequiredResponse::builder); + operationMetadata, OperationWithChecksumRequiredResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, -
operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); return clientHandler - .execute(new ClientExecutionParams<OperationWithChecksumRequiredRequest, OperationWithChecksumRequiredResponse>() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithChecksumRequiredRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams<OperationWithChecksumRequiredRequest, OperationWithChecksumRequiredResponse>() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithChecksumRequiredRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -520,49 +520,49 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( */ @Override public OperationWithRequestCompressionResponse operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, - SdkClientException, JsonException { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler<OperationWithRequestCompressionResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, OperationWithRequestCompressionResponse::builder); + operationMetadata, OperationWithRequestCompressionResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); +
this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); return clientHandler - .execute(new ClientExecutionParams<OperationWithRequestCompressionRequest, OperationWithRequestCompressionResponse>() - .withOperationName("OperationWithRequestCompression") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithRequestCompressionRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams<OperationWithRequestCompressionRequest, OperationWithRequestCompressionResponse>() + .withOperationName("OperationWithRequestCompression") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithRequestCompressionRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -586,44 +586,44 @@ public OperationWithRequestCompressionResponse operationWithRequestCompression( */ @Override public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( - PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler<PaginatedOperationWithResultKeyResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PaginatedOperationWithResultKeyResponse::builder); + operationMetadata, PaginatedOperationWithResultKeyResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); +
operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithResultKeyRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); + paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithResultKey"); return clientHandler - .execute(new ClientExecutionParams<PaginatedOperationWithResultKeyRequest, PaginatedOperationWithResultKeyResponse>() - .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithResultKeyRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams<PaginatedOperationWithResultKeyRequest, PaginatedOperationWithResultKeyResponse>() + .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithResultKeyRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -647,44 +647,44 @@ public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( */ @Override public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResultKey( - PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler<PaginatedOperationWithoutResultKeyResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); + operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration =
updateSdkClientConfiguration(paginatedOperationWithoutResultKeyRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); + paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithoutResultKey"); return clientHandler - .execute(new ClientExecutionParams<PaginatedOperationWithoutResultKeyRequest, PaginatedOperationWithoutResultKeyResponse>() - .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithoutResultKeyRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams<PaginatedOperationWithoutResultKeyRequest, PaginatedOperationWithoutResultKeyResponse>() + .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithoutResultKeyRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -734,61 +734,62 @@ public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResul */ @Override public <ReturnT> ReturnT putOperationWithChecksum(PutOperationWithChecksumRequest putOperationWithChecksumRequest, - RequestBody requestBody, ResponseTransformer<PutOperationWithChecksumResponse, ReturnT> responseTransformer) - throws AwsServiceException, SdkClientException, JsonException { + RequestBody requestBody, ResponseTransformer<PutOperationWithChecksumResponse, ReturnT> responseTransformer) + throws AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler<PutOperationWithChecksumResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PutOperationWithChecksumResponse::builder); + operationMetadata, PutOperationWithChecksumResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration);
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum"); return clientHandler.execute( - new ClientExecutionParams<PutOperationWithChecksumRequest, PutOperationWithChecksumResponse>() - .withOperationName("PutOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(putOperationWithChecksumRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum - .builder() - .requestChecksumRequired(false) - .isRequestStreaming(true) - .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) - .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, - DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, - DefaultChecksumAlgorithm.SHA256).build()) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build()), responseTransformer); + new ClientExecutionParams<PutOperationWithChecksumRequest, PutOperationWithChecksumResponse>() + .withOperationName("PutOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(putOperationWithChecksumRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum + .builder() + .requestChecksumRequired(false) + .isRequestStreaming(true) + .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) + .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, + DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, + DefaultChecksumAlgorithm.SHA256).build()) + .withResponseTransformer(responseTransformer) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build()), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -823,50 +824,50 @@ public <ReturnT> ReturnT putOperationWithChecksum(PutOperationWithChecksumReques */ @Override public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, - RequestBody requestBody) throws AwsServiceException, SdkClientException, JsonException { + RequestBody requestBody) throws AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler<StreamingInputOperationResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOperationResponse::builder); + operationMetadata, StreamingInputOperationResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) {
return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); return clientHandler - .execute(new ClientExecutionParams<StreamingInputOperationRequest, StreamingInputOperationResponse>() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(streamingInputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams<StreamingInputOperationRequest, StreamingInputOperationResponse>() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(streamingInputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withRequestBody(requestBody) + .withMarshaller( - StreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -908,54 +909,55 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe */ @Override public <ReturnT> ReturnT streamingInputOutputOperation( - StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, RequestBody requestBody, - ResponseTransformer<StreamingInputOutputOperationResponse, ReturnT> responseTransformer) throws AwsServiceException, - SdkClientException, JsonException { + StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, RequestBody requestBody, + ResponseTransformer<StreamingInputOutputOperationResponse, ReturnT> responseTransformer) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler<StreamingInputOutputOperationResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata,
StreamingInputOutputOperationResponse::builder); + operationMetadata, StreamingInputOutputOperationResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, - streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); + streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOutputOperation"); return clientHandler.execute( - new ClientExecutionParams<StreamingInputOutputOperationRequest, StreamingInputOutputOperationResponse>() - .withOperationName("StreamingInputOutputOperation") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(streamingInputOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller - .builder() - .delegateMarshaller( - new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).transferEncoding(true).build()), responseTransformer); + new ClientExecutionParams<StreamingInputOutputOperationRequest, StreamingInputOutputOperationResponse>() + .withOperationName("StreamingInputOutputOperation") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(streamingInputOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withResponseTransformer(responseTransformer) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller + .builder() + .delegateMarshaller( + new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).transferEncoding(true).build()), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -986,44 +988,44 @@ public <ReturnT> ReturnT streamingInputOutputOperation( */ @Override public <ReturnT> ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, - ResponseTransformer<StreamingOutputOperationResponse, ReturnT> responseTransformer) throws AwsServiceException, - SdkClientException, JsonException { + ResponseTransformer<StreamingOutputOperationResponse, ReturnT> responseTransformer) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); +
.isPayloadJson(false).build(); HttpResponseHandler<StreamingOutputOperationResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingOutputOperationResponse::builder); + operationMetadata, StreamingOutputOperationResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); return clientHandler.execute( - new ClientExecutionParams<StreamingOutputOperationRequest, StreamingOutputOperationResponse>() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + new ClientExecutionParams<StreamingOutputOperationRequest, StreamingOutputOperationResponse>() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector).withResponseTransformer(responseTransformer) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -1043,7 +1045,7 @@ public final String serviceName() { } private static List<MetricPublisher> resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List<MetricPublisher> publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -1058,7 +1060,7 @@ private static List<MetricPublisher> resolveMetricPublishers(SdkClientConfigurat } private HttpResponseHandler<AwsServiceException> createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata, Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper) { + JsonOperationMetadata operationMetadata, Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper) { return
protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); } @@ -1101,7 +1103,7 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, private <T extends BaseAwsJsonProtocolFactory.Builder<T>> T init(T builder) { return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(JsonException::builder) - .protocol(AwsJsonProtocol.REST_JSON).protocolVersion("1.1"); + .protocol(AwsJsonProtocol.REST_JSON).protocolVersion("1.1"); } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-query-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-query-async-client-class.java index 5c97f4ae619c..8a7945259d3e 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-query-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-query-async-client-class.java @@ -110,7 +110,7 @@ final class DefaultQueryAsyncClient implements QueryAsyncClient { private static final Logger log = LoggerFactory.getLogger(DefaultQueryAsyncClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.QUERY).build(); + .serviceProtocol(AwsServiceProtocol.QUERY).build(); private final AsyncClientHandler clientHandler; @@ -155,27 +155,27 @@ protected DefaultQueryAsyncClient(SdkClientConfiguration clientConfiguration) { public CompletableFuture<APostOperationResponse> aPostOperation(APostOperationRequest aPostOperationRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ?
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); HttpResponseHandler<APostOperationResponse> responseHandler = protocolFactory - .createResponseHandler(APostOperationResponse::builder); + .createResponseHandler(APostOperationResponse::builder); HttpResponseHandler<AwsServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler(); String hostPrefix = "foo-"; String resolvedHostExpression = "foo-"; CompletableFuture<APostOperationResponse> executeFuture = clientHandler - .execute(new ClientExecutionParams<APostOperationRequest, APostOperationResponse>() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest)); + .execute(new ClientExecutionParams<APostOperationRequest, APostOperationResponse>() + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest)); CompletableFuture<APostOperationResponse> whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -213,29 +213,29 @@ public CompletableFuture<APostOperationResponse> aPostOperation(APostOperationRe */ @Override public CompletableFuture<APostOperationWithOutputResponse> aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ?
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); HttpResponseHandler<APostOperationWithOutputResponse> responseHandler = protocolFactory - .createResponseHandler(APostOperationWithOutputResponse::builder); + .createResponseHandler(APostOperationWithOutputResponse::builder); HttpResponseHandler<AwsServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture<APostOperationWithOutputResponse> executeFuture = clientHandler - .execute(new ClientExecutionParams<APostOperationWithOutputRequest, APostOperationWithOutputResponse>() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(aPostOperationWithOutputRequest)); + .execute(new ClientExecutionParams<APostOperationWithOutputRequest, APostOperationWithOutputResponse>() + .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(aPostOperationWithOutputRequest)); CompletableFuture<APostOperationWithOutputResponse> whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -269,29 +269,29 @@ public CompletableFuture<APostOperationWithOutputResponse> aPostOperationWithOut */ @Override public CompletableFuture<BearerAuthOperationResponse> bearerAuthOperation( - BearerAuthOperationRequest bearerAuthOperationRequest) { + BearerAuthOperationRequest bearerAuthOperationRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ?
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation"); HttpResponseHandler<BearerAuthOperationResponse> responseHandler = protocolFactory - .createResponseHandler(BearerAuthOperationResponse::builder); + .createResponseHandler(BearerAuthOperationResponse::builder); HttpResponseHandler<AwsServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture<BearerAuthOperationResponse> executeFuture = clientHandler - .execute(new ClientExecutionParams<BearerAuthOperationRequest, BearerAuthOperationResponse>() - .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector).withInput(bearerAuthOperationRequest)); + .execute(new ClientExecutionParams<BearerAuthOperationRequest, BearerAuthOperationResponse>() + .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector).withInput(bearerAuthOperationRequest)); CompletableFuture<BearerAuthOperationResponse> whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -325,37 +325,37 @@ public CompletableFuture<BearerAuthOperationResponse> bearerAuthOperation( */ @Override public CompletableFuture<GetOperationWithChecksumResponse> getOperationWithChecksum( - GetOperationWithChecksumRequest getOperationWithChecksumRequest) { + GetOperationWithChecksumRequest getOperationWithChecksumRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ?
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum"); HttpResponseHandler<GetOperationWithChecksumResponse> responseHandler = protocolFactory - .createResponseHandler(GetOperationWithChecksumResponse::builder); + .createResponseHandler(GetOperationWithChecksumResponse::builder); HttpResponseHandler<AwsServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture<GetOperationWithChecksumResponse> executeFuture = clientHandler - .execute(new ClientExecutionParams<GetOperationWithChecksumRequest, GetOperationWithChecksumResponse>() - .withOperationName("GetOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) - .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) - .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) - .withInput(getOperationWithChecksumRequest)); + .execute(new ClientExecutionParams<GetOperationWithChecksumRequest, GetOperationWithChecksumResponse>() + .withOperationName("GetOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) + .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) + .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) + .withInput(getOperationWithChecksumRequest)); CompletableFuture<GetOperationWithChecksumResponse> whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -390,33 +390,33 @@ public CompletableFuture<GetOperationWithChecksumResponse> getOperationWithCheck */ @Override public CompletableFuture<OperationWithChecksumRequiredResponse> operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ?
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); HttpResponseHandler<OperationWithChecksumRequiredResponse> responseHandler = protocolFactory - .createResponseHandler(OperationWithChecksumRequiredResponse::builder); + .createResponseHandler(OperationWithChecksumRequiredResponse::builder); HttpResponseHandler<AwsServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture<OperationWithChecksumRequiredResponse> executeFuture = clientHandler - .execute(new ClientExecutionParams<OperationWithChecksumRequiredRequest, OperationWithChecksumRequiredResponse>() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); + .execute(new ClientExecutionParams<OperationWithChecksumRequiredRequest, OperationWithChecksumRequiredResponse>() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); CompletableFuture<OperationWithChecksumRequiredResponse> whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -450,29 +450,29 @@ public CompletableFuture<OperationWithChecksumRequiredResponse> operationWithChe */ @Override public CompletableFuture<OperationWithContextParamResponse> operationWithContextParam( - OperationWithContextParamRequest operationWithContextParamRequest) { + OperationWithContextParamRequest operationWithContextParamRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithContextParamRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ?
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithContextParam"); HttpResponseHandler<OperationWithContextParamResponse> responseHandler = protocolFactory - .createResponseHandler(OperationWithContextParamResponse::builder); + .createResponseHandler(OperationWithContextParamResponse::builder); HttpResponseHandler<AwsServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture<OperationWithContextParamResponse> executeFuture = clientHandler - .execute(new ClientExecutionParams<OperationWithContextParamRequest, OperationWithContextParamResponse>() - .withOperationName("OperationWithContextParam").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithContextParamRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithContextParamRequest)); + .execute(new ClientExecutionParams<OperationWithContextParamRequest, OperationWithContextParamResponse>() + .withOperationName("OperationWithContextParam").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithContextParamRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithContextParamRequest)); CompletableFuture<OperationWithContextParamResponse> whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -506,30 +506,30 @@ public CompletableFuture<OperationWithContextParamResponse> operationWithContext */ @Override public CompletableFuture<OperationWithCustomMemberResponse> operationWithCustomMember( - OperationWithCustomMemberRequest operationWithCustomMemberRequest) { + OperationWithCustomMemberRequest operationWithCustomMemberRequest) { operationWithCustomMemberRequest = UtilsTest.dummyRequestModifier(operationWithCustomMemberRequest); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithCustomMemberRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithCustomMemberRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ?
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithCustomMember"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithCustomMemberResponse::builder); + .createResponseHandler(OperationWithCustomMemberResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithCustomMember").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithCustomMemberRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithCustomMemberRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithCustomMember").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithCustomMemberRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithCustomMemberRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -565,30 +565,30 @@ public CompletableFuture operationWithCustomM */ @Override public CompletableFuture operationWithCustomizedOperationContextParam( - OperationWithCustomizedOperationContextParamRequest operationWithCustomizedOperationContextParamRequest) { + OperationWithCustomizedOperationContextParamRequest operationWithCustomizedOperationContextParamRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration( - operationWithCustomizedOperationContextParamRequest, this.clientConfiguration); + operationWithCustomizedOperationContextParamRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithCustomizedOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithCustomizedOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithCustomizedOperationContextParam"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithCustomizedOperationContextParamResponse::builder); + .createResponseHandler(OperationWithCustomizedOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithCustomizedOperationContextParam") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithCustomizedOperationContextParamRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithCustomizedOperationContextParamRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithCustomizedOperationContextParam") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithCustomizedOperationContextParamRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithCustomizedOperationContextParamRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -624,29 +624,29 @@ public CompletableFuture o */ @Override public CompletableFuture operationWithMapOperationContextParam( - OperationWithMapOperationContextParamRequest operationWithMapOperationContextParamRequest) { + OperationWithMapOperationContextParamRequest operationWithMapOperationContextParamRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithMapOperationContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithMapOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithMapOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithMapOperationContextParam"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithMapOperationContextParamResponse::builder); + .createResponseHandler(OperationWithMapOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithMapOperationContextParam").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithMapOperationContextParamRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithMapOperationContextParamRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithMapOperationContextParam").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithMapOperationContextParamRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithMapOperationContextParamRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -680,29 +680,29 @@ public CompletableFuture operatio */ @Override public CompletableFuture operationWithNoneAuthType( - OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) { + OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithNoneAuthTypeResponse::builder); + .createResponseHandler(OperationWithNoneAuthTypeResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithNoneAuthTypeRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithNoneAuthTypeRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -738,29 +738,29 @@ public CompletableFuture operationWithNoneAut */ @Override public CompletableFuture operationWithOperationContextParam( - OperationWithOperationContextParamRequest operationWithOperationContextParamRequest) { + OperationWithOperationContextParamRequest operationWithOperationContextParamRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithOperationContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithOperationContextParam"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithOperationContextParamResponse::builder); + .createResponseHandler(OperationWithOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithOperationContextParam").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithOperationContextParamRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithOperationContextParamRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithOperationContextParam").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithOperationContextParamRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithOperationContextParamRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -795,34 +795,34 @@ public CompletableFuture operationWi */ @Override public CompletableFuture operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithRequestCompressionResponse::builder); + .createResponseHandler(OperationWithRequestCompressionResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithRequestCompression") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withInput(operationWithRequestCompressionRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withInput(operationWithRequestCompressionRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -857,29 +857,29 @@ public CompletableFuture operationWithR */ @Override public CompletableFuture operationWithStaticContextParams( - OperationWithStaticContextParamsRequest operationWithStaticContextParamsRequest) { + OperationWithStaticContextParamsRequest operationWithStaticContextParamsRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithStaticContextParamsRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithStaticContextParamsRequest.overrideConfiguration().orElse(null)); + operationWithStaticContextParamsRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithStaticContextParams"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithStaticContextParamsResponse::builder); + .createResponseHandler(OperationWithStaticContextParamsResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithStaticContextParams").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithStaticContextParamsRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithStaticContextParamsRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithStaticContextParams").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithStaticContextParamsRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithStaticContextParamsRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -931,56 +931,57 @@ public CompletableFuture operationWith */ @Override public CompletableFuture putOperationWithChecksum( - PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody, - AsyncResponseTransformer asyncResponseTransformer) { + PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(PutOperationWithChecksumResponse::builder); + .createResponseHandler(PutOperationWithChecksumResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("PutOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum - .builder() - .requestChecksumRequired(false) - .isRequestStreaming(true) - .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) - .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, - DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, - DefaultChecksumAlgorithm.SHA256).build()).withAsyncRequestBody(requestBody) - .withInput(putOperationWithChecksumRequest), asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("PutOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum + .builder() + .requestChecksumRequired(false) + .isRequestStreaming(true) + .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) + .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, + DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, + DefaultChecksumAlgorithm.SHA256).build()) + .withAsyncResponseTransformer(asyncResponseTransformer).withAsyncRequestBody(requestBody) + .withInput(putOperationWithChecksumRequest), asyncResponseTransformer); CompletableFuture whenCompleteFuture = null; AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -990,7 +991,7 @@ public CompletableFuture 
putOperationWithChecksum( } catch (Throwable t) { AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -1023,33 +1024,33 @@ public CompletableFuture putOperationWithChecksum( */ @Override public CompletableFuture streamingInputOperation( - StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { + StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingInputOperationResponse::builder); + .createResponseHandler(StreamingInputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) - .withInput(streamingInputOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) + .withInput(streamingInputOperationRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -1088,40 +1089,41 @@ public CompletableFuture streamingInputOperatio */ @Override public CompletableFuture streamingOutputOperation( - StreamingOutputOperationRequest streamingOutputOperationRequest, - AsyncResponseTransformer asyncResponseTransformer) { + StreamingOutputOperationRequest streamingOutputOperationRequest, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration 
clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingOutputOperationResponse::builder); + .createResponseHandler(StreamingOutputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(streamingOutputOperationRequest), asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); CompletableFuture whenCompleteFuture = null; AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -1131,7 +1133,7 @@ public CompletableFuture streamingOutputOperation( } catch (Throwable t) { AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -1154,15 +1156,15 @@ public final String serviceName() { private AwsQueryProtocolFactory init() { return AwsQueryProtocolFactory - .builder() - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - 
.exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-query-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-query-client-class.java index 5961432e145d..49ddc43017b9 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-query-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-query-client-class.java @@ -103,7 +103,7 @@ final class DefaultQueryClient implements QueryClient { private static final Logger log = Logger.loggerFor(DefaultQueryClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.QUERY).build(); + .serviceProtocol(AwsServiceProtocol.QUERY).build(); private final SyncClientHandler clientHandler; @@ -139,17 +139,17 @@ protected DefaultQueryClient(SdkClientConfiguration clientConfiguration) { */ @Override public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException, - AwsServiceException, SdkClientException, QueryException { + AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(APostOperationResponse::builder); + .createResponseHandler(APostOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); @@ -157,11 +157,11 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio String resolvedHostExpression = "foo-"; return clientHandler.execute(new ClientExecutionParams() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) - .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) + .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -189,30 +189,30 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio */ @Override public APostOperationWithOutputResponse aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, QueryException { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(APostOperationWithOutputResponse::builder); + .createResponseHandler(APostOperationWithOutputResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -236,28 +236,28 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( */ @Override public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationRequest bearerAuthOperationRequest) - throws AwsServiceException, SdkClientException, QueryException { + throws AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(BearerAuthOperationResponse::builder); + .createResponseHandler(BearerAuthOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) - .withInput(bearerAuthOperationRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); + .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) + .withInput(bearerAuthOperationRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -281,38 +281,38 @@ public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationReques */ @Override public GetOperationWithChecksumResponse getOperationWithChecksum( - GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, - QueryException { + GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, + QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(GetOperationWithChecksumResponse::builder); + .createResponseHandler(GetOperationWithChecksumResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(getOperationWithChecksumRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) - .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) - .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) - .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("GetOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(getOperationWithChecksumRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) + .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) + .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) + .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -336,35 +336,35 @@ public GetOperationWithChecksumResponse getOperationWithChecksum( */ @Override public OperationWithChecksumRequiredResponse operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, - SdkClientException, QueryException { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, + SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithChecksumRequiredResponse::builder); + .createResponseHandler(OperationWithChecksumRequiredResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithChecksumRequiredRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithChecksumRequiredRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -388,30 +388,30 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( */ @Override public OperationWithContextParamResponse operationWithContextParam( - OperationWithContextParamRequest operationWithContextParamRequest) throws AwsServiceException, SdkClientException, - QueryException { + OperationWithContextParamRequest operationWithContextParamRequest) throws AwsServiceException, SdkClientException, + QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithContextParamResponse::builder); + .createResponseHandler(OperationWithContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithContextParamRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithContextParam"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithContextParam").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(operationWithContextParamRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithContextParamRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithContextParam").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(operationWithContextParamRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OperationWithContextParamRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -435,31 +435,31 @@ public OperationWithContextParamResponse operationWithContextParam( */ @Override public OperationWithCustomMemberResponse operationWithCustomMember( - OperationWithCustomMemberRequest operationWithCustomMemberRequest) throws AwsServiceException, SdkClientException, - QueryException { + OperationWithCustomMemberRequest operationWithCustomMemberRequest) throws AwsServiceException, SdkClientException, + QueryException { operationWithCustomMemberRequest = UtilsTest.dummyRequestModifier(operationWithCustomMemberRequest); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithCustomMemberResponse::builder); + .createResponseHandler(OperationWithCustomMemberResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithCustomMemberRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithCustomMemberRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithCustomMember"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithCustomMember").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(operationWithCustomMemberRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithCustomMemberRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithCustomMember").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(operationWithCustomMemberRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OperationWithCustomMemberRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -484,31 +484,31 @@ public OperationWithCustomMemberResponse operationWithCustomMember( */ @Override public OperationWithCustomizedOperationContextParamResponse operationWithCustomizedOperationContextParam( - OperationWithCustomizedOperationContextParamRequest operationWithCustomizedOperationContextParamRequest) - throws AwsServiceException, SdkClientException, QueryException { + OperationWithCustomizedOperationContextParamRequest operationWithCustomizedOperationContextParamRequest) + throws AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithCustomizedOperationContextParamResponse::builder); + .createResponseHandler(OperationWithCustomizedOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration( - operationWithCustomizedOperationContextParamRequest, this.clientConfiguration); + operationWithCustomizedOperationContextParamRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithCustomizedOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithCustomizedOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithCustomizedOperationContextParam"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithCustomizedOperationContextParam") - .withProtocolMetadata(protocolMetadata).withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) - .withInput(operationWithCustomizedOperationContextParamRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithCustomizedOperationContextParamRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithCustomizedOperationContextParam") + .withProtocolMetadata(protocolMetadata).withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) + .withInput(operationWithCustomizedOperationContextParamRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OperationWithCustomizedOperationContextParamRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -533,30 +533,30 @@ public OperationWithCustomizedOperationContextParamResponse operationWithCustomi */ @Override public OperationWithMapOperationContextParamResponse operationWithMapOperationContextParam( - OperationWithMapOperationContextParamRequest operationWithMapOperationContextParamRequest) - throws AwsServiceException, SdkClientException, QueryException { + OperationWithMapOperationContextParamRequest operationWithMapOperationContextParamRequest) + throws AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithMapOperationContextParamResponse::builder); + .createResponseHandler(OperationWithMapOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithMapOperationContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithMapOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithMapOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithMapOperationContextParam"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithMapOperationContextParam").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithMapOperationContextParamRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithMapOperationContextParamRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithMapOperationContextParam").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithMapOperationContextParamRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OperationWithMapOperationContextParamRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -580,30 +580,30 @@ public OperationWithMapOperationContextParamResponse operationWithMapOperationCo */ @Override public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( - OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) throws AwsServiceException, SdkClientException, - QueryException { + OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) throws AwsServiceException, SdkClientException, + QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithNoneAuthTypeResponse::builder); + .createResponseHandler(OperationWithNoneAuthTypeResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(operationWithNoneAuthTypeRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(operationWithNoneAuthTypeRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -628,30 +628,30 @@ public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( */ @Override public OperationWithOperationContextParamResponse operationWithOperationContextParam( - OperationWithOperationContextParamRequest operationWithOperationContextParamRequest) throws AwsServiceException, - SdkClientException, QueryException { + OperationWithOperationContextParamRequest operationWithOperationContextParamRequest) throws AwsServiceException, + SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithOperationContextParamResponse::builder); + .createResponseHandler(OperationWithOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithOperationContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithOperationContextParam");
             return clientHandler
-                    .execute(new ClientExecutionParams<OperationWithOperationContextParamRequest, OperationWithOperationContextParamResponse>()
-                            .withOperationName("OperationWithOperationContextParam").withProtocolMetadata(protocolMetadata)
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withInput(operationWithOperationContextParamRequest)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .withMarshaller(new OperationWithOperationContextParamRequestMarshaller(protocolFactory)));
+                .execute(new ClientExecutionParams<OperationWithOperationContextParamRequest, OperationWithOperationContextParamResponse>()
+                        .withOperationName("OperationWithOperationContextParam").withProtocolMetadata(protocolMetadata)
+                        .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                        .withRequestConfiguration(clientConfiguration).withInput(operationWithOperationContextParamRequest)
+                        .withMetricCollector(apiCallMetricCollector)
+                        .withMarshaller(new OperationWithOperationContextParamRequestMarshaller(protocolFactory)));
         } finally {
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
         }
@@ -675,35 +675,35 @@ public OperationWithOperationContextParamResponse operationWithOperationContextP
      */
     @Override
     public OperationWithRequestCompressionResponse operationWithRequestCompression(
-            OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException,
-            SdkClientException, QueryException {
+        OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException,
+        SdkClientException, QueryException {
         HttpResponseHandler<OperationWithRequestCompressionResponse> responseHandler = protocolFactory
-                .createResponseHandler(OperationWithRequestCompressionResponse::builder);
+            .createResponseHandler(OperationWithRequestCompressionResponse::builder);
         HttpResponseHandler<AwsServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler();
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
-                operationWithRequestCompressionRequest.overrideConfiguration().orElse(null));
+            operationWithRequestCompressionRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression");
             return clientHandler
-                    .execute(new ClientExecutionParams<OperationWithRequestCompressionRequest, OperationWithRequestCompressionResponse>()
-                            .withOperationName("OperationWithRequestCompression")
-                            .withProtocolMetadata(protocolMetadata)
-                            .withResponseHandler(responseHandler)
-                            .withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration)
-                            .withInput(operationWithRequestCompressionRequest)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION,
-                                    RequestCompression.builder().encodings("gzip").isStreaming(false).build())
-                            .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)));
+                .execute(new ClientExecutionParams<OperationWithRequestCompressionRequest, OperationWithRequestCompressionResponse>()
+                        .withOperationName("OperationWithRequestCompression")
+                        .withProtocolMetadata(protocolMetadata)
+                        .withResponseHandler(responseHandler)
+                        .withErrorResponseHandler(errorResponseHandler)
+                        .withRequestConfiguration(clientConfiguration)
+                        .withInput(operationWithRequestCompressionRequest)
+                        .withMetricCollector(apiCallMetricCollector)
+                        .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION,
+                                RequestCompression.builder().encodings("gzip").isStreaming(false).build())
+                        .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)));
         } finally {
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
         }
@@ -727,30 +727,30 @@ public OperationWithRequestCompressionResponse operationWithRequestCompression(
      */
     @Override
     public OperationWithStaticContextParamsResponse operationWithStaticContextParams(
-            OperationWithStaticContextParamsRequest operationWithStaticContextParamsRequest) throws AwsServiceException,
-            SdkClientException, QueryException {
+        OperationWithStaticContextParamsRequest operationWithStaticContextParamsRequest) throws AwsServiceException,
+        SdkClientException, QueryException {
         HttpResponseHandler<OperationWithStaticContextParamsResponse> responseHandler = protocolFactory
-                .createResponseHandler(OperationWithStaticContextParamsResponse::builder);
+            .createResponseHandler(OperationWithStaticContextParamsResponse::builder);
         HttpResponseHandler<AwsServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler();
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithStaticContextParamsRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
-                operationWithStaticContextParamsRequest.overrideConfiguration().orElse(null));
+            operationWithStaticContextParamsRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithStaticContextParams");
             return clientHandler
-                    .execute(new ClientExecutionParams<OperationWithStaticContextParamsRequest, OperationWithStaticContextParamsResponse>()
-                            .withOperationName("OperationWithStaticContextParams").withProtocolMetadata(protocolMetadata)
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withInput(operationWithStaticContextParamsRequest)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .withMarshaller(new OperationWithStaticContextParamsRequestMarshaller(protocolFactory)));
+                .execute(new ClientExecutionParams<OperationWithStaticContextParamsRequest, OperationWithStaticContextParamsResponse>()
+                        .withOperationName("OperationWithStaticContextParams").withProtocolMetadata(protocolMetadata)
+                        .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                        .withRequestConfiguration(clientConfiguration).withInput(operationWithStaticContextParamsRequest)
+                        .withMetricCollector(apiCallMetricCollector)
+                        .withMarshaller(new OperationWithStaticContextParamsRequestMarshaller(protocolFactory)));
         } finally {
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
         }
@@ -800,47 +800,48 @@ public OperationWithStaticContextParamsResponse operationWithStaticContextParams
      */
     @Override
     public <ReturnT> ReturnT putOperationWithChecksum(PutOperationWithChecksumRequest putOperationWithChecksumRequest,
-            RequestBody requestBody, ResponseTransformer<PutOperationWithChecksumResponse, ReturnT> responseTransformer)
-            throws AwsServiceException, SdkClientException, QueryException {
+        RequestBody requestBody, ResponseTransformer<PutOperationWithChecksumResponse, ReturnT> responseTransformer)
+        throws AwsServiceException, SdkClientException, QueryException {
         HttpResponseHandler<PutOperationWithChecksumResponse> responseHandler = protocolFactory
-                .createResponseHandler(PutOperationWithChecksumResponse::builder);
+            .createResponseHandler(PutOperationWithChecksumResponse::builder);
         HttpResponseHandler<AwsServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler();
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum");
             return clientHandler
-                    .execute(new ClientExecutionParams<PutOperationWithChecksumRequest, PutOperationWithChecksumResponse>()
-                            .withOperationName("PutOperationWithChecksum")
-                            .withProtocolMetadata(protocolMetadata)
-                            .withResponseHandler(responseHandler)
-                            .withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration)
-                            .withInput(putOperationWithChecksumRequest)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .putExecutionAttribute(
-                                    SdkInternalExecutionAttribute.HTTP_CHECKSUM,
-                                    HttpChecksum
-                                            .builder()
-                                            .requestChecksumRequired(false)
-                                            .isRequestStreaming(true)
-                                            .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString())
-                                            .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C,
-                                                    DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1,
-                                                    DefaultChecksumAlgorithm.SHA256).build())
-                            .withRequestBody(requestBody)
-                            .withMarshaller(
-                                    StreamingRequestMarshaller.builder()
-                                            .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory))
-                                            .requestBody(requestBody).build()));
+                .execute(new ClientExecutionParams<PutOperationWithChecksumRequest, PutOperationWithChecksumResponse>()
+                        .withOperationName("PutOperationWithChecksum")
+                        .withProtocolMetadata(protocolMetadata)
+                        .withResponseHandler(responseHandler)
+                        .withErrorResponseHandler(errorResponseHandler)
+                        .withRequestConfiguration(clientConfiguration)
+                        .withInput(putOperationWithChecksumRequest)
+                        .withMetricCollector(apiCallMetricCollector)
+                        .putExecutionAttribute(
+                                SdkInternalExecutionAttribute.HTTP_CHECKSUM,
+                                HttpChecksum
+                                        .builder()
+                                        .requestChecksumRequired(false)
+                                        .isRequestStreaming(true)
+                                        .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString())
+                                        .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C,
+                                                DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1,
+                                                DefaultChecksumAlgorithm.SHA256).build())
+                        .withResponseTransformer(responseTransformer)
+                        .withRequestBody(requestBody)
+                        .withMarshaller(
+                                StreamingRequestMarshaller.builder()
+                                        .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory))
+                                        .requestBody(requestBody).build()));
         } finally {
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
         }
@@ -875,36 +876,36 @@ public ReturnT putOperationWithChecksum(PutOperationWithChecksumReques
      */
     @Override
     public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest,
-            RequestBody requestBody) throws AwsServiceException, SdkClientException, QueryException {
+        RequestBody requestBody) throws AwsServiceException, SdkClientException, QueryException {
         HttpResponseHandler<StreamingInputOperationResponse> responseHandler = protocolFactory
-                .createResponseHandler(StreamingInputOperationResponse::builder);
+            .createResponseHandler(StreamingInputOperationResponse::builder);
         HttpResponseHandler<AwsServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler();
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation");
             return clientHandler
-                    .execute(new ClientExecutionParams<StreamingInputOperationRequest, StreamingInputOperationResponse>()
-                            .withOperationName("StreamingInputOperation")
-                            .withProtocolMetadata(protocolMetadata)
-                            .withResponseHandler(responseHandler)
-                            .withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration)
-                            .withInput(streamingInputOperationRequest)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .withRequestBody(requestBody)
-                            .withMarshaller(
-                                    StreamingRequestMarshaller.builder()
-                                            .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory))
-                                            .requestBody(requestBody).build()));
+                .execute(new ClientExecutionParams<StreamingInputOperationRequest, StreamingInputOperationResponse>()
+                        .withOperationName("StreamingInputOperation")
+                        .withProtocolMetadata(protocolMetadata)
+                        .withResponseHandler(responseHandler)
+                        .withErrorResponseHandler(errorResponseHandler)
+                        .withRequestConfiguration(clientConfiguration)
+                        .withInput(streamingInputOperationRequest)
+                        .withMetricCollector(apiCallMetricCollector)
+                        .withRequestBody(requestBody)
+                        .withMarshaller(
+                                StreamingRequestMarshaller.builder()
+                                        .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory))
+                                        .requestBody(requestBody).build()));
         } finally {
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
         }
@@ -935,30 +936,30 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe
      */
     @Override
     public <ReturnT> ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest,
-            ResponseTransformer<StreamingOutputOperationResponse, ReturnT> responseTransformer) throws AwsServiceException,
-            SdkClientException, QueryException {
+        ResponseTransformer<StreamingOutputOperationResponse, ReturnT> responseTransformer) throws AwsServiceException,
+        SdkClientException, QueryException {
         HttpResponseHandler<StreamingOutputOperationResponse> responseHandler = protocolFactory
-                .createResponseHandler(StreamingOutputOperationResponse::builder);
+            .createResponseHandler(StreamingOutputOperationResponse::builder);
         HttpResponseHandler<AwsServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler();
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation");
             return clientHandler.execute(
-                    new ClientExecutionParams<StreamingOutputOperationRequest, StreamingOutputOperationResponse>()
-                            .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata)
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer);
+                new ClientExecutionParams<StreamingOutputOperationRequest, StreamingOutputOperationResponse>()
+                        .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata)
+                        .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                        .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest)
+                        .withMetricCollector(apiCallMetricCollector).withResponseTransformer(responseTransformer)
+                        .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer);
         } finally {
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
         }
@@ -983,7 +984,7 @@ public final String serviceName() {
     }
 
     private static List<MetricPublisher> resolveMetricPublishers(SdkClientConfiguration clientConfiguration,
-            RequestOverrideConfiguration requestOverrideConfiguration) {
+        RequestOverrideConfiguration requestOverrideConfiguration) {
         List<MetricPublisher> publishers = null;
         if (requestOverrideConfiguration != null) {
             publishers = requestOverrideConfiguration.metricPublishers();
@@ -1036,11 +1037,11 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request,
 
     private AwsQueryProtocolFactory init() {
         return AwsQueryProtocolFactory
-                .builder()
-                .registerModeledException(
-                        ExceptionMetadata.builder().errorCode("InvalidInput")
-                                .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build())
-                .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build();
+            .builder()
+            .registerModeledException(
+                    ExceptionMetadata.builder().errorCode("InvalidInput")
+                            .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build())
+            .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build();
     }
 
     @Override
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-xml-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-xml-async-client-class.java
index c082dd8f8ab0..966f8f0ee206 100644
--- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-xml-async-client-class.java
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-xml-async-client-class.java
@@ -102,7 +102,7 @@ final class DefaultXmlAsyncClient implements XmlAsyncClient {
     private static final Logger log = LoggerFactory.getLogger(DefaultXmlAsyncClient.class);
 
     private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder()
-            .serviceProtocol(AwsServiceProtocol.REST_XML).build();
+        .serviceProtocol(AwsServiceProtocol.REST_XML).build();
 
     private final AsyncClientHandler clientHandler;
 
@@ -147,26 +147,26 @@ protected DefaultXmlAsyncClient(SdkClientConfiguration clientConfiguration) {
     public CompletableFuture<APostOperationResponse> aPostOperation(APostOperationRequest aPostOperationRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation");
             HttpResponseHandler<Response<APostOperationResponse>> responseHandler = protocolFactory
-                    .createCombinedResponseHandler(APostOperationResponse::builder,
-                            new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
+                .createCombinedResponseHandler(APostOperationResponse::builder,
+                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
             String hostPrefix = "foo-";
             String resolvedHostExpression = "foo-";
             CompletableFuture<APostOperationResponse> executeFuture = clientHandler
-                    .execute(new ClientExecutionParams<APostOperationRequest, APostOperationResponse>()
-                            .withOperationName("APostOperation").withRequestConfiguration(clientConfiguration)
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))
-                            .withCombinedResponseHandler(responseHandler).hostPrefixExpression(resolvedHostExpression)
-                            .withMetricCollector(apiCallMetricCollector).withInput(aPostOperationRequest));
+                .execute(new ClientExecutionParams<APostOperationRequest, APostOperationResponse>()
+                        .withOperationName("APostOperation").withRequestConfiguration(clientConfiguration)
+                        .withProtocolMetadata(protocolMetadata)
+                        .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))
+                        .withCombinedResponseHandler(responseHandler).hostPrefixExpression(resolvedHostExpression)
+                        .withMetricCollector(apiCallMetricCollector).withInput(aPostOperationRequest));
             CompletableFuture<APostOperationResponse> whenCompleteFuture = null;
             whenCompleteFuture = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -205,28 +205,28 @@ public CompletableFuture aPostOperation(APostOperationRe
      */
     @Override
     public CompletableFuture<APostOperationWithOutputResponse> aPostOperationWithOutput(
-            APostOperationWithOutputRequest aPostOperationWithOutputRequest) {
+        APostOperationWithOutputRequest aPostOperationWithOutputRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput");
             HttpResponseHandler<Response<APostOperationWithOutputResponse>> responseHandler = protocolFactory
-                    .createCombinedResponseHandler(APostOperationWithOutputResponse::builder,
-                            new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
+                .createCombinedResponseHandler(APostOperationWithOutputResponse::builder,
+                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
             CompletableFuture<APostOperationWithOutputResponse> executeFuture = clientHandler
-                    .execute(new ClientExecutionParams<APostOperationWithOutputRequest, APostOperationWithOutputResponse>()
-                            .withOperationName("APostOperationWithOutput").withRequestConfiguration(clientConfiguration)
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))
-                            .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector)
-                            .withInput(aPostOperationWithOutputRequest));
+                .execute(new ClientExecutionParams<APostOperationWithOutputRequest, APostOperationWithOutputResponse>()
+                        .withOperationName("APostOperationWithOutput").withRequestConfiguration(clientConfiguration)
+                        .withProtocolMetadata(protocolMetadata)
+                        .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))
+                        .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector)
+                        .withInput(aPostOperationWithOutputRequest));
             CompletableFuture<APostOperationWithOutputResponse> whenCompleteFuture = null;
             whenCompleteFuture = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -261,28 +261,28 @@ public CompletableFuture aPostOperationWithOut
      */
     @Override
     public CompletableFuture<BearerAuthOperationResponse> bearerAuthOperation(
-            BearerAuthOperationRequest bearerAuthOperationRequest) {
+        BearerAuthOperationRequest bearerAuthOperationRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation");
             HttpResponseHandler<Response<BearerAuthOperationResponse>> responseHandler = protocolFactory
-                    .createCombinedResponseHandler(BearerAuthOperationResponse::builder,
-                            new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
+                .createCombinedResponseHandler(BearerAuthOperationResponse::builder,
+                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
             CompletableFuture<BearerAuthOperationResponse> executeFuture = clientHandler
-                    .execute(new ClientExecutionParams<BearerAuthOperationRequest, BearerAuthOperationResponse>()
-                            .withOperationName("BearerAuthOperation").withRequestConfiguration(clientConfiguration)
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))
-                            .withCombinedResponseHandler(responseHandler).credentialType(CredentialType.TOKEN)
-                            .withMetricCollector(apiCallMetricCollector).withInput(bearerAuthOperationRequest));
+                .execute(new ClientExecutionParams<BearerAuthOperationRequest, BearerAuthOperationResponse>()
+                        .withOperationName("BearerAuthOperation").withRequestConfiguration(clientConfiguration)
+                        .withProtocolMetadata(protocolMetadata)
+                        .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))
+                        .withCombinedResponseHandler(responseHandler).credentialType(CredentialType.TOKEN)
+                        .withMetricCollector(apiCallMetricCollector).withInput(bearerAuthOperationRequest));
             CompletableFuture<BearerAuthOperationResponse> whenCompleteFuture = null;
             whenCompleteFuture = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -317,51 +317,51 @@ public CompletableFuture bearerAuthOperation(
      */
     @Override
     public CompletableFuture<Void> eventStreamOperation(EventStreamOperationRequest eventStreamOperationRequest,
-            EventStreamOperationResponseHandler asyncResponseHandler) {
+        EventStreamOperationResponseHandler asyncResponseHandler) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, eventStreamOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperation");
             HttpResponseHandler<EventStreamOperationResponse> responseHandler = protocolFactory.createResponseHandler(
-                    EventStreamOperationResponse::builder, XmlOperationMetadata.builder().hasStreamingSuccessResponse(true)
-                            .build());
+                EventStreamOperationResponse::builder, XmlOperationMetadata.builder().hasStreamingSuccessResponse(true)
+                        .build());
             HttpResponseHandler<AwsServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler();
             HttpResponseHandler<? extends EventStream> eventResponseHandler = protocolFactory.createResponseHandler(
-                    EventStreamTaggedUnionPojoSupplier.builder()
-                            .putSdkPojoSupplier("EventPayloadEvent", EventStream::eventPayloadEventBuilder)
-                            .putSdkPojoSupplier("NonEventPayloadEvent", EventStream::nonEventPayloadEventBuilder)
-                            .putSdkPojoSupplier("SecondEventPayloadEvent", EventStream::secondEventPayloadEventBuilder)
-                            .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build(), XmlOperationMetadata
-                            .builder().hasStreamingSuccessResponse(false).build());
+                EventStreamTaggedUnionPojoSupplier.builder()
+                        .putSdkPojoSupplier("EventPayloadEvent", EventStream::eventPayloadEventBuilder)
+                        .putSdkPojoSupplier("NonEventPayloadEvent", EventStream::nonEventPayloadEventBuilder)
+                        .putSdkPojoSupplier("SecondEventPayloadEvent", EventStream::secondEventPayloadEventBuilder)
+                        .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build(), XmlOperationMetadata
+                        .builder().hasStreamingSuccessResponse(false).build());
             CompletableFuture<Void> eventStreamTransformFuture = new CompletableFuture<>();
             EventStreamAsyncResponseTransformer<EventStreamOperationResponse, EventStream> asyncResponseTransformer = EventStreamAsyncResponseTransformer
-                    .<EventStreamOperationResponse, EventStream> builder().eventStreamResponseHandler(asyncResponseHandler)
-                    .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler)
-                    .exceptionResponseHandler(errorResponseHandler).future(eventStreamTransformFuture).executor(executor)
-                    .serviceName(serviceName()).build();
+                .<EventStreamOperationResponse, EventStream> builder().eventStreamResponseHandler(asyncResponseHandler)
+                .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler)
+                .exceptionResponseHandler(errorResponseHandler).future(eventStreamTransformFuture).executor(executor)
+                .serviceName(serviceName()).build();
             RestEventStreamAsyncResponseTransformer<EventStreamOperationResponse, EventStream> restAsyncResponseTransformer = RestEventStreamAsyncResponseTransformer
-                    .<EventStreamOperationResponse, EventStream> builder()
-                    .eventStreamAsyncResponseTransformer(asyncResponseTransformer)
-                    .eventStreamResponseHandler(asyncResponseHandler).build();
+                .<EventStreamOperationResponse, EventStream> builder()
+                .eventStreamAsyncResponseTransformer(asyncResponseTransformer)
+                .eventStreamResponseHandler(asyncResponseHandler).build();
             CompletableFuture<Void> executeFuture = clientHandler.execute(
-                    new ClientExecutionParams<EventStreamOperationRequest, EventStreamOperationResponse>()
-                            .withOperationName("EventStreamOperation").withRequestConfiguration(clientConfiguration)
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationRequest),
-                    restAsyncResponseTransformer);
+                new ClientExecutionParams<EventStreamOperationRequest, EventStreamOperationResponse>()
+                        .withOperationName("EventStreamOperation").withRequestConfiguration(clientConfiguration)
+                        .withProtocolMetadata(protocolMetadata)
+                        .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory))
+                        .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                        .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationRequest),
+                restAsyncResponseTransformer);
             CompletableFuture<Void> whenCompleteFuture = null;
             whenCompleteFuture = executeFuture.whenComplete((r, e) -> {
                 if (e != null) {
                     runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                            () -> asyncResponseHandler.exceptionOccurred(e));
+                        () -> asyncResponseHandler.exceptionOccurred(e));
                     eventStreamTransformFuture.completeExceptionally(e);
                 }
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -370,7 +370,7 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest
             return CompletableFutureUtils.forwardExceptionTo(eventStreamTransformFuture, executeFuture);
         } catch (Throwable t) {
             runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                    () -> asyncResponseHandler.exceptionOccurred(t));
+                () -> asyncResponseHandler.exceptionOccurred(t));
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             return CompletableFutureUtils.failedFuture(t);
         }
@@ -398,35 +398,35 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest
      */
     @Override
     public CompletableFuture<GetOperationWithChecksumResponse> getOperationWithChecksum(
-            GetOperationWithChecksumRequest getOperationWithChecksumRequest) {
+        GetOperationWithChecksumRequest getOperationWithChecksumRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum");
             HttpResponseHandler<Response<GetOperationWithChecksumResponse>> responseHandler = protocolFactory
-                    .createCombinedResponseHandler(GetOperationWithChecksumResponse::builder,
-                            new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
+                .createCombinedResponseHandler(GetOperationWithChecksumResponse::builder,
+                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
             CompletableFuture<GetOperationWithChecksumResponse> executeFuture = clientHandler
-                    .execute(new ClientExecutionParams<GetOperationWithChecksumRequest, GetOperationWithChecksumResponse>()
-                            .withOperationName("GetOperationWithChecksum")
-                            .withRequestConfiguration(clientConfiguration)
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))
-                            .withCombinedResponseHandler(responseHandler)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .putExecutionAttribute(
-                                    SdkInternalExecutionAttribute.HTTP_CHECKSUM,
-                                    HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false)
-                                            .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString())
-                                            .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build())
-                            .withInput(getOperationWithChecksumRequest));
+                .execute(new ClientExecutionParams<GetOperationWithChecksumRequest, GetOperationWithChecksumResponse>()
+                        .withOperationName("GetOperationWithChecksum")
+                        .withRequestConfiguration(clientConfiguration)
+                        .withProtocolMetadata(protocolMetadata)
+                        .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))
+                        .withCombinedResponseHandler(responseHandler)
+                        .withMetricCollector(apiCallMetricCollector)
+                        .putExecutionAttribute(
+                                SdkInternalExecutionAttribute.HTTP_CHECKSUM,
+                                HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false)
+                                        .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString())
+                                        .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build())
+                        .withInput(getOperationWithChecksumRequest));
             CompletableFuture<GetOperationWithChecksumResponse> whenCompleteFuture = null;
             whenCompleteFuture = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -462,31 +462,31 @@ public CompletableFuture getOperationWithCheck
      */
     @Override
     public CompletableFuture<OperationWithChecksumRequiredResponse> operationWithChecksumRequired(
-            OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) {
+        OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
-                operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null));
+            operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired");
             HttpResponseHandler<Response<OperationWithChecksumRequiredResponse>> responseHandler = protocolFactory
-                    .createCombinedResponseHandler(OperationWithChecksumRequiredResponse::builder,
-                            new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
+                .createCombinedResponseHandler(OperationWithChecksumRequiredResponse::builder,
+                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
             CompletableFuture<OperationWithChecksumRequiredResponse> executeFuture = clientHandler
-                    .execute(new ClientExecutionParams<OperationWithChecksumRequiredRequest, OperationWithChecksumRequiredResponse>()
-                            .withOperationName("OperationWithChecksumRequired")
-                            .withRequestConfiguration(clientConfiguration)
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))
-                            .withCombinedResponseHandler(responseHandler)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED,
-                                    HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest));
+                .execute(new ClientExecutionParams<OperationWithChecksumRequiredRequest, OperationWithChecksumRequiredResponse>()
+                        .withOperationName("OperationWithChecksumRequired")
+                        .withRequestConfiguration(clientConfiguration)
+                        .withProtocolMetadata(protocolMetadata)
+                        .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))
+                        .withCombinedResponseHandler(responseHandler)
+                        .withMetricCollector(apiCallMetricCollector)
+                        .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED,
+                                HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest));
             CompletableFuture<OperationWithChecksumRequiredResponse> whenCompleteFuture = null;
             whenCompleteFuture = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -521,28 +521,28 @@ public CompletableFuture operationWithChe
      */
     @Override
     public CompletableFuture<OperationWithNoneAuthTypeResponse> operationWithNoneAuthType(
-            OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) {
+        OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType");
             HttpResponseHandler<Response<OperationWithNoneAuthTypeResponse>> responseHandler = protocolFactory
-                    .createCombinedResponseHandler(OperationWithNoneAuthTypeResponse::builder,
-                            new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
+                .createCombinedResponseHandler(OperationWithNoneAuthTypeResponse::builder,
+                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
             CompletableFuture<OperationWithNoneAuthTypeResponse> executeFuture = clientHandler
-                    .execute(new ClientExecutionParams<OperationWithNoneAuthTypeRequest, OperationWithNoneAuthTypeResponse>()
-                            .withOperationName("OperationWithNoneAuthType").withRequestConfiguration(clientConfiguration)
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))
-                            .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector)
-                            .withInput(operationWithNoneAuthTypeRequest));
+                .execute(new ClientExecutionParams<OperationWithNoneAuthTypeRequest, OperationWithNoneAuthTypeResponse>()
+                        .withOperationName("OperationWithNoneAuthType").withRequestConfiguration(clientConfiguration)
+                        .withProtocolMetadata(protocolMetadata)
+                        .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))
+                        .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector)
+                        .withInput(operationWithNoneAuthTypeRequest));
             CompletableFuture<OperationWithNoneAuthTypeResponse> whenCompleteFuture = null;
             whenCompleteFuture = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -578,32 +578,32 @@ public CompletableFuture operationWithNoneAut
      */
     @Override
     public CompletableFuture<OperationWithRequestCompressionResponse> operationWithRequestCompression(
-            OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) {
+        OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
-                operationWithRequestCompressionRequest.overrideConfiguration().orElse(null));
+            operationWithRequestCompressionRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression");
             HttpResponseHandler<Response<OperationWithRequestCompressionResponse>> responseHandler = protocolFactory
-                    .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder,
-                            new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
+                .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder,
+                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
             CompletableFuture<OperationWithRequestCompressionResponse> executeFuture = clientHandler
-                    .execute(new ClientExecutionParams<OperationWithRequestCompressionRequest, OperationWithRequestCompressionResponse>()
-                            .withOperationName("OperationWithRequestCompression")
-                            .withRequestConfiguration(clientConfiguration)
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))
-                            .withCombinedResponseHandler(responseHandler)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION,
-                                    RequestCompression.builder().encodings("gzip").isStreaming(false).build())
-                            .withInput(operationWithRequestCompressionRequest));
+                .execute(new ClientExecutionParams<OperationWithRequestCompressionRequest, OperationWithRequestCompressionResponse>()
+                        .withOperationName("OperationWithRequestCompression")
+                        .withRequestConfiguration(clientConfiguration)
+                        .withProtocolMetadata(protocolMetadata)
+                        .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))
+                        .withCombinedResponseHandler(responseHandler)
+                        .withMetricCollector(apiCallMetricCollector)
+                        .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION,
+                                RequestCompression.builder().encodings("gzip").isStreaming(false).build())
+                        .withInput(operationWithRequestCompressionRequest));
             CompletableFuture<OperationWithRequestCompressionResponse> whenCompleteFuture = null;
             whenCompleteFuture = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -656,56 +656,57 @@ public CompletableFuture operationWithR
      */
     @Override
     public <ReturnT> CompletableFuture<ReturnT> putOperationWithChecksum(
-            PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody,
-            AsyncResponseTransformer<PutOperationWithChecksumResponse, ReturnT> asyncResponseTransformer) {
+        PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody,
+        AsyncResponseTransformer<PutOperationWithChecksumResponse, ReturnT> asyncResponseTransformer) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum");
             Pair<AsyncResponseTransformer<PutOperationWithChecksumResponse, ReturnT>, CompletableFuture<Void>> pair = AsyncResponseTransformerUtils
-                    .wrapWithEndOfStreamFuture(asyncResponseTransformer);
+                .wrapWithEndOfStreamFuture(asyncResponseTransformer);
             asyncResponseTransformer = pair.left();
             CompletableFuture<Void> endOfStreamFuture = pair.right();
             HttpResponseHandler<PutOperationWithChecksumResponse> responseHandler = protocolFactory.createResponseHandler(
-                    PutOperationWithChecksumResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true));
+                PutOperationWithChecksumResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true));
             HttpResponseHandler<AwsServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler();
             CompletableFuture<ReturnT> executeFuture = clientHandler.execute(
-                    new ClientExecutionParams<PutOperationWithChecksumRequest, PutOperationWithChecksumResponse>()
-                            .withOperationName("PutOperationWithChecksum")
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(
-                                    AsyncStreamingRequestMarshaller.builder()
-                                            .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory))
-                                            .asyncRequestBody(requestBody).build())
-                            .withResponseHandler(responseHandler)
-                            .withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .putExecutionAttribute(
-                                    SdkInternalExecutionAttribute.HTTP_CHECKSUM,
-                                    HttpChecksum
-                                            .builder()
-                                            .requestChecksumRequired(false)
-                                            .isRequestStreaming(true)
-                                            .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString())
-                                            .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C,
-                                                    DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1,
-                                                    DefaultChecksumAlgorithm.SHA256).build()).withAsyncRequestBody(requestBody)
-                            .withInput(putOperationWithChecksumRequest), asyncResponseTransformer);
+                new ClientExecutionParams<PutOperationWithChecksumRequest, PutOperationWithChecksumResponse>()
+                        .withOperationName("PutOperationWithChecksum")
+                        .withProtocolMetadata(protocolMetadata)
+                        .withMarshaller(
+                                AsyncStreamingRequestMarshaller.builder()
+                                        .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory))
+                                        .asyncRequestBody(requestBody).build())
+                        .withResponseHandler(responseHandler)
+                        .withErrorResponseHandler(errorResponseHandler)
+                        .withRequestConfiguration(clientConfiguration)
+                        .withMetricCollector(apiCallMetricCollector)
+                        .putExecutionAttribute(
+                                SdkInternalExecutionAttribute.HTTP_CHECKSUM,
+                                HttpChecksum
+                                        .builder()
+                                        .requestChecksumRequired(false)
+                                        .isRequestStreaming(true)
+                                        .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString())
+                                        .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C,
+                                                DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1,
+                                                DefaultChecksumAlgorithm.SHA256).build())
+                        .withAsyncResponseTransformer(asyncResponseTransformer).withAsyncRequestBody(requestBody)
+                        .withInput(putOperationWithChecksumRequest), asyncResponseTransformer);
             CompletableFuture<ReturnT> whenCompleteFuture = null;
             AsyncResponseTransformer<PutOperationWithChecksumResponse, ReturnT> finalAsyncResponseTransformer = asyncResponseTransformer;
             whenCompleteFuture = executeFuture.whenComplete((r, e) -> {
                 if (e != null) {
                     runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                            () -> finalAsyncResponseTransformer.exceptionOccurred(e));
+                        () -> finalAsyncResponseTransformer.exceptionOccurred(e));
                 }
                 endOfStreamFuture.whenComplete((r2, e2) -> {
                     metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -715,7 +716,7 @@ public CompletableFuture putOperationWithChecksum(
         } catch (Throwable t) {
             AsyncResponseTransformer<PutOperationWithChecksumResponse, ReturnT> finalAsyncResponseTransformer = asyncResponseTransformer;
             runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                    () -> finalAsyncResponseTransformer.exceptionOccurred(t));
+                () -> finalAsyncResponseTransformer.exceptionOccurred(t));
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             return CompletableFutureUtils.failedFuture(t);
         }
@@ -748,32 +749,32 @@ public CompletableFuture putOperationWithChecksum(
      */
     @Override
     public CompletableFuture<StreamingInputOperationResponse> streamingInputOperation(
-            StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) {
+        StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation");
             HttpResponseHandler<Response<StreamingInputOperationResponse>> responseHandler = protocolFactory
-                    .createCombinedResponseHandler(StreamingInputOperationResponse::builder,
-                            new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
+                .createCombinedResponseHandler(StreamingInputOperationResponse::builder,
+                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
             CompletableFuture<StreamingInputOperationResponse> executeFuture = clientHandler
-                    .execute(new ClientExecutionParams<StreamingInputOperationRequest, StreamingInputOperationResponse>()
-                            .withOperationName("StreamingInputOperation")
-                            .withRequestConfiguration(clientConfiguration)
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(
-                                    AsyncStreamingRequestMarshaller.builder()
-                                            .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory))
-                                            .asyncRequestBody(requestBody).build()).withCombinedResponseHandler(responseHandler)
-                            .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody)
-                            .withInput(streamingInputOperationRequest));
+                .execute(new ClientExecutionParams<StreamingInputOperationRequest, StreamingInputOperationResponse>()
+                        .withOperationName("StreamingInputOperation")
+                        .withRequestConfiguration(clientConfiguration)
+                        .withProtocolMetadata(protocolMetadata)
+                        .withMarshaller(
+                                AsyncStreamingRequestMarshaller.builder()
+                                        .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory))
+                                        .asyncRequestBody(requestBody).build()).withCombinedResponseHandler(responseHandler)
+                        .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody)
+                        .withInput(streamingInputOperationRequest));
             CompletableFuture<StreamingInputOperationResponse> whenCompleteFuture = null;
             whenCompleteFuture = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -813,40 +814,41 @@ public CompletableFuture streamingInputOperatio
      */
     @Override
     public <ReturnT> CompletableFuture<ReturnT> streamingOutputOperation(
-            StreamingOutputOperationRequest streamingOutputOperationRequest,
-            AsyncResponseTransformer<StreamingOutputOperationResponse, ReturnT> asyncResponseTransformer) {
+        StreamingOutputOperationRequest streamingOutputOperationRequest,
+        AsyncResponseTransformer<StreamingOutputOperationResponse, ReturnT> asyncResponseTransformer) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation");
             Pair<AsyncResponseTransformer<StreamingOutputOperationResponse, ReturnT>, CompletableFuture<Void>> pair = AsyncResponseTransformerUtils
-                    .wrapWithEndOfStreamFuture(asyncResponseTransformer);
+                .wrapWithEndOfStreamFuture(asyncResponseTransformer);
             asyncResponseTransformer = pair.left();
             CompletableFuture<Void> endOfStreamFuture = pair.right();
             HttpResponseHandler<StreamingOutputOperationResponse> responseHandler = protocolFactory.createResponseHandler(
-                    StreamingOutputOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true));
+                StreamingOutputOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true));
             HttpResponseHandler<AwsServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler();
             CompletableFuture<ReturnT> executeFuture = clientHandler.execute(
-                    new ClientExecutionParams<StreamingOutputOperationRequest, StreamingOutputOperationResponse>()
-                            .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withInput(streamingOutputOperationRequest), asyncResponseTransformer);
+                new ClientExecutionParams<StreamingOutputOperationRequest, StreamingOutputOperationResponse>()
+                        .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata)
+                        .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory))
+                        .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                        .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                        .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest),
+                asyncResponseTransformer);
             CompletableFuture<ReturnT> whenCompleteFuture = null;
             AsyncResponseTransformer<StreamingOutputOperationResponse, ReturnT> finalAsyncResponseTransformer = asyncResponseTransformer;
             whenCompleteFuture = executeFuture.whenComplete((r, e) -> {
                 if (e != null) {
                     runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                            () -> finalAsyncResponseTransformer.exceptionOccurred(e));
+                        () -> finalAsyncResponseTransformer.exceptionOccurred(e));
                 }
                 endOfStreamFuture.whenComplete((r2, e2) -> {
                     metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -856,7 +858,7 @@ public CompletableFuture streamingOutputOperation(
         } catch (Throwable t) {
             AsyncResponseTransformer<StreamingOutputOperationResponse, ReturnT> finalAsyncResponseTransformer = asyncResponseTransformer;
             runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                    () -> finalAsyncResponseTransformer.exceptionOccurred(t));
+                () -> finalAsyncResponseTransformer.exceptionOccurred(t));
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             return CompletableFutureUtils.failedFuture(t);
         }
@@ -874,15 +876,15 @@ public final String serviceName() {
 
     private AwsXmlProtocolFactory init() {
         return AwsXmlProtocolFactory
-                .builder()
-                .registerModeledException(
-                        ExceptionMetadata.builder().errorCode("InvalidInput")
-                                .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build())
-                .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(XmlException::builder).build();
+            .builder()
+            .registerModeledException(
+                    ExceptionMetadata.builder().errorCode("InvalidInput")
+                            .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build())
+            .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(XmlException::builder).build();
     }
 
     private static List<MetricPublisher> resolveMetricPublishers(SdkClientConfiguration clientConfiguration,
-            RequestOverrideConfiguration requestOverrideConfiguration) {
+        RequestOverrideConfiguration requestOverrideConfiguration) {
         List<MetricPublisher> publishers = null;
         if (requestOverrideConfiguration != null) {
             publishers = requestOverrideConfiguration.metricPublishers();
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-xml-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-xml-client-class.java
index 457d5a36c2c3..7b2a515aa9a4 100644
--- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-xml-client-class.java
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-xml-client-class.java
@@ -85,7 +85,7 @@ final class DefaultXmlClient implements XmlClient {
     private static final Logger log = Logger.loggerFor(DefaultXmlClient.class);
 
     private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder()
-            .serviceProtocol(AwsServiceProtocol.REST_XML).build();
+        .serviceProtocol(AwsServiceProtocol.REST_XML).build();
 
     private final SyncClientHandler clientHandler;
 
@@ -121,15 +121,15 @@ protected DefaultXmlClient(SdkClientConfiguration clientConfiguration) {
      */
     @Override
     public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException,
-            AwsServiceException, SdkClientException, XmlException {
+        AwsServiceException, SdkClientException, XmlException {
         HttpResponseHandler<Response<APostOperationResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
-                APostOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
+            APostOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation");
@@ -137,10 +137,10 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio
             String resolvedHostExpression = "foo-";
             return clientHandler.execute(new ClientExecutionParams<APostOperationRequest, APostOperationResponse>()
-                    .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata)
-                    .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector)
-                    .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration)
-                    .withInput(aPostOperationRequest).withMarshaller(new APostOperationRequestMarshaller(protocolFactory)));
+                .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata)
+                .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector)
+                .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration)
+                .withInput(aPostOperationRequest).withMarshaller(new APostOperationRequestMarshaller(protocolFactory)));
         } finally {
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
         }
@@ -168,28 +168,28 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio
      */
     @Override
     public APostOperationWithOutputResponse aPostOperationWithOutput(
-            APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException,
-            SdkClientException, XmlException {
+        APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException,
+        SdkClientException, XmlException {
         HttpResponseHandler<Response<APostOperationWithOutputResponse>> responseHandler = protocolFactory
-                .createCombinedResponseHandler(APostOperationWithOutputResponse::builder,
-                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
+            .createCombinedResponseHandler(APostOperationWithOutputResponse::builder,
+                    new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -213,27 +213,27 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( */ @Override public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationRequest bearerAuthOperationRequest) - throws AwsServiceException, SdkClientException, XmlException { + throws AwsServiceException, SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(BearerAuthOperationResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(BearerAuthOperationResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) - .withInput(bearerAuthOperationRequest) - .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); + .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) + .withInput(bearerAuthOperationRequest) + .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -257,36 +257,36 @@ public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationReques */ @Override public GetOperationWithChecksumResponse getOperationWithChecksum( - GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, - XmlException { + GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, + XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(GetOperationWithChecksumResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(GetOperationWithChecksumResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration) - .withInput(getOperationWithChecksumRequest) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) - .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) - .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) - .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("GetOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration) + .withInput(getOperationWithChecksumRequest) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) + .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) + .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) + .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -310,33 +310,33 @@ public GetOperationWithChecksumResponse getOperationWithChecksum( */ @Override public OperationWithChecksumRequiredResponse operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, - SdkClientException, XmlException { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, + SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithChecksumRequiredResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithChecksumRequiredResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithChecksumRequiredRequest) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithChecksumRequiredRequest) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -360,28 +360,28 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( */ @Override public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( - OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) throws AwsServiceException, SdkClientException, - XmlException { + OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) throws AwsServiceException, SdkClientException, + XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithNoneAuthTypeResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithNoneAuthTypeResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration).withInput(operationWithNoneAuthTypeRequest) - .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration).withInput(operationWithNoneAuthTypeRequest) + .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -405,33 +405,33 @@ public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( */ @Override public OperationWithRequestCompressionResponse operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, - SdkClientException, XmlException { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithRequestCompression") - .withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithRequestCompressionRequest) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithRequestCompressionRequest) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -481,47 +481,48 @@ public OperationWithRequestCompressionResponse operationWithRequestCompression( */ @Override public ReturnT putOperationWithChecksum(PutOperationWithChecksumRequest putOperationWithChecksumRequest, - RequestBody requestBody, ResponseTransformer responseTransformer) - throws AwsServiceException, SdkClientException, XmlException { + RequestBody requestBody, ResponseTransformer responseTransformer) + throws AwsServiceException, SdkClientException, XmlException { HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - PutOperationWithChecksumResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); + PutOperationWithChecksumResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PutOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(putOperationWithChecksumRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum - .builder() - .requestChecksumRequired(false) - .isRequestStreaming(true) - .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) - .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, - DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, - DefaultChecksumAlgorithm.SHA256).build()) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("PutOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(putOperationWithChecksumRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum + .builder() + .requestChecksumRequired(false) + .isRequestStreaming(true) + .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) + .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, + DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, + DefaultChecksumAlgorithm.SHA256).build()) + .withResponseTransformer(responseTransformer) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -556,34 +557,34 @@ public ReturnT putOperationWithChecksum(PutOperationWithChecksumReques */ @Override public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, - RequestBody requestBody) throws AwsServiceException, SdkClientException, XmlException { + RequestBody requestBody) throws AwsServiceException, SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(StreamingInputOperationResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(StreamingInputOperationResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = 
metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration) - .withInput(streamingInputOperationRequest) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration) + .withInput(streamingInputOperationRequest) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -614,30 +615,30 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe */ @Override public ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, XmlException { + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, XmlException { HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - StreamingOutputOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); + StreamingOutputOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector).withResponseTransformer(responseTransformer) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -649,7 +650,7 @@ public final String serviceName() { } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -702,11 +703,11 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, private AwsXmlProtocolFactory init() { return AwsXmlProtocolFactory - .builder() - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(XmlException::builder).build(); + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(XmlException::builder).build(); } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java index 8a79e84d719a..a6e467d42300 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java @@ -134,7 +134,7 @@ final class DefaultJsonAsyncClient implements JsonAsyncClient { private static final Logger log = LoggerFactory.getLogger(DefaultJsonAsyncClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.AWS_JSON).build(); + .serviceProtocol(AwsServiceProtocol.AWS_JSON).build(); private final AsyncClientHandler clientHandler; @@ -184,46 +184,46 @@ public JsonUtilities utilities() { public CompletableFuture aPostOperation(APostOperationRequest 
aPostOperationRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, APostOperationResponse::builder); + operationMetadata, APostOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); String hostPrefix = "{StringMember}-foo."; HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember", - "aPostOperationRequest"); + "aPostOperationRequest"); String resolvedHostExpression = String.format("%s-foo.", aPostOperationRequest.stringMember()); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { 
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -262,46 +262,46 @@ public CompletableFuture aPostOperation(APostOperationRe */ @Override public CompletableFuture aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, APostOperationWithOutputResponse::builder); + operationMetadata, APostOperationWithOutputResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(aPostOperationWithOutputRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + 
.withInput(aPostOperationWithOutputRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -335,82 +335,82 @@ public CompletableFuture aPostOperationWithOut */ @Override public CompletableFuture eventStreamOperation(EventStreamOperationRequest eventStreamOperationRequest, - Publisher requestStream, EventStreamOperationResponseHandler asyncResponseHandler) { + Publisher requestStream, EventStreamOperationResponseHandler asyncResponseHandler) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, eventStreamOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperation"); eventStreamOperationRequest = applySignerOverride(eventStreamOperationRequest, EventStreamAws4Signer.create()); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = new AttachHttpMetadataResponseHandler( - protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationResponse::builder)); + protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationResponse::builder)); HttpResponseHandler voidResponseHandler = protocolFactory.createResponseHandler(JsonOperationMetadata - .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder); + .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder); HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler( - JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), - EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder) - .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder) - .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder) - .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder) - .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build()); + JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), + EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder) + .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder) + .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder) + .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder) + .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build()); Function> eventstreamExceptionMetadataMapper = errorCode -> { switch (errorCode) { - default: - return Optional.empty(); + default: + return Optional.empty(); } }; HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, eventstreamExceptionMetadataMapper); + operationMetadata, 
eventstreamExceptionMetadataMapper); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() - .putMarshaller(DefaultInputEvent.class, new InputEventMarshaller(protocolFactory)).build(); + .putMarshaller(DefaultInputEvent.class, new InputEventMarshaller(protocolFactory)).build(); SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream); Publisher adapted = eventPublisher.map(event -> eventMarshaller.marshall(event)).map( - AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer); + AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer); CompletableFuture future = new CompletableFuture<>(); EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer - . builder().eventStreamResponseHandler(asyncResponseHandler) - .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler) - .exceptionResponseHandler(errorEventResponseHandler).future(future).executor(executor) - .serviceName(serviceName()).build(); + . 
builder().eventStreamResponseHandler(asyncResponseHandler) + .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler) + .exceptionResponseHandler(errorEventResponseHandler).future(future).executor(executor) + .serviceName(serviceName()).build(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("EventStreamOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory)) - .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withFullDuplex(true) - .withInitialRequestEvent(true).withResponseHandler(voidResponseHandler) - .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationRequest), - asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("EventStreamOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory)) + .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withFullDuplex(true) + .withInitialRequestEvent(true).withResponseHandler(voidResponseHandler) + .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationRequest), + asyncResponseTransformer); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { try { @@ -425,7 +425,7 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest return CompletableFutureUtils.forwardExceptionTo(future, executeFuture); } catch (Throwable t) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseHandler.exceptionOccurred(t)); + () -> asyncResponseHandler.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -454,56 +454,56 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest */ @Override public CompletableFuture eventStreamOperationWithOnlyInput( - EventStreamOperationWithOnlyInputRequest eventStreamOperationWithOnlyInputRequest, - Publisher requestStream) { + EventStreamOperationWithOnlyInputRequest eventStreamOperationWithOnlyInputRequest, + Publisher requestStream) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationWithOnlyInputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - eventStreamOperationWithOnlyInputRequest.overrideConfiguration().orElse(null)); + eventStreamOperationWithOnlyInputRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperationWithOnlyInput"); eventStreamOperationWithOnlyInputRequest = applySignerOverride(eventStreamOperationWithOnlyInputRequest, - EventStreamAws4Signer.create()); + EventStreamAws4Signer.create()); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(operationMetadata, EventStreamOperationWithOnlyInputResponse::builder); + .createResponseHandler(operationMetadata, EventStreamOperationWithOnlyInputResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() - .putMarshaller(DefaultInputEventOne.class, new InputEventMarshaller(protocolFactory)) - .putMarshaller(DefaultInputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build(); + .putMarshaller(DefaultInputEventOne.class, new InputEventMarshaller(protocolFactory)) + .putMarshaller(DefaultInputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build(); SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream); Publisher adapted = eventPublisher.map(event -> eventMarshaller.marshall(event)).map( - AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer); + AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("EventStreamOperationWithOnlyInput").withProtocolMetadata(protocolMetadata) - .withMarshaller(new EventStreamOperationWithOnlyInputRequestMarshaller(protocolFactory)) - .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withInitialRequestEvent(true) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(eventStreamOperationWithOnlyInputRequest)); + .execute(new ClientExecutionParams() + 
.withOperationName("EventStreamOperationWithOnlyInput").withProtocolMetadata(protocolMetadata) + .withMarshaller(new EventStreamOperationWithOnlyInputRequestMarshaller(protocolFactory)) + .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withInitialRequestEvent(true) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(eventStreamOperationWithOnlyInputRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -538,75 +538,75 @@ public CompletableFuture eventStreamO */ @Override public CompletableFuture eventStreamOperationWithOnlyOutput( - EventStreamOperationWithOnlyOutputRequest eventStreamOperationWithOnlyOutputRequest, - EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) { + EventStreamOperationWithOnlyOutputRequest eventStreamOperationWithOnlyOutputRequest, + EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationWithOnlyOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - eventStreamOperationWithOnlyOutputRequest.overrideConfiguration().orElse(null)); + eventStreamOperationWithOnlyOutputRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperationWithOnlyOutput"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = new AttachHttpMetadataResponseHandler( - protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationWithOnlyOutputResponse::builder)); + protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationWithOnlyOutputResponse::builder)); HttpResponseHandler voidResponseHandler = protocolFactory.createResponseHandler(JsonOperationMetadata - .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder); + .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder); HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler( - JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), - EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder) - .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder) - .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder) - .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder) - .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build()); + JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), + EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder) + .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder) + 
.putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder) + .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder) + .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build()); Function> eventstreamExceptionMetadataMapper = errorCode -> { switch (errorCode) { - default: - return Optional.empty(); + default: + return Optional.empty(); } }; HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, eventstreamExceptionMetadataMapper); + operationMetadata, eventstreamExceptionMetadataMapper); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture future = new CompletableFuture<>(); EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer - . builder() - .eventStreamResponseHandler(asyncResponseHandler).eventResponseHandler(eventResponseHandler) - .initialResponseHandler(responseHandler).exceptionResponseHandler(errorEventResponseHandler).future(future) - .executor(executor).serviceName(serviceName()).build(); + . 
builder() + .eventStreamResponseHandler(asyncResponseHandler).eventResponseHandler(eventResponseHandler) + .initialResponseHandler(responseHandler).exceptionResponseHandler(errorEventResponseHandler).future(future) + .executor(executor).serviceName(serviceName()).build(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("EventStreamOperationWithOnlyOutput").withProtocolMetadata(protocolMetadata) - .withMarshaller(new EventStreamOperationWithOnlyOutputRequestMarshaller(protocolFactory)) - .withResponseHandler(voidResponseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(eventStreamOperationWithOnlyOutputRequest), asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("EventStreamOperationWithOnlyOutput").withProtocolMetadata(protocolMetadata) + .withMarshaller(new EventStreamOperationWithOnlyOutputRequestMarshaller(protocolFactory)) + .withResponseHandler(voidResponseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(eventStreamOperationWithOnlyOutputRequest), asyncResponseTransformer); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { try { @@ -621,7 +621,7 @@ public CompletableFuture eventStreamOperationWithOnlyOutput( return CompletableFutureUtils.forwardExceptionTo(future, executeFuture); } catch (Throwable t) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseHandler.exceptionOccurred(t)); + () -> asyncResponseHandler.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -653,46 +653,46 @@ public CompletableFuture eventStreamOperationWithOnlyOutput( */ @Override public CompletableFuture getWithoutRequiredMembers( - GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) { + GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getWithoutRequiredMembersRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getWithoutRequiredMembersRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetWithoutRequiredMembers"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, GetWithoutRequiredMembersResponse::builder); + operationMetadata, GetWithoutRequiredMembersResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata) - .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(getWithoutRequiredMembersRequest)); + .execute(new ClientExecutionParams() + .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata) + .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(getWithoutRequiredMembersRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -727,50 +727,50 @@ public CompletableFuture getWithoutRequiredMe */ @Override public CompletableFuture operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + 
operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, OperationWithChecksumRequiredResponse::builder); + operationMetadata, OperationWithChecksumRequiredResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -804,47 +804,47 @@ public CompletableFuture operationWithChe */ @Override public CompletableFuture operationWithNoneAuthType( - OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) 
{ + OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, OperationWithNoneAuthTypeResponse::builder); + operationMetadata, OperationWithNoneAuthTypeResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.IS_NONE_AUTH_TYPE_REQUEST, false) - .withInput(operationWithNoneAuthTypeRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.IS_NONE_AUTH_TYPE_REQUEST, false) + .withInput(operationWithNoneAuthTypeRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { 
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -879,51 +879,51 @@ public CompletableFuture operationWithNoneAut */ @Override public CompletableFuture operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, OperationWithRequestCompressionResponse::builder); + operationMetadata, OperationWithRequestCompressionResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithRequestCompression") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withInput(operationWithRequestCompressionRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withProtocolMetadata(protocolMetadata) + 
.withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withInput(operationWithRequestCompressionRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -958,46 +958,46 @@ public CompletableFuture operationWithR */ @Override public CompletableFuture paginatedOperationWithResultKey( - PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) { + PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithResultKeyRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); + paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithResultKey"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PaginatedOperationWithResultKeyResponse::builder); + operationMetadata, PaginatedOperationWithResultKeyResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata) - .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory)) - 
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(paginatedOperationWithResultKeyRequest)); + .execute(new ClientExecutionParams() + .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata) + .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(paginatedOperationWithResultKeyRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -1032,46 +1032,46 @@ public CompletableFuture paginatedOpera */ @Override public CompletableFuture paginatedOperationWithoutResultKey( - PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) { + PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithoutResultKeyRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); + paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithoutResultKey"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); + .createResponseHandler(operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); 
CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata) - .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(paginatedOperationWithoutResultKeyRequest)); + .execute(new ClientExecutionParams() + .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata) + .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(paginatedOperationWithoutResultKeyRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -1110,13 +1110,13 @@ public CompletableFuture paginatedOp */ @Override public CompletableFuture streamingInputOperation( - StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { + StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); @@ -1124,39 +1124,39 @@ public CompletableFuture streamingInputOperatio streamingInputOperationRequest = applySignerOverride(streamingInputOperationRequest, AsyncAws4Signer.create()); } JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOperationResponse::builder); + operationMetadata, StreamingInputOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) - .withInput(streamingInputOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) + .withInput(streamingInputOperationRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -1200,65 +1200,65 @@ public CompletableFuture streamingInputOperatio */ @Override public CompletableFuture streamingInputOutputOperation( - 
StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, AsyncRequestBody requestBody, - AsyncResponseTransformer asyncResponseTransformer) { + StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, AsyncRequestBody requestBody, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); + streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOutputOperation"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); streamingInputOutputOperationRequest = applySignerOverride(streamingInputOutputOperationRequest, - Aws4UnsignedPayloadSigner.create()); + Aws4UnsignedPayloadSigner.create()); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOutputOperationResponse::builder); + operationMetadata, StreamingInputOutputOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingInputOutputOperation") - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller - .builder() - .delegateMarshaller( - new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).transferEncoding(true).build()) - 
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withAsyncRequestBody(requestBody).withInput(streamingInputOutputOperationRequest), - asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingInputOutputOperation") + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller + .builder() + .delegateMarshaller( + new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).transferEncoding(true).build()) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withAsyncRequestBody(requestBody).withAsyncResponseTransformer(asyncResponseTransformer) + .withInput(streamingInputOutputOperationRequest), asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -1269,7 +1269,7 @@ public CompletableFuture streamingInputOutputOperation( } catch (Throwable t) { AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -1302,56 +1302,57 @@ public CompletableFuture streamingInputOutputOperation( */ @Override public CompletableFuture streamingOutputOperation( - StreamingOutputOperationRequest streamingOutputOperationRequest, - AsyncResponseTransformer asyncResponseTransformer) { + StreamingOutputOperationRequest streamingOutputOperationRequest, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingOutputOperationResponse::builder); + operationMetadata, StreamingOutputOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(streamingOutputOperationRequest), asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } 
endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -1362,7 +1363,7 @@ public CompletableFuture streamingOutputOperation( } catch (Throwable t) { AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -1380,11 +1381,11 @@ public final String serviceName() { private > T init(T builder) { return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(JsonException::builder) - .protocol(AwsJsonProtocol.AWS_JSON).protocolVersion("1.1"); + .protocol(AwsJsonProtocol.AWS_JSON).protocolVersion("1.1"); } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -1404,8 +1405,8 @@ private T applySignerOverride(T request, Signer signer) } Consumer signerOverride = b -> b.signer(signer).build(); AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration() - .map(c -> c.toBuilder().applyMutation(signerOverride).build()) - .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); + .map(c -> c.toBuilder().applyMutation(signerOverride).build()) + .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build(); } @@ -1451,7 +1452,7 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, } private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { + JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-cbor-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-cbor-async-client-class.java index 8b73813753cf..ae42324a36f6 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-cbor-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-cbor-async-client-class.java @@ -135,7 +135,7 @@ final class DefaultJsonAsyncClient implements JsonAsyncClient { private static final Logger log = LoggerFactory.getLogger(DefaultJsonAsyncClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.CBOR).build(); + .serviceProtocol(AwsServiceProtocol.CBOR).build(); private final AsyncClientHandler clientHandler; @@ -188,46 +188,46 @@ public JsonUtilities utilities() { public CompletableFuture aPostOperation(APostOperationRequest aPostOperationRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, 
this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, APostOperationResponse::builder); + operationMetadata, APostOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); String hostPrefix = "{StringMember}-foo."; HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember", - "aPostOperationRequest"); + "aPostOperationRequest"); String resolvedHostExpression = String.format("%s-foo.", aPostOperationRequest.stringMember()); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -266,46 +266,46 @@ public CompletableFuture 
aPostOperation(APostOperationRe */ @Override public CompletableFuture aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, APostOperationWithOutputResponse::builder); + operationMetadata, APostOperationWithOutputResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(aPostOperationWithOutputRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(aPostOperationWithOutputRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> 
p.publish(apiCallMetricCollector.collect())); }); @@ -339,82 +339,82 @@ public CompletableFuture aPostOperationWithOut */ @Override public CompletableFuture eventStreamOperation(EventStreamOperationRequest eventStreamOperationRequest, - Publisher requestStream, EventStreamOperationResponseHandler asyncResponseHandler) { + Publisher requestStream, EventStreamOperationResponseHandler asyncResponseHandler) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, eventStreamOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperation"); eventStreamOperationRequest = applySignerOverride(eventStreamOperationRequest, EventStreamAws4Signer.create()); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = new AttachHttpMetadataResponseHandler( - protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationResponse::builder)); + protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationResponse::builder)); HttpResponseHandler voidResponseHandler = protocolFactory.createResponseHandler(JsonOperationMetadata - .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder); + .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder); HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler( - JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), - EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder) - .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder) - .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder) - .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder) - .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build()); + JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), + EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder) + .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder) + .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder) + .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder) + .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build()); Function> eventstreamExceptionMetadataMapper = errorCode -> { switch (errorCode) { - default: - return Optional.empty(); + default: + return Optional.empty(); } }; HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, eventstreamExceptionMetadataMapper); + operationMetadata, eventstreamExceptionMetadataMapper); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - 
case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() - .putMarshaller(DefaultInputEvent.class, new InputEventMarshaller(protocolFactory)).build(); + .putMarshaller(DefaultInputEvent.class, new InputEventMarshaller(protocolFactory)).build(); SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream); Publisher adapted = eventPublisher.map(event -> eventMarshaller.marshall(event)).map( - AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer); + AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer); CompletableFuture future = new CompletableFuture<>(); EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer - . builder().eventStreamResponseHandler(asyncResponseHandler) - .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler) - .exceptionResponseHandler(errorEventResponseHandler).future(future).executor(executor) - .serviceName(serviceName()).build(); + . 
builder().eventStreamResponseHandler(asyncResponseHandler) + .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler) + .exceptionResponseHandler(errorEventResponseHandler).future(future).executor(executor) + .serviceName(serviceName()).build(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("EventStreamOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory)) - .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withFullDuplex(true) - .withInitialRequestEvent(true).withResponseHandler(voidResponseHandler) - .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationRequest), - asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("EventStreamOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory)) + .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withFullDuplex(true) + .withInitialRequestEvent(true).withResponseHandler(voidResponseHandler) + .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationRequest), + asyncResponseTransformer); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { try { @@ -429,7 +429,7 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest return CompletableFutureUtils.forwardExceptionTo(future, executeFuture); } catch (Throwable t) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseHandler.exceptionOccurred(t)); + () -> asyncResponseHandler.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -458,56 +458,56 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest */ @Override public CompletableFuture eventStreamOperationWithOnlyInput( - EventStreamOperationWithOnlyInputRequest eventStreamOperationWithOnlyInputRequest, - Publisher requestStream) { + EventStreamOperationWithOnlyInputRequest eventStreamOperationWithOnlyInputRequest, + Publisher requestStream) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationWithOnlyInputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - eventStreamOperationWithOnlyInputRequest.overrideConfiguration().orElse(null)); + eventStreamOperationWithOnlyInputRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperationWithOnlyInput"); eventStreamOperationWithOnlyInputRequest = applySignerOverride(eventStreamOperationWithOnlyInputRequest, - EventStreamAws4Signer.create()); + EventStreamAws4Signer.create()); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(operationMetadata, EventStreamOperationWithOnlyInputResponse::builder); + .createResponseHandler(operationMetadata, EventStreamOperationWithOnlyInputResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() - .putMarshaller(DefaultInputEventOne.class, new InputEventMarshaller(protocolFactory)) - .putMarshaller(DefaultInputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build(); + .putMarshaller(DefaultInputEventOne.class, new InputEventMarshaller(protocolFactory)) + .putMarshaller(DefaultInputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build(); SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream); Publisher adapted = eventPublisher.map(event -> eventMarshaller.marshall(event)).map( - AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer); + AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("EventStreamOperationWithOnlyInput").withProtocolMetadata(protocolMetadata) - .withMarshaller(new EventStreamOperationWithOnlyInputRequestMarshaller(protocolFactory)) - .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withInitialRequestEvent(true) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(eventStreamOperationWithOnlyInputRequest)); + .execute(new ClientExecutionParams() + 
.withOperationName("EventStreamOperationWithOnlyInput").withProtocolMetadata(protocolMetadata) + .withMarshaller(new EventStreamOperationWithOnlyInputRequestMarshaller(protocolFactory)) + .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withInitialRequestEvent(true) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(eventStreamOperationWithOnlyInputRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -542,75 +542,75 @@ public CompletableFuture eventStreamO */ @Override public CompletableFuture eventStreamOperationWithOnlyOutput( - EventStreamOperationWithOnlyOutputRequest eventStreamOperationWithOnlyOutputRequest, - EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) { + EventStreamOperationWithOnlyOutputRequest eventStreamOperationWithOnlyOutputRequest, + EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationWithOnlyOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - eventStreamOperationWithOnlyOutputRequest.overrideConfiguration().orElse(null)); + eventStreamOperationWithOnlyOutputRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperationWithOnlyOutput"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = new AttachHttpMetadataResponseHandler( - protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationWithOnlyOutputResponse::builder)); + protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationWithOnlyOutputResponse::builder)); HttpResponseHandler voidResponseHandler = protocolFactory.createResponseHandler(JsonOperationMetadata - .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder); + .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder); HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler( - JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), - EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder) - .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder) - .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder) - .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder) - .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build()); + JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), + EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder) + .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder) + 
.putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder) + .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder) + .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build()); Function> eventstreamExceptionMetadataMapper = errorCode -> { switch (errorCode) { - default: - return Optional.empty(); + default: + return Optional.empty(); } }; HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, eventstreamExceptionMetadataMapper); + operationMetadata, eventstreamExceptionMetadataMapper); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture future = new CompletableFuture<>(); EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer - . builder() - .eventStreamResponseHandler(asyncResponseHandler).eventResponseHandler(eventResponseHandler) - .initialResponseHandler(responseHandler).exceptionResponseHandler(errorEventResponseHandler).future(future) - .executor(executor).serviceName(serviceName()).build(); + . 
builder() + .eventStreamResponseHandler(asyncResponseHandler).eventResponseHandler(eventResponseHandler) + .initialResponseHandler(responseHandler).exceptionResponseHandler(errorEventResponseHandler).future(future) + .executor(executor).serviceName(serviceName()).build(); CompletableFuture<Void> executeFuture = clientHandler.execute( - new ClientExecutionParams<EventStreamOperationWithOnlyOutputRequest, SdkResponse>() - .withOperationName("EventStreamOperationWithOnlyOutput").withProtocolMetadata(protocolMetadata) - .withMarshaller(new EventStreamOperationWithOnlyOutputRequestMarshaller(protocolFactory)) - .withResponseHandler(voidResponseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(eventStreamOperationWithOnlyOutputRequest), asyncResponseTransformer); + new ClientExecutionParams<EventStreamOperationWithOnlyOutputRequest, SdkResponse>() + .withOperationName("EventStreamOperationWithOnlyOutput").withProtocolMetadata(protocolMetadata) + .withMarshaller(new EventStreamOperationWithOnlyOutputRequestMarshaller(protocolFactory)) + .withResponseHandler(voidResponseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(eventStreamOperationWithOnlyOutputRequest), asyncResponseTransformer); CompletableFuture<Void> whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { try { @@ -625,7 +625,7 @@ public CompletableFuture<Void> eventStreamOperationWithOnlyOutput( return CompletableFutureUtils.forwardExceptionTo(future, executeFuture); } catch (Throwable t) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseHandler.exceptionOccurred(t)); + () -> asyncResponseHandler.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -657,46 +657,46 @@ public CompletableFuture<Void> eventStreamOperationWithOnlyOutput( */ @Override public CompletableFuture<GetWithoutRequiredMembersResponse> getWithoutRequiredMembers( - GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) { + GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getWithoutRequiredMembersRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getWithoutRequiredMembersRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetWithoutRequiredMembers"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler<GetWithoutRequiredMembersResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, GetWithoutRequiredMembersResponse::builder); + operationMetadata, GetWithoutRequiredMembersResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture<GetWithoutRequiredMembersResponse> executeFuture = clientHandler - .execute(new ClientExecutionParams<GetWithoutRequiredMembersRequest, GetWithoutRequiredMembersResponse>() - .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata) - .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(getWithoutRequiredMembersRequest)); + .execute(new ClientExecutionParams<GetWithoutRequiredMembersRequest, GetWithoutRequiredMembersResponse>() + .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata) + .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(getWithoutRequiredMembersRequest)); CompletableFuture<GetWithoutRequiredMembersResponse> whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -731,50 +731,50 @@ public CompletableFuture getWithoutRequiredMe */ @Override public CompletableFuture<OperationWithChecksumRequiredResponse> operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + 
operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler<OperationWithChecksumRequiredResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, OperationWithChecksumRequiredResponse::builder); + operationMetadata, OperationWithChecksumRequiredResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture<OperationWithChecksumRequiredResponse> executeFuture = clientHandler - .execute(new ClientExecutionParams<OperationWithChecksumRequiredRequest, OperationWithChecksumRequiredResponse>() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); + .execute(new ClientExecutionParams<OperationWithChecksumRequiredRequest, OperationWithChecksumRequiredResponse>() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); CompletableFuture<OperationWithChecksumRequiredResponse> whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -808,47 +808,47 @@ public CompletableFuture operationWithChe */ @Override public CompletableFuture<OperationWithNoneAuthTypeResponse> operationWithNoneAuthType( - OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) 
{ + OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler<OperationWithNoneAuthTypeResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, OperationWithNoneAuthTypeResponse::builder); + operationMetadata, OperationWithNoneAuthTypeResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture<OperationWithNoneAuthTypeResponse> executeFuture = clientHandler - .execute(new ClientExecutionParams<OperationWithNoneAuthTypeRequest, OperationWithNoneAuthTypeResponse>() - .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.IS_NONE_AUTH_TYPE_REQUEST, false) - .withInput(operationWithNoneAuthTypeRequest)); + .execute(new ClientExecutionParams<OperationWithNoneAuthTypeRequest, OperationWithNoneAuthTypeResponse>() + .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.IS_NONE_AUTH_TYPE_REQUEST, false) + .withInput(operationWithNoneAuthTypeRequest)); CompletableFuture<OperationWithNoneAuthTypeResponse> whenCompleted = executeFuture.whenComplete((r, e) -> { 
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -883,51 +883,51 @@ public CompletableFuture operationWithNoneAut */ @Override public CompletableFuture<OperationWithRequestCompressionResponse> operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler<OperationWithRequestCompressionResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, OperationWithRequestCompressionResponse::builder); + operationMetadata, OperationWithRequestCompressionResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture<OperationWithRequestCompressionResponse> executeFuture = clientHandler - .execute(new ClientExecutionParams<OperationWithRequestCompressionRequest, OperationWithRequestCompressionResponse>() - .withOperationName("OperationWithRequestCompression") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withInput(operationWithRequestCompressionRequest)); + .execute(new ClientExecutionParams<OperationWithRequestCompressionRequest, OperationWithRequestCompressionResponse>() + .withOperationName("OperationWithRequestCompression") + .withProtocolMetadata(protocolMetadata) + 
.withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withInput(operationWithRequestCompressionRequest)); CompletableFuture<OperationWithRequestCompressionResponse> whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -962,46 +962,46 @@ public CompletableFuture operationWithR */ @Override public CompletableFuture<PaginatedOperationWithResultKeyResponse> paginatedOperationWithResultKey( - PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) { + PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithResultKeyRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); + paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithResultKey"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler<PaginatedOperationWithResultKeyResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PaginatedOperationWithResultKeyResponse::builder); + operationMetadata, PaginatedOperationWithResultKeyResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture<PaginatedOperationWithResultKeyResponse> executeFuture = clientHandler - .execute(new ClientExecutionParams<PaginatedOperationWithResultKeyRequest, PaginatedOperationWithResultKeyResponse>() - .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata) - .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory)) - 
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(paginatedOperationWithResultKeyRequest)); + .execute(new ClientExecutionParams<PaginatedOperationWithResultKeyRequest, PaginatedOperationWithResultKeyResponse>() + .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata) + .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(paginatedOperationWithResultKeyRequest)); CompletableFuture<PaginatedOperationWithResultKeyResponse> whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -1036,46 +1036,46 @@ public CompletableFuture paginatedOpera */ @Override public CompletableFuture<PaginatedOperationWithoutResultKeyResponse> paginatedOperationWithoutResultKey( - PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) { + PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithoutResultKeyRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); + paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithoutResultKey"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler<PaginatedOperationWithoutResultKeyResponse> responseHandler = protocolFactory - .createResponseHandler(operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); + .createResponseHandler(operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); 
CompletableFuture<PaginatedOperationWithoutResultKeyResponse> executeFuture = clientHandler - .execute(new ClientExecutionParams<PaginatedOperationWithoutResultKeyRequest, PaginatedOperationWithoutResultKeyResponse>() - .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata) - .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(paginatedOperationWithoutResultKeyRequest)); + .execute(new ClientExecutionParams<PaginatedOperationWithoutResultKeyRequest, PaginatedOperationWithoutResultKeyResponse>() + .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata) + .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(paginatedOperationWithoutResultKeyRequest)); CompletableFuture<PaginatedOperationWithoutResultKeyResponse> whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -1114,13 +1114,13 @@ public CompletableFuture paginatedOp */ @Override public CompletableFuture<StreamingInputOperationResponse> streamingInputOperation( - StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { + StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); @@ -1128,39 +1128,39 @@ public CompletableFuture streamingInputOperatio streamingInputOperationRequest = applySignerOverride(streamingInputOperationRequest, AsyncAws4Signer.create()); } JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler<StreamingInputOperationResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOperationResponse::builder); + operationMetadata, StreamingInputOperationResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture<StreamingInputOperationResponse> executeFuture = clientHandler - .execute(new ClientExecutionParams<StreamingInputOperationRequest, StreamingInputOperationResponse>() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) - .withInput(streamingInputOperationRequest)); + .execute(new ClientExecutionParams<StreamingInputOperationRequest, StreamingInputOperationResponse>() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) + .withInput(streamingInputOperationRequest)); CompletableFuture<StreamingInputOperationResponse> whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -1204,65 +1204,65 @@ public CompletableFuture streamingInputOperatio */ @Override public <ReturnT> CompletableFuture<ReturnT> streamingInputOutputOperation( - 
StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, AsyncRequestBody requestBody, - AsyncResponseTransformer<StreamingInputOutputOperationResponse, ReturnT> asyncResponseTransformer) { + StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, AsyncRequestBody requestBody, + AsyncResponseTransformer<StreamingInputOutputOperationResponse, ReturnT> asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, - streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); + streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOutputOperation"); Pair<AsyncResponseTransformer<StreamingInputOutputOperationResponse, ReturnT>, CompletableFuture<Void>> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture<Void> endOfStreamFuture = pair.right(); streamingInputOutputOperationRequest = applySignerOverride(streamingInputOutputOperationRequest, - Aws4UnsignedPayloadSigner.create()); + Aws4UnsignedPayloadSigner.create()); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler<StreamingInputOutputOperationResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOutputOperationResponse::builder); + operationMetadata, StreamingInputOutputOperationResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture<ReturnT> executeFuture = clientHandler.execute( - new ClientExecutionParams<StreamingInputOutputOperationRequest, StreamingInputOutputOperationResponse>() - .withOperationName("StreamingInputOutputOperation") - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller - .builder() - .delegateMarshaller( - new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).transferEncoding(true).build()) - 
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withAsyncRequestBody(requestBody).withInput(streamingInputOutputOperationRequest), - asyncResponseTransformer); + new ClientExecutionParams<StreamingInputOutputOperationRequest, StreamingInputOutputOperationResponse>() + .withOperationName("StreamingInputOutputOperation") + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller + .builder() + .delegateMarshaller( + new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).transferEncoding(true).build()) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withAsyncRequestBody(requestBody).withAsyncResponseTransformer(asyncResponseTransformer) + .withInput(streamingInputOutputOperationRequest), asyncResponseTransformer); AsyncResponseTransformer<StreamingInputOutputOperationResponse, ReturnT> finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture<ReturnT> whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -1273,7 +1273,7 @@ public <ReturnT> CompletableFuture<ReturnT> streamingInputOutputOperation( } catch (Throwable t) { AsyncResponseTransformer<StreamingInputOutputOperationResponse, ReturnT> finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -1306,56 +1306,57 @@ public <ReturnT> CompletableFuture<ReturnT> streamingInputOutputOperation( */ @Override public <ReturnT> CompletableFuture<ReturnT> streamingOutputOperation( - StreamingOutputOperationRequest streamingOutputOperationRequest, - AsyncResponseTransformer<StreamingOutputOperationResponse, ReturnT> asyncResponseTransformer) { + StreamingOutputOperationRequest streamingOutputOperationRequest, + AsyncResponseTransformer<StreamingOutputOperationResponse, ReturnT> asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); Pair<AsyncResponseTransformer<StreamingOutputOperationResponse, ReturnT>, CompletableFuture<Void>> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture<Void> endOfStreamFuture = pair.right(); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler<StreamingOutputOperationResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingOutputOperationResponse::builder); + operationMetadata, StreamingOutputOperationResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture<ReturnT> executeFuture = clientHandler.execute( - new ClientExecutionParams<StreamingOutputOperationRequest, StreamingOutputOperationResponse>() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(streamingOutputOperationRequest), asyncResponseTransformer); + new ClientExecutionParams<StreamingOutputOperationRequest, StreamingOutputOperationResponse>() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); AsyncResponseTransformer<StreamingOutputOperationResponse, ReturnT> finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture<ReturnT> whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } 
endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -1366,7 +1367,7 @@ public <ReturnT> CompletableFuture<ReturnT> streamingOutputOperation( } catch (Throwable t) { AsyncResponseTransformer<StreamingOutputOperationResponse, ReturnT> finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -1384,11 +1385,11 @@ public final String serviceName() { private <T extends BaseAwsJsonProtocolFactory.Builder<T>> T init(T builder) { return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(JsonException::builder) - .protocol(AwsJsonProtocol.AWS_JSON).protocolVersion("1.1"); + .protocol(AwsJsonProtocol.AWS_JSON).protocolVersion("1.1"); } private static List<MetricPublisher> resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List<MetricPublisher> publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -1408,8 +1409,8 @@ private <T extends AwsRequest> T applySignerOverride(T request, Signer signer) } Consumer<AwsRequestOverrideConfiguration.Builder> signerOverride = b -> b.signer(signer).build(); AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration() - .map(c -> c.toBuilder().applyMutation(signerOverride).build()) - .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); + .map(c -> c.toBuilder().applyMutation(signerOverride).build()) + .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build(); } @@ -1455,7 +1456,7 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, } private HttpResponseHandler<AwsServiceException> createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata, Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper) { + JsonOperationMetadata operationMetadata, Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper) { return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-cbor-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-cbor-client-class.java index cfa013f73a9d..7741026207f2 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-cbor-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-cbor-client-class.java @@ -90,7 +90,7 @@ final class DefaultJsonClient implements JsonClient { private static final Logger log = Logger.loggerFor(DefaultJsonClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.CBOR).build(); + .serviceProtocol(AwsServiceProtocol.CBOR).build(); private final SyncClientHandler clientHandler; @@ -126,48 +126,48 @@ protected DefaultJsonClient(SdkClientConfiguration clientConfiguration) { */ @Override public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException, - AwsServiceException, 
SdkClientException, JsonException { + AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler<APostOperationResponse> responseHandler = protocolFactory.createResponseHandler(operationMetadata, - APostOperationResponse::builder); + APostOperationResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); String hostPrefix = "{StringMember}-foo."; HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember", - "aPostOperationRequest"); + "aPostOperationRequest"); String resolvedHostExpression = String.format("%s-foo.", aPostOperationRequest.stringMember()); return clientHandler.execute(new ClientExecutionParams<APostOperationRequest, APostOperationResponse>() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) - .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) + .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -196,47 +196,47 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio */ @Override public APostOperationWithOutputResponse aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, ServiceFaultException, - AwsServiceException, SdkClientException, JsonException { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, ServiceFaultException, + AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler<APostOperationWithOutputResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, APostOperationWithOutputResponse::builder); + operationMetadata, APostOperationWithOutputResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler<AwsServiceException> errorResponseHandler = 
createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); return clientHandler - .execute(new ClientExecutionParams<APostOperationWithOutputRequest, APostOperationWithOutputResponse>() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams<APostOperationWithOutputRequest, APostOperationWithOutputResponse>() + .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -264,47 +264,47 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( */ @Override public GetWithoutRequiredMembersResponse getWithoutRequiredMembers( - GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, JsonException { + GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler<GetWithoutRequiredMembersResponse> responseHandler = protocolFactory.createResponseHandler( - operationMetadata, GetWithoutRequiredMembersResponse::builder); + operationMetadata, GetWithoutRequiredMembersResponse::builder); Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return 
Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getWithoutRequiredMembersRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getWithoutRequiredMembersRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetWithoutRequiredMembers"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(getWithoutRequiredMembersRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(getWithoutRequiredMembersRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -328,52 +328,52 @@ public GetWithoutRequiredMembersResponse getWithoutRequiredMembers( */ @Override public OperationWithChecksumRequiredResponse operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, - SdkClientException, JsonException { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, OperationWithChecksumRequiredResponse::builder); + operationMetadata, OperationWithChecksumRequiredResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return 
Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithChecksumRequiredRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithChecksumRequiredRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -397,47 +397,47 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( */ @Override public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( - OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) throws AwsServiceException, SdkClientException, - JsonException { + OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) throws AwsServiceException, SdkClientException, + JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, OperationWithNoneAuthTypeResponse::builder); + operationMetadata, OperationWithNoneAuthTypeResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case 
"InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(operationWithNoneAuthTypeRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(operationWithNoneAuthTypeRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -461,52 +461,52 @@ public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( */ @Override public OperationWithRequestCompressionResponse operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, - SdkClientException, JsonException { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, 
OperationWithRequestCompressionResponse::builder); + operationMetadata, OperationWithRequestCompressionResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithRequestCompression") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithRequestCompressionRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithRequestCompressionRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -530,47 +530,47 @@ public OperationWithRequestCompressionResponse operationWithRequestCompression( */ @Override public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( - PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PaginatedOperationWithResultKeyResponse::builder); + operationMetadata, PaginatedOperationWithResultKeyResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler 
errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithResultKeyRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); + paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithResultKey"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithResultKeyRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithResultKeyRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -594,47 +594,47 @@ public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( */ @Override public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResultKey( - PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); + operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + 
.exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithoutResultKeyRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); + paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithoutResultKey"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithoutResultKeyRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithoutResultKeyRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -669,53 +669,53 @@ public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResul */ @Override public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, - RequestBody requestBody) throws AwsServiceException, SdkClientException, JsonException { + RequestBody requestBody) throws AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOperationResponse::builder); + operationMetadata, StreamingInputOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return 
Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(streamingInputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(streamingInputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -757,57 +757,58 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe */ @Override public ReturnT streamingInputOutputOperation( - StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, RequestBody requestBody, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, JsonException { + StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, RequestBody requestBody, + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); 
HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOutputOperationResponse::builder); + operationMetadata, StreamingInputOutputOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); + streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOutputOperation"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingInputOutputOperation") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(streamingInputOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller - .builder() - .delegateMarshaller( - new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).transferEncoding(true).build()), responseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingInputOutputOperation") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(streamingInputOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withResponseTransformer(responseTransformer) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller + .builder() + .delegateMarshaller( + new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).transferEncoding(true).build()), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -838,47 +839,47 @@ public ReturnT streamingInputOutputOperation( */ @Override public ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, JsonException { + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingOutputOperationResponse::builder); + operationMetadata, StreamingOutputOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInputException": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "ServiceFaultException": - return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) - .exceptionBuilderSupplier(ServiceFaultException::builder).build()); - default: - return Optional.empty(); + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler 
errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector).withResponseTransformer(responseTransformer) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -898,7 +899,7 @@ public final String serviceName() { } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -913,7 +914,7 @@ private static List resolveMetricPublishers(SdkClientConfigurat } private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { + JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); } @@ -956,7 +957,7 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, private > T init(T builder) { return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(JsonException::builder) - .protocol(AwsJsonProtocol.AWS_JSON).protocolVersion("1.1"); + .protocol(AwsJsonProtocol.AWS_JSON).protocolVersion("1.1"); } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-environment-token-system-settings-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-environment-token-system-settings-class.java new file mode 100644 index 000000000000..bb604b75807b --- /dev/null +++ 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-environment-token-system-settings-class.java
@@ -0,0 +1,24 @@
+package software.amazon.awssdk.services.json.internal;
+
+import software.amazon.awssdk.annotations.Generated;
+import software.amazon.awssdk.annotations.SdkInternalApi;
+import software.amazon.awssdk.utils.SystemSetting;
+
+@Generated("software.amazon.awssdk:codegen")
+@SdkInternalApi
+public class EnvironmentTokenSystemSettings implements SystemSetting {
+    @Override
+    public String property() {
+        return "aws.bearerTokenJsonService";
+    }
+
+    @Override
+    public String environmentVariable() {
+        return "AWS_BEARER_TOKEN_JSON_SERVICE";
+    }
+
+    @Override
+    public String defaultValue() {
+        return null;
+    }
+}
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java index 68816b5809c0..65a319c88a89 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java @@ -146,7 +146,7 @@ final class DefaultJsonAsyncClient implements JsonAsyncClient { private static final Logger log = LoggerFactory.getLogger(DefaultJsonAsyncClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.REST_JSON).build(); + .serviceProtocol(AwsServiceProtocol.REST_JSON).build(); private final AsyncClientHandler clientHandler; @@ -199,43 +199,43 @@ public JsonUtilities utilities() { public CompletableFuture<APostOperationResponse> aPostOperation(APostOperationRequest aPostOperationRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ?
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, APostOperationResponse::builder); + operationMetadata, APostOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); String hostPrefix = "{StringMember}-foo."; HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember", - "aPostOperationRequest"); + "aPostOperationRequest"); String resolvedHostExpression = String.format("%s-foo.", aPostOperationRequest.stringMember()); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -273,43 +273,43 @@ public CompletableFuture aPostOperation(APostOperationRe */ @Override public CompletableFuture aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, APostOperationWithOutputResponse::builder); + operationMetadata, APostOperationWithOutputResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(aPostOperationWithOutputRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(aPostOperationWithOutputRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -343,44 +343,44 @@ public CompletableFuture aPostOperationWithOut */ @Override public CompletableFuture bearerAuthOperation( - BearerAuthOperationRequest bearerAuthOperationRequest) { + BearerAuthOperationRequest bearerAuthOperationRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation"); bearerAuthOperationRequest = applySignerOverride(bearerAuthOperationRequest, BearerTokenSigner.create()); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, BearerAuthOperationResponse::builder); + operationMetadata, BearerAuthOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .credentialType(CredentialType.TOKEN).withInput(bearerAuthOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .credentialType(CredentialType.TOKEN).withInput(bearerAuthOperationRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -414,88 +414,88 @@ public CompletableFuture bearerAuthOperation( */ @Override public CompletableFuture eventStreamOperation(EventStreamOperationRequest eventStreamOperationRequest, - Publisher requestStream, EventStreamOperationResponseHandler asyncResponseHandler) { + Publisher requestStream, EventStreamOperationResponseHandler asyncResponseHandler) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, eventStreamOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperation"); eventStreamOperationRequest = applySignerOverride(eventStreamOperationRequest, EventStreamAws4Signer.create()); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = new AttachHttpMetadataResponseHandler( - protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationResponse::builder)); + protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationResponse::builder)); HttpResponseHandler voidResponseHandler = protocolFactory.createResponseHandler(JsonOperationMetadata - .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder); + .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder); HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler( - JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), - EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder) - .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder) - .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder) - .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder) - .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build()); + JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), + EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder) + .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder) + .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder) + .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder) + .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build()); Function> eventstreamExceptionMetadataMapper = errorCode -> { switch (errorCode) { - case "errorOne": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "errorTwo": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "errorOne": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "errorTwo": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, eventstreamExceptionMetadataMapper); + operationMetadata, eventstreamExceptionMetadataMapper); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return 
Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() - .putMarshaller(DefaultInputEvent.class, new InputEventMarshaller(protocolFactory)).build(); + .putMarshaller(DefaultInputEvent.class, new InputEventMarshaller(protocolFactory)).build(); SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream); Publisher adapted = eventPublisher.map(event -> eventMarshaller.marshall(event)).map( - AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer); + AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer); CompletableFuture future = new CompletableFuture<>(); EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer - . builder().eventStreamResponseHandler(asyncResponseHandler) - .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler) - .exceptionResponseHandler(errorEventResponseHandler).future(future).executor(executor) - .serviceName(serviceName()).build(); + . builder().eventStreamResponseHandler(asyncResponseHandler) + .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler) + .exceptionResponseHandler(errorEventResponseHandler).future(future).executor(executor) + .serviceName(serviceName()).build(); RestEventStreamAsyncResponseTransformer restAsyncResponseTransformer = RestEventStreamAsyncResponseTransformer - . builder() - .eventStreamAsyncResponseTransformer(asyncResponseTransformer) - .eventStreamResponseHandler(asyncResponseHandler).build(); + . 
builder() + .eventStreamAsyncResponseTransformer(asyncResponseTransformer) + .eventStreamResponseHandler(asyncResponseHandler).build(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("EventStreamOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory)) - .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withFullDuplex(true) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(eventStreamOperationRequest), restAsyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("EventStreamOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory)) + .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withFullDuplex(true) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(eventStreamOperationRequest), restAsyncResponseTransformer); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { try { @@ -510,7 +510,7 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest return CompletableFutureUtils.forwardExceptionTo(future, executeFuture); } catch (Throwable t) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseHandler.exceptionOccurred(t)); + () -> asyncResponseHandler.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -539,52 +539,52 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest */ @Override public CompletableFuture eventStreamOperationWithOnlyInput( - EventStreamOperationWithOnlyInputRequest eventStreamOperationWithOnlyInputRequest, - Publisher requestStream) { + EventStreamOperationWithOnlyInputRequest eventStreamOperationWithOnlyInputRequest, + Publisher requestStream) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationWithOnlyInputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - eventStreamOperationWithOnlyInputRequest.overrideConfiguration().orElse(null)); + eventStreamOperationWithOnlyInputRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperationWithOnlyInput"); eventStreamOperationWithOnlyInputRequest = applySignerOverride(eventStreamOperationWithOnlyInputRequest, - EventStreamAws4Signer.create()); + EventStreamAws4Signer.create()); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(operationMetadata, EventStreamOperationWithOnlyInputResponse::builder); + .createResponseHandler(operationMetadata, EventStreamOperationWithOnlyInputResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() - .putMarshaller(DefaultInputEventOne.class, new InputEventMarshaller(protocolFactory)) - .putMarshaller(DefaultInputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build(); + .putMarshaller(DefaultInputEventOne.class, new InputEventMarshaller(protocolFactory)) + .putMarshaller(DefaultInputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build(); SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream); Publisher adapted = eventPublisher.map(event -> eventMarshaller.marshall(event)).map( - AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer); + AwsClientHandlerUtils::encodeEventStreamRequestToByteBuffer); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("EventStreamOperationWithOnlyInput").withProtocolMetadata(protocolMetadata) - .withMarshaller(new EventStreamOperationWithOnlyInputRequestMarshaller(protocolFactory)) - .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationWithOnlyInputRequest)); + .execute(new ClientExecutionParams() + .withOperationName("EventStreamOperationWithOnlyInput").withProtocolMetadata(protocolMetadata) + .withMarshaller(new EventStreamOperationWithOnlyInputRequestMarshaller(protocolFactory)) + .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationWithOnlyInputRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, 
e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); @@ -619,84 +619,84 @@ public CompletableFuture eventStreamO */ @Override public CompletableFuture eventStreamOperationWithOnlyOutput( - EventStreamOperationWithOnlyOutputRequest eventStreamOperationWithOnlyOutputRequest, - EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) { + EventStreamOperationWithOnlyOutputRequest eventStreamOperationWithOnlyOutputRequest, + EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationWithOnlyOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - eventStreamOperationWithOnlyOutputRequest.overrideConfiguration().orElse(null)); + eventStreamOperationWithOnlyOutputRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperationWithOnlyOutput"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = new AttachHttpMetadataResponseHandler( - protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationWithOnlyOutputResponse::builder)); + protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationWithOnlyOutputResponse::builder)); HttpResponseHandler voidResponseHandler = protocolFactory.createResponseHandler(JsonOperationMetadata - .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder); + .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder); HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler( - JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), - EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder) - .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder) - .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder) - .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder) - .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build()); + JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), + EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventStream::eventOneBuilder) + .putSdkPojoSupplier("EventTheSecond", EventStream::eventTheSecondBuilder) + .putSdkPojoSupplier("secondEventOne", EventStream::secondEventOneBuilder) + .putSdkPojoSupplier("eventThree", EventStream::eventThreeBuilder) + .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build()); Function> eventstreamExceptionMetadataMapper = errorCode -> { switch (errorCode) { - case "errorOne": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - case "errorTwo": - return 
Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "errorOne": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "errorTwo": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, eventstreamExceptionMetadataMapper); + operationMetadata, eventstreamExceptionMetadataMapper); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); CompletableFuture future = new CompletableFuture<>(); EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer - . builder() - .eventStreamResponseHandler(asyncResponseHandler).eventResponseHandler(eventResponseHandler) - .initialResponseHandler(responseHandler).exceptionResponseHandler(errorEventResponseHandler).future(future) - .executor(executor).serviceName(serviceName()).build(); + . builder() + .eventStreamResponseHandler(asyncResponseHandler).eventResponseHandler(eventResponseHandler) + .initialResponseHandler(responseHandler).exceptionResponseHandler(errorEventResponseHandler).future(future) + .executor(executor).serviceName(serviceName()).build(); RestEventStreamAsyncResponseTransformer restAsyncResponseTransformer = RestEventStreamAsyncResponseTransformer - . builder() - .eventStreamAsyncResponseTransformer(asyncResponseTransformer) - .eventStreamResponseHandler(asyncResponseHandler).build(); + . 
+                .<EventStreamOperationWithOnlyOutputResponse, EventStream> builder()
+                .eventStreamAsyncResponseTransformer(asyncResponseTransformer)
+                .eventStreamResponseHandler(asyncResponseHandler).build();
             CompletableFuture<Void> executeFuture = clientHandler
-                    .execute(
-                            new ClientExecutionParams<EventStreamOperationWithOnlyOutputRequest, EventStreamOperationWithOnlyOutputResponse>()
-                                    .withOperationName("EventStreamOperationWithOnlyOutput")
-                                    .withProtocolMetadata(protocolMetadata)
-                                    .withMarshaller(new EventStreamOperationWithOnlyOutputRequestMarshaller(protocolFactory))
-                                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                                    .withInput(eventStreamOperationWithOnlyOutputRequest), restAsyncResponseTransformer);
+                .execute(
+                    new ClientExecutionParams<EventStreamOperationWithOnlyOutputRequest, EventStreamOperationWithOnlyOutputResponse>()
+                        .withOperationName("EventStreamOperationWithOnlyOutput")
+                        .withProtocolMetadata(protocolMetadata)
+                        .withMarshaller(new EventStreamOperationWithOnlyOutputRequestMarshaller(protocolFactory))
+                        .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                        .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                        .withInput(eventStreamOperationWithOnlyOutputRequest), restAsyncResponseTransformer);
             CompletableFuture<Void> whenCompleted = executeFuture.whenComplete((r, e) -> {
                 if (e != null) {
                     try {
@@ -711,7 +711,7 @@ public CompletableFuture eventStreamOperationWithOnlyOutput(
             return CompletableFutureUtils.forwardExceptionTo(future, executeFuture);
         } catch (Throwable t) {
             runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                    () -> asyncResponseHandler.exceptionOccurred(t));
+                () -> asyncResponseHandler.exceptionOccurred(t));
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             return CompletableFutureUtils.failedFuture(t);
         }
@@ -739,51 +739,51 @@ public CompletableFuture eventStreamOperationWithOnlyOutput(
      */
     @Override
     public CompletableFuture<GetOperationWithChecksumResponse> getOperationWithChecksum(
-            GetOperationWithChecksumRequest getOperationWithChecksumRequest) {
+        GetOperationWithChecksumRequest getOperationWithChecksumRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(false).build();
+                .isPayloadJson(false).build();
             HttpResponseHandler<GetOperationWithChecksumResponse> responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, GetOperationWithChecksumResponse::builder);
+                operationMetadata, GetOperationWithChecksumResponse::builder);
             Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInput":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInput":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture<GetOperationWithChecksumResponse> executeFuture = clientHandler
-                    .execute(new ClientExecutionParams<GetOperationWithChecksumRequest, GetOperationWithChecksumResponse>()
-                            .withOperationName("GetOperationWithChecksum")
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler)
-                            .withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .putExecutionAttribute(
-                                    SdkInternalExecutionAttribute.HTTP_CHECKSUM,
-                                    HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false)
-                                            .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString())
-                                            .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build())
-                            .withInput(getOperationWithChecksumRequest));
+                .execute(new ClientExecutionParams<GetOperationWithChecksumRequest, GetOperationWithChecksumResponse>()
+                    .withOperationName("GetOperationWithChecksum")
+                    .withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler)
+                    .withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration)
+                    .withMetricCollector(apiCallMetricCollector)
+                    .putExecutionAttribute(
+                        SdkInternalExecutionAttribute.HTTP_CHECKSUM,
+                        HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false)
+                            .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString())
+                            .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build())
+                    .withInput(getOperationWithChecksumRequest));
             CompletableFuture<GetOperationWithChecksumResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -821,43 +821,43 @@ public CompletableFuture getOperationWithCheck
      */
     @Override
     public CompletableFuture<GetWithoutRequiredMembersResponse> getWithoutRequiredMembers(
-            GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) {
+        GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getWithoutRequiredMembersRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getWithoutRequiredMembersRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetWithoutRequiredMembers");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler<GetWithoutRequiredMembersResponse> responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, GetWithoutRequiredMembersResponse::builder);
+                operationMetadata, GetWithoutRequiredMembersResponse::builder);
             Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInput":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInput":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture<GetWithoutRequiredMembersResponse> executeFuture = clientHandler
-                    .execute(new ClientExecutionParams<GetWithoutRequiredMembersRequest, GetWithoutRequiredMembersResponse>()
-                            .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withInput(getWithoutRequiredMembersRequest));
+                .execute(new ClientExecutionParams<GetWithoutRequiredMembersRequest, GetWithoutRequiredMembersResponse>()
+                    .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withInput(getWithoutRequiredMembersRequest));
             CompletableFuture<GetWithoutRequiredMembersResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -892,47 +892,47 @@ public CompletableFuture getWithoutRequiredMe
      */
     @Override
     public CompletableFuture<OperationWithChecksumRequiredResponse> operationWithChecksumRequired(
-            OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) {
+        OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
-                operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null));
+            operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler<OperationWithChecksumRequiredResponse> responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, OperationWithChecksumRequiredResponse::builder);
+                operationMetadata, OperationWithChecksumRequiredResponse::builder);
             Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInput":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInput":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture<OperationWithChecksumRequiredResponse> executeFuture = clientHandler
-                    .execute(new ClientExecutionParams<OperationWithChecksumRequiredRequest, OperationWithChecksumRequiredResponse>()
-                            .withOperationName("OperationWithChecksumRequired")
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler)
-                            .withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED,
-                                    HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest));
+                .execute(new ClientExecutionParams<OperationWithChecksumRequiredRequest, OperationWithChecksumRequiredResponse>()
+                    .withOperationName("OperationWithChecksumRequired")
+                    .withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler)
+                    .withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration)
+                    .withMetricCollector(apiCallMetricCollector)
+                    .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED,
+                        HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest));
             CompletableFuture<OperationWithChecksumRequiredResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -967,48 +967,48 @@ public CompletableFuture operationWithChe
      */
     @Override
     public CompletableFuture<OperationWithRequestCompressionResponse> operationWithRequestCompression(
-            OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) {
+        OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
-                operationWithRequestCompressionRequest.overrideConfiguration().orElse(null));
+            operationWithRequestCompressionRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler<OperationWithRequestCompressionResponse> responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, OperationWithRequestCompressionResponse::builder);
+                operationMetadata, OperationWithRequestCompressionResponse::builder);
             Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInput":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInput":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture<OperationWithRequestCompressionResponse> executeFuture = clientHandler
-                    .execute(new ClientExecutionParams<OperationWithRequestCompressionRequest, OperationWithRequestCompressionResponse>()
-                            .withOperationName("OperationWithRequestCompression")
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler)
-                            .withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION,
-                                    RequestCompression.builder().encodings("gzip").isStreaming(false).build())
-                            .withInput(operationWithRequestCompressionRequest));
+                .execute(new ClientExecutionParams<OperationWithRequestCompressionRequest, OperationWithRequestCompressionResponse>()
+                    .withOperationName("OperationWithRequestCompression")
+                    .withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler)
+                    .withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration)
+                    .withMetricCollector(apiCallMetricCollector)
+                    .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION,
+                        RequestCompression.builder().encodings("gzip").isStreaming(false).build())
+                    .withInput(operationWithRequestCompressionRequest));
             CompletableFuture<OperationWithRequestCompressionResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -1043,43 +1043,43 @@ public CompletableFuture operationWithR
      */
     @Override
     public CompletableFuture<PaginatedOperationWithResultKeyResponse> paginatedOperationWithResultKey(
-            PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) {
+        PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithResultKeyRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
-                paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null));
+            paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithResultKey");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler<PaginatedOperationWithResultKeyResponse> responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, PaginatedOperationWithResultKeyResponse::builder);
+                operationMetadata, PaginatedOperationWithResultKeyResponse::builder);
             Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInput":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInput":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture<PaginatedOperationWithResultKeyResponse> executeFuture = clientHandler
-                    .execute(new ClientExecutionParams<PaginatedOperationWithResultKeyRequest, PaginatedOperationWithResultKeyResponse>()
-                            .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withInput(paginatedOperationWithResultKeyRequest));
+                .execute(new ClientExecutionParams<PaginatedOperationWithResultKeyRequest, PaginatedOperationWithResultKeyResponse>()
+                    .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withInput(paginatedOperationWithResultKeyRequest));
             CompletableFuture<PaginatedOperationWithResultKeyResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -1114,43 +1114,43 @@ public CompletableFuture paginatedOpera
      */
     @Override
     public CompletableFuture<PaginatedOperationWithoutResultKeyResponse> paginatedOperationWithoutResultKey(
-            PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) {
+        PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithoutResultKeyRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
-                paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null));
+            paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithoutResultKey");
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler<PaginatedOperationWithoutResultKeyResponse> responseHandler = protocolFactory
-                    .createResponseHandler(operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder);
+                .createResponseHandler(operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder);
             Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInput":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInput":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture<PaginatedOperationWithoutResultKeyResponse> executeFuture = clientHandler
-                    .execute(new ClientExecutionParams<PaginatedOperationWithoutResultKeyRequest, PaginatedOperationWithoutResultKeyResponse>()
-                            .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withInput(paginatedOperationWithoutResultKeyRequest));
+                .execute(new ClientExecutionParams<PaginatedOperationWithoutResultKeyRequest, PaginatedOperationWithoutResultKeyResponse>()
+                    .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withInput(paginatedOperationWithoutResultKeyRequest));
             CompletableFuture<PaginatedOperationWithoutResultKeyResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -1202,73 +1202,74 @@ public CompletableFuture paginatedOp
      */
     @Override
     public <ReturnT> CompletableFuture<ReturnT> putOperationWithChecksum(
-            PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody,
-            AsyncResponseTransformer<PutOperationWithChecksumResponse, ReturnT> asyncResponseTransformer) {
+        PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody,
+        AsyncResponseTransformer<PutOperationWithChecksumResponse, ReturnT> asyncResponseTransformer) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum");
             Pair<AsyncResponseTransformer<PutOperationWithChecksumResponse, ReturnT>, CompletableFuture<Void>> pair = AsyncResponseTransformerUtils
-                    .wrapWithEndOfStreamFuture(asyncResponseTransformer);
+                .wrapWithEndOfStreamFuture(asyncResponseTransformer);
             asyncResponseTransformer = pair.left();
             CompletableFuture<Void> endOfStreamFuture = pair.right();
             if (!isSignerOverridden(clientConfiguration)) {
                 putOperationWithChecksumRequest = applySignerOverride(putOperationWithChecksumRequest, AsyncAws4Signer.create());
             }
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true)
-                    .isPayloadJson(false).build();
+                .isPayloadJson(false).build();
             HttpResponseHandler<PutOperationWithChecksumResponse> responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, PutOperationWithChecksumResponse::builder);
+                operationMetadata, PutOperationWithChecksumResponse::builder);
             Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInput":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInput":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture<ReturnT> executeFuture = clientHandler.execute(
-                    new ClientExecutionParams<PutOperationWithChecksumRequest, PutOperationWithChecksumResponse>()
-                            .withOperationName("PutOperationWithChecksum")
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(
-                                    AsyncStreamingRequestMarshaller.builder()
-                                            .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory))
-                                            .asyncRequestBody(requestBody).build())
-                            .withResponseHandler(responseHandler)
-                            .withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .withAsyncRequestBody(requestBody)
-                            .putExecutionAttribute(
-                                    SdkInternalExecutionAttribute.HTTP_CHECKSUM,
-                                    HttpChecksum
-                                            .builder()
-                                            .requestChecksumRequired(false)
-                                            .isRequestStreaming(true)
-                                            .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString())
-                                            .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C,
-                                                    DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1,
-                                                    DefaultChecksumAlgorithm.SHA256).build())
-                            .withInput(putOperationWithChecksumRequest), asyncResponseTransformer);
+                new ClientExecutionParams<PutOperationWithChecksumRequest, PutOperationWithChecksumResponse>()
+                    .withOperationName("PutOperationWithChecksum")
+                    .withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(
+                        AsyncStreamingRequestMarshaller.builder()
+                            .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory))
+                            .asyncRequestBody(requestBody).build())
+                    .withResponseHandler(responseHandler)
+                    .withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration)
+                    .withMetricCollector(apiCallMetricCollector)
+                    .withAsyncRequestBody(requestBody)
+                    .putExecutionAttribute(
+                        SdkInternalExecutionAttribute.HTTP_CHECKSUM,
+                        HttpChecksum
+                            .builder()
+                            .requestChecksumRequired(false)
+                            .isRequestStreaming(true)
+                            .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString())
+                            .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C,
+                                DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1,
+                                DefaultChecksumAlgorithm.SHA256).build())
+                    .withAsyncResponseTransformer(asyncResponseTransformer).withInput(putOperationWithChecksumRequest),
+                asyncResponseTransformer);
             AsyncResponseTransformer<PutOperationWithChecksumResponse, ReturnT> finalAsyncResponseTransformer = asyncResponseTransformer;
             CompletableFuture<ReturnT> whenCompleted = executeFuture.whenComplete((r, e) -> {
                 if (e != null) {
                     runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                            () -> finalAsyncResponseTransformer.exceptionOccurred(e));
+                        () -> finalAsyncResponseTransformer.exceptionOccurred(e));
                 }
                 endOfStreamFuture.whenComplete((r2, e2) -> {
                     metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -1279,7 +1280,7 @@ public CompletableFuture putOperationWithChecksum(
         } catch (Throwable t) {
             AsyncResponseTransformer<PutOperationWithChecksumResponse, ReturnT> finalAsyncResponseTransformer = asyncResponseTransformer;
             runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                    () -> finalAsyncResponseTransformer.exceptionOccurred(t));
+                () -> finalAsyncResponseTransformer.exceptionOccurred(t));
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             return CompletableFutureUtils.failedFuture(t);
         }
@@ -1312,13 +1313,13 @@ public CompletableFuture putOperationWithChecksum(
      */
     @Override
     public CompletableFuture<StreamingInputOperationResponse> streamingInputOperation(
-            StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) {
+        StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation");
@@ -1326,36 +1327,36 @@ public CompletableFuture streamingInputOperatio
                 streamingInputOperationRequest = applySignerOverride(streamingInputOperationRequest, AsyncAws4Signer.create());
             }
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                    .isPayloadJson(true).build();
+                .isPayloadJson(true).build();
             HttpResponseHandler<StreamingInputOperationResponse> responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, StreamingInputOperationResponse::builder);
+                operationMetadata, StreamingInputOperationResponse::builder);
             Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInput":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInput":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture<StreamingInputOperationResponse> executeFuture = clientHandler
-                    .execute(new ClientExecutionParams<StreamingInputOperationRequest, StreamingInputOperationResponse>()
-                            .withOperationName("StreamingInputOperation")
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(
-                                    AsyncStreamingRequestMarshaller.builder()
-                                            .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory))
-                                            .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler)
-                            .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration)
-                            .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody)
-                            .withInput(streamingInputOperationRequest));
+                .execute(new ClientExecutionParams<StreamingInputOperationRequest, StreamingInputOperationResponse>()
+                    .withOperationName("StreamingInputOperation")
+                    .withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(
+                        AsyncStreamingRequestMarshaller.builder()
+                            .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory))
+                            .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler)
+                    .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration)
+                    .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody)
+                    .withInput(streamingInputOperationRequest));
             CompletableFuture<StreamingInputOperationResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
                 metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             });
@@ -1399,62 +1400,62 @@ public CompletableFuture streamingInputOperatio
      */
     @Override
     public <ReturnT> CompletableFuture<ReturnT> streamingInputOutputOperation(
-            StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, AsyncRequestBody requestBody,
-            AsyncResponseTransformer<StreamingInputOutputOperationResponse, ReturnT> asyncResponseTransformer) {
+        StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, AsyncRequestBody requestBody,
+        AsyncResponseTransformer<StreamingInputOutputOperationResponse, ReturnT> asyncResponseTransformer) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOutputOperationRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
-                streamingInputOutputOperationRequest.overrideConfiguration().orElse(null));
+            streamingInputOutputOperationRequest.overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOutputOperation");
             Pair<AsyncResponseTransformer<StreamingInputOutputOperationResponse, ReturnT>, CompletableFuture<Void>> pair = AsyncResponseTransformerUtils
-                    .wrapWithEndOfStreamFuture(asyncResponseTransformer);
+                .wrapWithEndOfStreamFuture(asyncResponseTransformer);
             asyncResponseTransformer = pair.left();
             CompletableFuture<Void> endOfStreamFuture = pair.right();
             streamingInputOutputOperationRequest = applySignerOverride(streamingInputOutputOperationRequest,
-                    Aws4UnsignedPayloadSigner.create());
+                Aws4UnsignedPayloadSigner.create());
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true)
-                    .isPayloadJson(false).build();
+                .isPayloadJson(false).build();
             HttpResponseHandler<StreamingInputOutputOperationResponse> responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, StreamingInputOutputOperationResponse::builder);
+                operationMetadata, StreamingInputOutputOperationResponse::builder);
             Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInput":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInput":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture<ReturnT> executeFuture = clientHandler.execute(
-                    new ClientExecutionParams<StreamingInputOutputOperationRequest, StreamingInputOutputOperationResponse>()
-                            .withOperationName("StreamingInputOutputOperation")
-                            .withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(
-                                    AsyncStreamingRequestMarshaller
-                                            .builder()
-                                            .delegateMarshaller(
-                                                    new StreamingInputOutputOperationRequestMarshaller(protocolFactory))
-                                            .asyncRequestBody(requestBody).transferEncoding(true).build())
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withAsyncRequestBody(requestBody).withInput(streamingInputOutputOperationRequest),
-                    asyncResponseTransformer);
+                new ClientExecutionParams<StreamingInputOutputOperationRequest, StreamingInputOutputOperationResponse>()
+                    .withOperationName("StreamingInputOutputOperation")
+                    .withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(
+                        AsyncStreamingRequestMarshaller
+                            .builder()
+                            .delegateMarshaller(
+                                new StreamingInputOutputOperationRequestMarshaller(protocolFactory))
+                            .asyncRequestBody(requestBody).transferEncoding(true).build())
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withAsyncRequestBody(requestBody).withAsyncResponseTransformer(asyncResponseTransformer)
+                    .withInput(streamingInputOutputOperationRequest), asyncResponseTransformer);
             AsyncResponseTransformer<StreamingInputOutputOperationResponse, ReturnT> finalAsyncResponseTransformer = asyncResponseTransformer;
             CompletableFuture<ReturnT> whenCompleted = executeFuture.whenComplete((r, e) -> {
                 if (e != null) {
                     runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                            () -> finalAsyncResponseTransformer.exceptionOccurred(e));
+                        () -> finalAsyncResponseTransformer.exceptionOccurred(e));
                 }
                 endOfStreamFuture.whenComplete((r2, e2) -> {
                     metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -1465,7 +1466,7 @@ public CompletableFuture streamingInputOutputOperation(
         } catch (Throwable t) {
             AsyncResponseTransformer<StreamingInputOutputOperationResponse, ReturnT> finalAsyncResponseTransformer = asyncResponseTransformer;
             runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                    () -> finalAsyncResponseTransformer.exceptionOccurred(t));
+                () -> finalAsyncResponseTransformer.exceptionOccurred(t));
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             return CompletableFutureUtils.failedFuture(t);
         }
@@ -1498,53 +1499,54 @@ public CompletableFuture streamingInputOutputOperation(
      */
     @Override
     public <ReturnT> CompletableFuture<ReturnT> streamingOutputOperation(
-            StreamingOutputOperationRequest streamingOutputOperationRequest,
-            AsyncResponseTransformer<StreamingOutputOperationResponse, ReturnT> asyncResponseTransformer) {
+        StreamingOutputOperationRequest streamingOutputOperationRequest,
+        AsyncResponseTransformer<StreamingOutputOperationResponse, ReturnT> asyncResponseTransformer) {
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation");
             Pair<AsyncResponseTransformer<StreamingOutputOperationResponse, ReturnT>, CompletableFuture<Void>> pair = AsyncResponseTransformerUtils
-                    .wrapWithEndOfStreamFuture(asyncResponseTransformer);
+                .wrapWithEndOfStreamFuture(asyncResponseTransformer);
             asyncResponseTransformer = pair.left();
             CompletableFuture<Void> endOfStreamFuture = pair.right();
             JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true)
-                    .isPayloadJson(false).build();
+                .isPayloadJson(false).build();
             HttpResponseHandler<StreamingOutputOperationResponse> responseHandler = protocolFactory.createResponseHandler(
-                    operationMetadata, StreamingOutputOperationResponse::builder);
+                operationMetadata, StreamingOutputOperationResponse::builder);
             Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> {
                 if (errorCode == null) {
                     return Optional.empty();
                 }
                 switch (errorCode) {
-                case "InvalidInput":
-                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
-                default:
-                    return Optional.empty();
+                    case "InvalidInput":
+                        return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                            .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                    default:
+                        return Optional.empty();
                 }
             };
             HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                    operationMetadata, exceptionMetadataMapper);
+                operationMetadata, exceptionMetadataMapper);
             CompletableFuture<ReturnT> executeFuture = clientHandler.execute(
-                    new ClientExecutionParams<StreamingOutputOperationRequest, StreamingOutputOperationResponse>()
-                            .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata)
-                            .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory))
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                            .withInput(streamingOutputOperationRequest), asyncResponseTransformer);
+                new ClientExecutionParams<StreamingOutputOperationRequest, StreamingOutputOperationResponse>()
+                    .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata)
+                    .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory))
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                    .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest),
+                asyncResponseTransformer);
             AsyncResponseTransformer<StreamingOutputOperationResponse, ReturnT> finalAsyncResponseTransformer = asyncResponseTransformer;
             CompletableFuture<ReturnT> whenCompleted = executeFuture.whenComplete((r, e) -> {
                 if (e != null) {
                     runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                            () -> finalAsyncResponseTransformer.exceptionOccurred(e));
+                        () -> finalAsyncResponseTransformer.exceptionOccurred(e));
                 }
                 endOfStreamFuture.whenComplete((r2, e2) -> {
                     metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -1555,7 +1557,7 @@ public CompletableFuture streamingOutputOperation(
         } catch (Throwable t) {
             AsyncResponseTransformer<StreamingOutputOperationResponse, ReturnT> finalAsyncResponseTransformer = asyncResponseTransformer;
             runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
-                    () -> finalAsyncResponseTransformer.exceptionOccurred(t));
+                () -> finalAsyncResponseTransformer.exceptionOccurred(t));
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
             return CompletableFutureUtils.failedFuture(t);
         }
@@ -1578,11 +1580,11 @@ public final String serviceName() {
     private <T extends BaseAwsJsonProtocolFactory.Builder<T>> T init(T builder) {
         return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(JsonException::builder)
-                .protocol(AwsJsonProtocol.REST_JSON).protocolVersion("1.1");
+            .protocol(AwsJsonProtocol.REST_JSON).protocolVersion("1.1");
     }
 
     private static List<MetricPublisher> resolveMetricPublishers(SdkClientConfiguration clientConfiguration,
-            RequestOverrideConfiguration requestOverrideConfiguration) {
+        RequestOverrideConfiguration requestOverrideConfiguration) {
         List<MetricPublisher> publishers = null;
         if (requestOverrideConfiguration != null) {
             publishers = requestOverrideConfiguration.metricPublishers();
@@ -1602,8 +1604,8 @@ private T applySignerOverride(T request, Signer signer) {
         }
         Consumer<AwsRequestOverrideConfiguration.Builder> signerOverride = b -> b.signer(signer).build();
         AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration()
-                .map(c -> c.toBuilder().applyMutation(signerOverride).build())
-                .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build()));
+            .map(c -> c.toBuilder().applyMutation(signerOverride).build())
+            .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build()));
         return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build();
     }
@@ -1649,7 +1651,7 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request,
     }
 
     private HttpResponseHandler<AwsServiceException> createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory,
-            JsonOperationMetadata operationMetadata, Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper) {
+        JsonOperationMetadata operationMetadata, Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper) {
         return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper);
     }
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java
index cded3b503aec..36bbe1316c9a 100644
--- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java
@@ -103,7 +103,7 @@ final class DefaultJsonClient implements JsonClient {
     private static final Logger log = Logger.loggerFor(DefaultJsonClient.class);
 
     private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder()
-            .serviceProtocol(AwsServiceProtocol.REST_JSON).build();
+        .serviceProtocol(AwsServiceProtocol.REST_JSON).build();
 
     private final SyncClientHandler clientHandler;
 
@@ -139,45 +139,45 @@ protected DefaultJsonClient(SdkClientConfiguration clientConfiguration) {
      */
     @Override
     public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException,
-            AwsServiceException, SdkClientException, JsonException {
+        AwsServiceException, SdkClientException, JsonException {
         JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                .isPayloadJson(true).build();
+            .isPayloadJson(true).build();
         HttpResponseHandler<APostOperationResponse> responseHandler = protocolFactory.createResponseHandler(operationMetadata,
-                APostOperationResponse::builder);
+            APostOperationResponse::builder);
         Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> {
             if (errorCode == null) {
                 return Optional.empty();
             }
             switch (errorCode) {
-            case "InvalidInput":
-                return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                        .exceptionBuilderSupplier(InvalidInputException::builder).build());
-            default:
-                return Optional.empty();
+                case "InvalidInput":
+                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                        .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                default:
+                    return Optional.empty();
             }
         };
         HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                operationMetadata, exceptionMetadataMapper);
+            operationMetadata, exceptionMetadataMapper);
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation");
             String hostPrefix = "{StringMember}-foo.";
             HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember",
-                    "aPostOperationRequest");
+                "aPostOperationRequest");
             String resolvedHostExpression = String.format("%s-foo.", aPostOperationRequest.stringMember());
             return clientHandler.execute(new ClientExecutionParams<APostOperationRequest, APostOperationResponse>()
-                    .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata)
-                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                    .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration)
-                    .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector)
-                    .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)));
+                .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata)
+                .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration)
+                .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector)
+                .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)));
         } finally {
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
         }
@@ -205,44 +205,44 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio
      */
     @Override
     public APostOperationWithOutputResponse aPostOperationWithOutput(
-            APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException,
-            SdkClientException, JsonException {
+        APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException,
+        SdkClientException, JsonException {
         JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                .isPayloadJson(true).build();
+            .isPayloadJson(true).build();
         HttpResponseHandler<APostOperationWithOutputResponse> responseHandler = protocolFactory.createResponseHandler(
-                operationMetadata, APostOperationWithOutputResponse::builder);
+            operationMetadata, APostOperationWithOutputResponse::builder);
         Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> {
             if (errorCode == null) {
                 return Optional.empty();
             }
             switch (errorCode) {
-            case "InvalidInput":
-                return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                        .exceptionBuilderSupplier(InvalidInputException::builder).build());
-            default:
-                return Optional.empty();
+                case "InvalidInput":
+                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                        .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                default:
+                    return Optional.empty();
             }
         };
         HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                operationMetadata, exceptionMetadataMapper);
+            operationMetadata, exceptionMetadataMapper);
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput");
             return clientHandler
-                    .execute(new ClientExecutionParams<APostOperationWithOutputRequest, APostOperationWithOutputResponse>()
-                            .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata)
-                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)));
+                .execute(new ClientExecutionParams<APostOperationWithOutputRequest, APostOperationWithOutputResponse>()
+                    .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata)
+                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest)
+                    .withMetricCollector(apiCallMetricCollector)
+                    .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)));
         } finally {
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
         }
@@ -266,43 +266,43 @@ public APostOperationWithOutputResponse aPostOperationWithOutput(
      */
     @Override
     public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationRequest bearerAuthOperationRequest)
-            throws AwsServiceException, SdkClientException, JsonException {
+        throws AwsServiceException, SdkClientException, JsonException {
         bearerAuthOperationRequest = applySignerOverride(bearerAuthOperationRequest, BearerTokenSigner.create());
         JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                .isPayloadJson(true).build();
+            .isPayloadJson(true).build();
         HttpResponseHandler<BearerAuthOperationResponse> responseHandler = protocolFactory.createResponseHandler(
-                operationMetadata, BearerAuthOperationResponse::builder);
+            operationMetadata, BearerAuthOperationResponse::builder);
         Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> {
             if (errorCode == null) {
                 return Optional.empty();
             }
             switch (errorCode) {
-            case "InvalidInput":
-                return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                        .exceptionBuilderSupplier(InvalidInputException::builder).build());
-            default:
-                return Optional.empty();
+                case "InvalidInput":
+                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                        .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                default:
+                    return Optional.empty();
             }
         };
         HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                operationMetadata, exceptionMetadataMapper);
+            operationMetadata, exceptionMetadataMapper);
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation");
             return clientHandler.execute(new ClientExecutionParams<BearerAuthOperationRequest, BearerAuthOperationResponse>()
-                    .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata)
-                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                    .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration)
-                    .withInput(bearerAuthOperationRequest).withMetricCollector(apiCallMetricCollector)
-                    .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory)));
+                .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata)
+                .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
+                .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration)
+                .withInput(bearerAuthOperationRequest).withMetricCollector(apiCallMetricCollector)
+                .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory)));
         } finally {
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
         }
@@ -326,52 +326,52 @@ public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationReques
      */
     @Override
     public GetOperationWithChecksumResponse getOperationWithChecksum(
-            GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException,
-            JsonException {
+        GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException,
+        JsonException {
         JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                .isPayloadJson(false).build();
+            .isPayloadJson(false).build();
         HttpResponseHandler<GetOperationWithChecksumResponse> responseHandler = protocolFactory.createResponseHandler(
-                operationMetadata, GetOperationWithChecksumResponse::builder);
+            operationMetadata, GetOperationWithChecksumResponse::builder);
         Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> {
             if (errorCode == null) {
                 return Optional.empty();
             }
             switch (errorCode) {
-            case "InvalidInput":
-                return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
-                        .exceptionBuilderSupplier(InvalidInputException::builder).build());
-            default:
-                return Optional.empty();
+                case "InvalidInput":
+                    return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
+                        .exceptionBuilderSupplier(InvalidInputException::builder).build());
+                default:
+                    return Optional.empty();
             }
         };
         HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
-                operationMetadata, exceptionMetadataMapper);
+            operationMetadata, exceptionMetadataMapper);
         SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest,
-                this.clientConfiguration);
+            this.clientConfiguration);
         List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest
-                .overrideConfiguration().orElse(null));
+            .overrideConfiguration().orElse(null));
         MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
-                .create("ApiCall");
+            .create("ApiCall");
         try {
             apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
             apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum");
             return clientHandler
-                    .execute(new ClientExecutionParams<GetOperationWithChecksumRequest, GetOperationWithChecksumResponse>()
-                            .withOperationName("GetOperationWithChecksum")
-                            .withProtocolMetadata(protocolMetadata)
-                            .withResponseHandler(responseHandler)
-                            .withErrorResponseHandler(errorResponseHandler)
-                            .withRequestConfiguration(clientConfiguration)
-                            .withInput(getOperationWithChecksumRequest)
-                            .withMetricCollector(apiCallMetricCollector)
-                            .putExecutionAttribute(
-                                    SdkInternalExecutionAttribute.HTTP_CHECKSUM,
-                                    HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false)
-                                            .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString())
-                                            .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build())
-                            .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory)));
+                .execute(new ClientExecutionParams<GetOperationWithChecksumRequest, GetOperationWithChecksumResponse>()
+                    .withOperationName("GetOperationWithChecksum")
+                    .withProtocolMetadata(protocolMetadata)
+                    .withResponseHandler(responseHandler)
+                    .withErrorResponseHandler(errorResponseHandler)
+                    .withRequestConfiguration(clientConfiguration)
+                    .withInput(getOperationWithChecksumRequest)
+                    .withMetricCollector(apiCallMetricCollector)
+                    .putExecutionAttribute(
+                        SdkInternalExecutionAttribute.HTTP_CHECKSUM,
+                        HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false)
+                            .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString())
+                            .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build())
+                    .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory)));
         } finally {
             metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
         }
@@ -399,44 +399,44 @@ public GetOperationWithChecksumResponse getOperationWithChecksum(
      */
     @Override
     public GetWithoutRequiredMembersResponse getWithoutRequiredMembers(
-            GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) throws InvalidInputException, AwsServiceException,
-            SdkClientException, JsonException {
+        GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) throws InvalidInputException, AwsServiceException,
+        SdkClientException, JsonException {
         JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
-                .isPayloadJson(true).build();
+            .isPayloadJson(true).build();
         HttpResponseHandler<GetWithoutRequiredMembersResponse> responseHandler = protocolFactory.createResponseHandler(
-                operationMetadata, GetWithoutRequiredMembersResponse::builder);
+            operationMetadata, GetWithoutRequiredMembersResponse::builder);
         Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper = errorCode -> {
             if (errorCode == null) {
                 return Optional.empty();
             }
             switch (errorCode) {
-            case "InvalidInput":
-                return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400)
.exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getWithoutRequiredMembersRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getWithoutRequiredMembersRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetWithoutRequiredMembers"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(getWithoutRequiredMembersRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(getWithoutRequiredMembersRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -460,49 +460,49 @@ public GetWithoutRequiredMembersResponse getWithoutRequiredMembers( */ @Override public OperationWithChecksumRequiredResponse operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, - SdkClientException, JsonException { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, OperationWithChecksumRequiredResponse::builder); + operationMetadata, OperationWithChecksumRequiredResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + 
.exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithChecksumRequiredRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithChecksumRequiredRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -526,49 +526,49 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( */ @Override public OperationWithRequestCompressionResponse operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, - SdkClientException, JsonException { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, OperationWithRequestCompressionResponse::builder); + operationMetadata, OperationWithRequestCompressionResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return 
Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithRequestCompression") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithRequestCompressionRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithRequestCompressionRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -592,44 +592,44 @@ public OperationWithRequestCompressionResponse operationWithRequestCompression( */ @Override public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( - PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PaginatedOperationWithResultKeyResponse::builder); + operationMetadata, PaginatedOperationWithResultKeyResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return 
Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithResultKeyRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); + paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithResultKey"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithResultKeyRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithResultKeyRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -653,44 +653,44 @@ public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( */ @Override public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResultKey( - PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); + operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - 
.exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithoutResultKeyRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); + paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithoutResultKey"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithoutResultKeyRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithoutResultKeyRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -740,61 +740,62 @@ public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResul */ @Override public ReturnT putOperationWithChecksum(PutOperationWithChecksumRequest putOperationWithChecksumRequest, - RequestBody requestBody, ResponseTransformer responseTransformer) - throws AwsServiceException, SdkClientException, JsonException { + RequestBody requestBody, ResponseTransformer responseTransformer) + throws AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PutOperationWithChecksumResponse::builder); + operationMetadata, PutOperationWithChecksumResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case 
"InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("PutOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(putOperationWithChecksumRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum - .builder() - .requestChecksumRequired(false) - .isRequestStreaming(true) - .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) - .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, - DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, - DefaultChecksumAlgorithm.SHA256).build()) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build()), responseTransformer); + new ClientExecutionParams() + .withOperationName("PutOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(putOperationWithChecksumRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum + .builder() + .requestChecksumRequired(false) + .isRequestStreaming(true) + .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) + .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, + DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, + DefaultChecksumAlgorithm.SHA256).build()) + .withResponseTransformer(responseTransformer) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build()), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -829,50 +830,50 @@ public ReturnT putOperationWithChecksum(PutOperationWithChecksumReques */ @Override public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, - RequestBody requestBody) throws AwsServiceException, 
SdkClientException, JsonException { + RequestBody requestBody) throws AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOperationResponse::builder); + operationMetadata, StreamingInputOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(streamingInputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(streamingInputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -914,56 +915,57 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe */ @Override public ReturnT streamingInputOutputOperation( - StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, RequestBody requestBody, - ResponseTransformer responseTransformer) throws AwsServiceException, - 
SdkClientException, JsonException { + StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, RequestBody requestBody, + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, JsonException { streamingInputOutputOperationRequest = applySignerOverride(streamingInputOutputOperationRequest, - Aws4UnsignedPayloadSigner.create()); + Aws4UnsignedPayloadSigner.create()); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOutputOperationResponse::builder); + operationMetadata, StreamingInputOutputOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); + streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOutputOperation"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingInputOutputOperation") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(streamingInputOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller - .builder() - .delegateMarshaller( - new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).transferEncoding(true).build()), responseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingInputOutputOperation") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(streamingInputOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withResponseTransformer(responseTransformer) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller + .builder() + .delegateMarshaller( + new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).transferEncoding(true).build()), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -994,44 +996,44 @@ public ReturnT streamingInputOutputOperation( */ @Override public ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, JsonException { + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingOutputOperationResponse::builder); + operationMetadata, StreamingOutputOperationResponse::builder); Function> exceptionMetadataMapper = errorCode -> { if (errorCode == null) { return Optional.empty(); } switch (errorCode) { - case "InvalidInput": - return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) - .exceptionBuilderSupplier(InvalidInputException::builder).build()); - default: - return Optional.empty(); + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); } }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata, exceptionMetadataMapper); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - 
.overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); return clientHandler.execute( - new ClientExecutionParams<StreamingOutputOperationRequest, StreamingOutputOperationResponse>() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + new ClientExecutionParams<StreamingOutputOperationRequest, StreamingOutputOperationResponse>() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector).withResponseTransformer(responseTransformer) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -1051,8 +1053,8 @@ private <T extends JsonRequest> T applySignerOverride(T request, Signer signer) } Consumer<AwsRequestOverrideConfiguration.Builder> signerOverride = b -> b.signer(signer).build(); AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration() - .map(c -> c.toBuilder().applyMutation(signerOverride).build()) - .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); + .map(c -> c.toBuilder().applyMutation(signerOverride).build()) + .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build(); } @@ -1062,7 +1064,7 @@ public final String serviceName() { } private static List<MetricPublisher> resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List<MetricPublisher> publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -1077,7 +1079,7 @@ private static List<MetricPublisher> resolveMetricPublishers(SdkClientConfigurat } private HttpResponseHandler<AwsServiceException> createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata, Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper) { + JsonOperationMetadata operationMetadata, Function<String, Optional<ExceptionMetadata>> exceptionMetadataMapper) { return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); } @@ -1120,7 +1122,7 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, private <T extends BaseAwsJsonProtocolFactory.Builder<T>> T init(T builder) { return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(JsonException::builder) - .protocol(AwsJsonProtocol.REST_JSON).protocolVersion("1.1"); + .protocol(AwsJsonProtocol.REST_JSON).protocolVersion("1.1"); } @Override
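Every hunk in the golden file above exercises the same per-call metrics pattern: publishers are resolved from the request override configuration (falling back to the client configuration), an "ApiCall" MetricCollector records the service and operation identifiers, and whatever was collected is published in a finally block. The following self-contained sketch is not part of the patch; it restates that pattern against the public SDK metrics API, using LoggingMetricPublisher purely as a stand-in for whatever publishers a caller configures, with illustrative metric values.

import java.time.Duration;
import software.amazon.awssdk.core.metrics.CoreMetric;
import software.amazon.awssdk.metrics.LoggingMetricPublisher;
import software.amazon.awssdk.metrics.MetricCollector;
import software.amazon.awssdk.metrics.MetricPublisher;

public final class ApiCallMetricsSketch {
    public static void main(String[] args) {
        // Stand-in publisher; generated clients use whichever publishers were
        // resolved from the request override or client configuration.
        MetricPublisher publisher = LoggingMetricPublisher.create();
        MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall");
        try {
            // Mirrors the reportMetric calls at the top of each generated operation.
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation");
            // Illustrative value only; the SDK core records the real call duration.
            apiCallMetricCollector.reportMetric(CoreMetric.API_CALL_DURATION, Duration.ofMillis(42));
        } finally {
            // Mirrors the generated finally block: publish whatever was collected,
            // whether or not the call succeeded.
            publisher.publish(apiCallMetricCollector.collect());
        }
    }
}

Publishing from the finally block is what guarantees the collector is flushed even when the call throws, which is why every generated method above ends with the same metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())) line.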
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java index 3878372becba..042d0d6220b8 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java @@ -115,7 +115,7 @@ final class DefaultQueryAsyncClient implements QueryAsyncClient { private static final Logger log = LoggerFactory.getLogger(DefaultQueryAsyncClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.QUERY).build(); + .serviceProtocol(AwsServiceProtocol.QUERY).build(); private final AsyncClientHandler clientHandler; @@ -160,27 +160,27 @@ protected DefaultQueryAsyncClient(SdkClientConfiguration clientConfiguration) { public CompletableFuture<APostOperationResponse> aPostOperation(APostOperationRequest aPostOperationRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); HttpResponseHandler<APostOperationResponse> responseHandler = protocolFactory - .createResponseHandler(APostOperationResponse::builder); + .createResponseHandler(APostOperationResponse::builder); HttpResponseHandler<AwsServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler(); String hostPrefix = "foo-"; String resolvedHostExpression = "foo-"; CompletableFuture<APostOperationResponse> executeFuture = clientHandler - .execute(new ClientExecutionParams<APostOperationRequest, APostOperationResponse>() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest)); + .execute(new ClientExecutionParams<APostOperationRequest, APostOperationResponse>() + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest)); CompletableFuture<APostOperationResponse> whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -218,29 +218,29 @@ public CompletableFuture<APostOperationResponse> aPostOperation(APostOperationRe */ @Override public CompletableFuture<APostOperationWithOutputResponse> aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers =
resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); HttpResponseHandler<APostOperationWithOutputResponse> responseHandler = protocolFactory - .createResponseHandler(APostOperationWithOutputResponse::builder); + .createResponseHandler(APostOperationWithOutputResponse::builder); HttpResponseHandler<AwsServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture<APostOperationWithOutputResponse> executeFuture = clientHandler - .execute(new ClientExecutionParams<APostOperationWithOutputRequest, APostOperationWithOutputResponse>() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(aPostOperationWithOutputRequest)); + .execute(new ClientExecutionParams<APostOperationWithOutputRequest, APostOperationWithOutputResponse>() + .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(aPostOperationWithOutputRequest)); CompletableFuture<APostOperationWithOutputResponse> whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -274,30 +274,30 @@ public CompletableFuture<APostOperationWithOutputResponse> aPostOperationWithOut */ @Override public CompletableFuture<BearerAuthOperationResponse> bearerAuthOperation( - BearerAuthOperationRequest bearerAuthOperationRequest) { + BearerAuthOperationRequest bearerAuthOperationRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ?
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation"); bearerAuthOperationRequest = applySignerOverride(bearerAuthOperationRequest, BearerTokenSigner.create()); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(BearerAuthOperationResponse::builder); + .createResponseHandler(BearerAuthOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector).withInput(bearerAuthOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector).withInput(bearerAuthOperationRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -331,37 +331,37 @@ public CompletableFuture bearerAuthOperation( */ @Override public CompletableFuture getOperationWithChecksum( - GetOperationWithChecksumRequest getOperationWithChecksumRequest) { + GetOperationWithChecksumRequest getOperationWithChecksumRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(GetOperationWithChecksumResponse::builder); + .createResponseHandler(GetOperationWithChecksumResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) - .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) - .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) - .withInput(getOperationWithChecksumRequest)); + .execute(new ClientExecutionParams() + .withOperationName("GetOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) + .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) + .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) + .withInput(getOperationWithChecksumRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -396,33 +396,33 @@ public CompletableFuture getOperationWithCheck */ @Override public CompletableFuture operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithChecksumRequiredResponse::builder); + .createResponseHandler(OperationWithChecksumRequiredResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -456,29 +456,29 @@ public CompletableFuture operationWithChe */ @Override public CompletableFuture operationWithContextParam( - OperationWithContextParamRequest operationWithContextParamRequest) { + OperationWithContextParamRequest operationWithContextParamRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithContextParamRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithContextParam"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithContextParamResponse::builder); + .createResponseHandler(OperationWithContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithContextParam").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithContextParamRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithContextParamRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithContextParam").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithContextParamRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithContextParamRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -512,30 +512,30 @@ public CompletableFuture operationWithContext */ @Override public CompletableFuture operationWithCustomMember( - OperationWithCustomMemberRequest operationWithCustomMemberRequest) { + OperationWithCustomMemberRequest operationWithCustomMemberRequest) { operationWithCustomMemberRequest = UtilsTest.dummyRequestModifier(operationWithCustomMemberRequest); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithCustomMemberRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithCustomMemberRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithCustomMember"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithCustomMemberResponse::builder); + .createResponseHandler(OperationWithCustomMemberResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithCustomMember").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithCustomMemberRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithCustomMemberRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithCustomMember").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithCustomMemberRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithCustomMemberRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -571,30 +571,30 @@ public CompletableFuture operationWithCustomM */ @Override public CompletableFuture operationWithCustomizedOperationContextParam( - OperationWithCustomizedOperationContextParamRequest operationWithCustomizedOperationContextParamRequest) { + OperationWithCustomizedOperationContextParamRequest operationWithCustomizedOperationContextParamRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration( - operationWithCustomizedOperationContextParamRequest, this.clientConfiguration); + operationWithCustomizedOperationContextParamRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithCustomizedOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithCustomizedOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithCustomizedOperationContextParam"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithCustomizedOperationContextParamResponse::builder); + .createResponseHandler(OperationWithCustomizedOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithCustomizedOperationContextParam") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithCustomizedOperationContextParamRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithCustomizedOperationContextParamRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithCustomizedOperationContextParam") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithCustomizedOperationContextParamRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithCustomizedOperationContextParamRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -630,29 +630,29 @@ public CompletableFuture o */ @Override public CompletableFuture operationWithMapOperationContextParam( - OperationWithMapOperationContextParamRequest operationWithMapOperationContextParamRequest) { + OperationWithMapOperationContextParamRequest operationWithMapOperationContextParamRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithMapOperationContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithMapOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithMapOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithMapOperationContextParam"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithMapOperationContextParamResponse::builder); + .createResponseHandler(OperationWithMapOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithMapOperationContextParam").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithMapOperationContextParamRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithMapOperationContextParamRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithMapOperationContextParam").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithMapOperationContextParamRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithMapOperationContextParamRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -686,30 +686,30 @@ public CompletableFuture operatio */ @Override public CompletableFuture operationWithNoneAuthType( - OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) { + OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType");
            HttpResponseHandler responseHandler = protocolFactory
                    .createResponseHandler(OperationWithNoneAuthTypeResponse::builder);
            HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
            CompletableFuture executeFuture = clientHandler
                    .execute(new ClientExecutionParams()
                            .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata)
                            .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))
                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
                            .putExecutionAttribute(SdkInternalExecutionAttribute.IS_NONE_AUTH_TYPE_REQUEST, false)
                            .withInput(operationWithNoneAuthTypeRequest));
            CompletableFuture whenCompleteFuture = null;
            whenCompleteFuture = executeFuture.whenComplete((r, e) -> {
                metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
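Every one of these re-indented hunks exercises the same per-call metrics pattern: one MetricCollector per API call (or a no-op collector when no publishers are configured), SERVICE_ID and OPERATION_NAME reported up front, and the collected metrics published once the call completes. A minimal, self-contained sketch of that pattern against the public metrics-spi API follows; LoggingMetricPublisher stands in for whatever publishers a real client resolves.

    import java.time.Duration;
    import software.amazon.awssdk.core.metrics.CoreMetric;
    import software.amazon.awssdk.metrics.LoggingMetricPublisher;
    import software.amazon.awssdk.metrics.MetricCollection;
    import software.amazon.awssdk.metrics.MetricCollector;
    import software.amazon.awssdk.metrics.MetricPublisher;

    public class MetricPatternSketch {
        public static void main(String[] args) {
            MetricPublisher publisher = LoggingMetricPublisher.create();
            // One collector per API call, as in the generated clients.
            MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall");
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation");
            apiCallMetricCollector.reportMetric(CoreMetric.API_CALL_DURATION, Duration.ofMillis(42));
            // The generated code publishes in a finally block (sync) or whenComplete callback (async).
            MetricCollection metrics = apiCallMetricCollector.collect();
            publisher.publish(metrics);
            publisher.close();
        }
    }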
@@ -745,29 +745,29 @@ public CompletableFuture operationWithNoneAut
     */
    @Override
    public CompletableFuture operationWithOperationContextParam(
            OperationWithOperationContextParamRequest operationWithOperationContextParamRequest) {
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithOperationContextParamRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration,
                operationWithOperationContextParamRequest.overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithOperationContextParam");
            HttpResponseHandler responseHandler = protocolFactory
                    .createResponseHandler(OperationWithOperationContextParamResponse::builder);
            HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
            CompletableFuture executeFuture = clientHandler
                    .execute(new ClientExecutionParams()
                            .withOperationName("OperationWithOperationContextParam").withProtocolMetadata(protocolMetadata)
                            .withMarshaller(new OperationWithOperationContextParamRequestMarshaller(protocolFactory))
                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
                            .withInput(operationWithOperationContextParamRequest));
            CompletableFuture whenCompleteFuture = null;
            whenCompleteFuture = executeFuture.whenComplete((r, e) -> {
                metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -802,34 +802,34 @@ public CompletableFuture operationWi
     */
    @Override
    public CompletableFuture operationWithRequestCompression(
            OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) {
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration,
                operationWithRequestCompressionRequest.overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression");
            HttpResponseHandler responseHandler = protocolFactory
                    .createResponseHandler(OperationWithRequestCompressionResponse::builder);
            HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
            CompletableFuture executeFuture = clientHandler
                    .execute(new ClientExecutionParams()
                            .withOperationName("OperationWithRequestCompression")
                            .withProtocolMetadata(protocolMetadata)
                            .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))
                            .withResponseHandler(responseHandler)
                            .withErrorResponseHandler(errorResponseHandler)
                            .withRequestConfiguration(clientConfiguration)
                            .withMetricCollector(apiCallMetricCollector)
                            .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION,
                                    RequestCompression.builder().encodings("gzip").isStreaming(false).build())
                            .withInput(operationWithRequestCompressionRequest));
            CompletableFuture whenCompleteFuture = null;
            whenCompleteFuture = executeFuture.whenComplete((r, e) -> {
                metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -864,29 +864,29 @@ public CompletableFuture operationWithR
     */
    @Override
    public CompletableFuture operationWithStaticContextParams(
            OperationWithStaticContextParamsRequest operationWithStaticContextParamsRequest) {
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithStaticContextParamsRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration,
                operationWithStaticContextParamsRequest.overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithStaticContextParams");
            HttpResponseHandler responseHandler = protocolFactory
                    .createResponseHandler(OperationWithStaticContextParamsResponse::builder);
            HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
            CompletableFuture executeFuture = clientHandler
                    .execute(new ClientExecutionParams()
                            .withOperationName("OperationWithStaticContextParams").withProtocolMetadata(protocolMetadata)
                            .withMarshaller(new OperationWithStaticContextParamsRequestMarshaller(protocolFactory))
                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
                            .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
                            .withInput(operationWithStaticContextParamsRequest));
            CompletableFuture whenCompleteFuture = null;
            whenCompleteFuture = executeFuture.whenComplete((r, e) -> {
                metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -938,19 +938,19 @@ public CompletableFuture operationWith
     */
    @Override
    public CompletableFuture putOperationWithChecksum(
            PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody,
            AsyncResponseTransformer asyncResponseTransformer) {
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest
                .overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum");
            Pair<AsyncResponseTransformer<PutOperationWithChecksumResponse, ReturnT>, CompletableFuture<Void>> pair = AsyncResponseTransformerUtils
                    .wrapWithEndOfStreamFuture(asyncResponseTransformer);
            asyncResponseTransformer = pair.left();
            CompletableFuture<Void> endOfStreamFuture = pair.right();
            if (!isSignerOverridden(clientConfiguration)) {
@@ -958,39 +958,40 @@ public CompletableFuture putOperationWithChecksum(
            }
            HttpResponseHandler responseHandler = protocolFactory
                    .createResponseHandler(PutOperationWithChecksumResponse::builder);
            HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
            CompletableFuture executeFuture = clientHandler.execute(
                    new ClientExecutionParams()
                            .withOperationName("PutOperationWithChecksum")
                            .withProtocolMetadata(protocolMetadata)
                            .withMarshaller(
                                    AsyncStreamingRequestMarshaller.builder()
                                            .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory))
                                            .asyncRequestBody(requestBody).build())
                            .withResponseHandler(responseHandler)
                            .withErrorResponseHandler(errorResponseHandler)
                            .withRequestConfiguration(clientConfiguration)
                            .withMetricCollector(apiCallMetricCollector)
                            .putExecutionAttribute(
                                    SdkInternalExecutionAttribute.HTTP_CHECKSUM,
                                    HttpChecksum
                                            .builder()
                                            .requestChecksumRequired(false)
                                            .isRequestStreaming(true)
                                            .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString())
                                            .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C,
                                                    DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1,
-                                                   DefaultChecksumAlgorithm.SHA256).build()).withAsyncRequestBody(requestBody)
-                           .withInput(putOperationWithChecksumRequest), asyncResponseTransformer);
+                                                   DefaultChecksumAlgorithm.SHA256).build())
+                           .withAsyncResponseTransformer(asyncResponseTransformer).withAsyncRequestBody(requestBody)
+                           .withInput(putOperationWithChecksumRequest), asyncResponseTransformer);
            CompletableFuture whenCompleteFuture = null;
            AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer;
            whenCompleteFuture = executeFuture.whenComplete((r, e) -> {
                if (e != null) {
                    runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
                            () -> finalAsyncResponseTransformer.exceptionOccurred(e));
                }
                endOfStreamFuture.whenComplete((r2, e2) -> {
                    metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -1000,7 +1001,7 @@ public CompletableFuture putOperationWithChecksum(
        } catch (Throwable t) {
            AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer;
            runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
                    () -> finalAsyncResponseTransformer.exceptionOccurred(t));
            metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
            return CompletableFutureUtils.failedFuture(t);
        }
@@ -1033,13 +1034,13 @@ public CompletableFuture putOperationWithChecksum(
     */
    @Override
    public CompletableFuture streamingInputOperation(
            StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) {
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest
                .overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation");
@@ -1048,21 +1049,21 @@ public CompletableFuture streamingInputOperatio
            }
            HttpResponseHandler responseHandler = protocolFactory
                    .createResponseHandler(StreamingInputOperationResponse::builder);
            HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
            CompletableFuture executeFuture = clientHandler
                    .execute(new ClientExecutionParams()
                            .withOperationName("StreamingInputOperation")
                            .withProtocolMetadata(protocolMetadata)
                            .withMarshaller(
                                    AsyncStreamingRequestMarshaller.builder()
                                            .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory))
                                            .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler)
                            .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration)
                            .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody)
                            .withInput(streamingInputOperationRequest));
            CompletableFuture whenCompleteFuture = null;
            whenCompleteFuture = executeFuture.whenComplete((r, e) -> {
                metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
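The streaming hunks above pair the caller's AsyncResponseTransformer with an "end of stream" future so that metrics are only published after the response body finishes, not when the response headers arrive. AsyncResponseTransformerUtils is SDK-internal; the sketch below is a simplified stand-in that treats completion of prepare()'s future as end of stream, which holds for transformers whose future completes when the body is fully consumed.

    import java.nio.ByteBuffer;
    import java.util.concurrent.CompletableFuture;
    import software.amazon.awssdk.core.async.AsyncResponseTransformer;
    import software.amazon.awssdk.core.async.SdkPublisher;
    import software.amazon.awssdk.utils.Pair;

    // Minimal illustration of pairing a transformer with an end-of-stream future,
    // in the spirit of the AsyncResponseTransformerUtils call in the generated code.
    final class EndOfStreamSketch {
        static <ResponseT, ResultT> Pair<AsyncResponseTransformer<ResponseT, ResultT>, CompletableFuture<Void>>
                wrap(AsyncResponseTransformer<ResponseT, ResultT> delegate) {
            CompletableFuture<Void> endOfStream = new CompletableFuture<>();
            AsyncResponseTransformer<ResponseT, ResultT> wrapped = new AsyncResponseTransformer<ResponseT, ResultT>() {
                @Override
                public CompletableFuture<ResultT> prepare() {
                    // Simplification: signal end-of-stream when the result future settles.
                    return delegate.prepare().whenComplete((r, t) -> endOfStream.complete(null));
                }

                @Override
                public void onResponse(ResponseT response) {
                    delegate.onResponse(response);
                }

                @Override
                public void onStream(SdkPublisher<ByteBuffer> publisher) {
                    delegate.onStream(publisher);
                }

                @Override
                public void exceptionOccurred(Throwable error) {
                    endOfStream.complete(null);
                    delegate.exceptionOccurred(error);
                }
            };
            return Pair.of(wrapped, endOfStream);
        }
    }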
@@ -1101,40 +1102,41 @@ public CompletableFuture streamingInputOperatio
     */
    @Override
    public CompletableFuture streamingOutputOperation(
            StreamingOutputOperationRequest streamingOutputOperationRequest,
            AsyncResponseTransformer asyncResponseTransformer) {
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest
                .overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation");
            Pair<AsyncResponseTransformer<StreamingOutputOperationResponse, ReturnT>, CompletableFuture<Void>> pair = AsyncResponseTransformerUtils
                    .wrapWithEndOfStreamFuture(asyncResponseTransformer);
            asyncResponseTransformer = pair.left();
            CompletableFuture<Void> endOfStreamFuture = pair.right();
            HttpResponseHandler responseHandler = protocolFactory
                    .createResponseHandler(StreamingOutputOperationResponse::builder);
            HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
            CompletableFuture executeFuture = clientHandler.execute(
                    new ClientExecutionParams()
                            .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata)
                            .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory))
                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
-                           .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
-                           .withInput(streamingOutputOperationRequest), asyncResponseTransformer);
+                           .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector)
+                           .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest),
+                           asyncResponseTransformer);
            CompletableFuture whenCompleteFuture = null;
            AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer;
            whenCompleteFuture = executeFuture.whenComplete((r, e) -> {
                if (e != null) {
                    runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
                            () -> finalAsyncResponseTransformer.exceptionOccurred(e));
                }
                endOfStreamFuture.whenComplete((r2, e2) -> {
                    metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
@@ -1144,7 +1146,7 @@ public CompletableFuture streamingOutputOperation(
        } catch (Throwable t) {
            AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer;
            runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring",
                    () -> finalAsyncResponseTransformer.exceptionOccurred(t));
            metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
            return CompletableFutureUtils.failedFuture(t);
        }
@@ -1167,15 +1169,15 @@ public final String serviceName() {
    private AwsQueryProtocolFactory init() {
        return AwsQueryProtocolFactory
                .builder()
                .registerModeledException(
                        ExceptionMetadata.builder().errorCode("InvalidInput")
                                .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build())
                .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build();
    }

    private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration,
            RequestOverrideConfiguration requestOverrideConfiguration) {
        List publishers = null;
        if (requestOverrideConfiguration != null) {
            publishers = requestOverrideConfiguration.metricPublishers();
@@ -1195,8 +1197,8 @@ private T applySignerOverride(T request, Signer signer) {
        }
        Consumer signerOverride = b -> b.signer(signer).build();
        AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration()
                .map(c -> c.toBuilder().applyMutation(signerOverride).build())
                .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build()));
        return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build();
    }
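The applySignerOverride helper above re-appears in the sync client that follows: it preserves any override configuration already set on the request while forcing a specific Signer. The same pattern works standalone; this sketch mirrors the generated code, substituting orElseGet so the fallback configuration is only built when needed.

    import java.util.function.Consumer;
    import software.amazon.awssdk.awscore.AwsRequest;
    import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration;
    import software.amazon.awssdk.core.signer.Signer;

    final class SignerOverrideSketch {
        @SuppressWarnings("unchecked")
        static <T extends AwsRequest> T applySignerOverride(T request, Signer signer) {
            Consumer<AwsRequestOverrideConfiguration.Builder> override = b -> b.signer(signer);
            // Keep whatever the caller already configured; only the signer is forced.
            AwsRequestOverrideConfiguration cfg = request.overrideConfiguration()
                    .map(c -> c.toBuilder().applyMutation(override).build())
                    .orElseGet(() -> AwsRequestOverrideConfiguration.builder().applyMutation(override).build());
            return (T) request.toBuilder().overrideConfiguration(cfg).build();
        }
    }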
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java
index 5f013b28da68..8025c0c0568f 100644
--- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java
@@ -107,7 +107,7 @@ final class DefaultQueryClient implements QueryClient {
    private static final Logger log = Logger.loggerFor(DefaultQueryClient.class);
    private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder()
            .serviceProtocol(AwsServiceProtocol.QUERY).build();
    private final SyncClientHandler clientHandler;
@@ -143,17 +143,17 @@ protected DefaultQueryClient(SdkClientConfiguration clientConfiguration) {
     */
    @Override
    public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException,
            AwsServiceException, SdkClientException, QueryException {
        HttpResponseHandler responseHandler = protocolFactory
                .createResponseHandler(APostOperationResponse::builder);
        HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest
                .overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation");
@@ -161,11 +161,11 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio
            String resolvedHostExpression = "foo-";
            return clientHandler.execute(new ClientExecutionParams()
                    .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata)
                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
                    .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration)
                    .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector)
                    .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)));
        } finally {
            metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
        }
@@ -193,30 +193,30 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio
     */
    @Override
    public APostOperationWithOutputResponse aPostOperationWithOutput(
            APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException,
            SdkClientException, QueryException {
        HttpResponseHandler responseHandler = protocolFactory
                .createResponseHandler(APostOperationWithOutputResponse::builder);
        HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest
                .overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput");
            return clientHandler
                    .execute(new ClientExecutionParams()
                            .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata)
                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
                            .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest)
                            .withMetricCollector(apiCallMetricCollector)
                            .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)));
        } finally {
            metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
        }
@@ -240,29 +240,29 @@ public APostOperationWithOutputResponse aPostOperationWithOutput(
     */
    @Override
    public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationRequest bearerAuthOperationRequest)
            throws AwsServiceException, SdkClientException, QueryException {
        bearerAuthOperationRequest = applySignerOverride(bearerAuthOperationRequest, BearerTokenSigner.create());
        HttpResponseHandler responseHandler = protocolFactory
                .createResponseHandler(BearerAuthOperationResponse::builder);
        HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest
                .overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation");
            return clientHandler.execute(new ClientExecutionParams()
                    .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata)
                    .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
                    .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration)
                    .withInput(bearerAuthOperationRequest).withMetricCollector(apiCallMetricCollector)
                    .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory)));
        } finally {
            metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
        }
@@ -286,38 +286,38 @@ public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationReques
     */
    @Override
    public GetOperationWithChecksumResponse getOperationWithChecksum(
            GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException,
            QueryException {
        HttpResponseHandler responseHandler = protocolFactory
                .createResponseHandler(GetOperationWithChecksumResponse::builder);
        HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest
                .overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum");
            return clientHandler
                    .execute(new ClientExecutionParams()
                            .withOperationName("GetOperationWithChecksum")
                            .withProtocolMetadata(protocolMetadata)
                            .withResponseHandler(responseHandler)
                            .withErrorResponseHandler(errorResponseHandler)
                            .withRequestConfiguration(clientConfiguration)
                            .withInput(getOperationWithChecksumRequest)
                            .withMetricCollector(apiCallMetricCollector)
                            .putExecutionAttribute(
                                    SdkInternalExecutionAttribute.HTTP_CHECKSUM,
                                    HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false)
                                            .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString())
                                            .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build())
                            .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory)));
        } finally {
            metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
        }
@@ -341,35 +341,35 @@ public GetOperationWithChecksumResponse getOperationWithChecksum(
     */
    @Override
    public OperationWithChecksumRequiredResponse operationWithChecksumRequired(
            OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException,
            SdkClientException, QueryException {
        HttpResponseHandler responseHandler = protocolFactory
                .createResponseHandler(OperationWithChecksumRequiredResponse::builder);
        HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration,
                operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired");
            return clientHandler
                    .execute(new ClientExecutionParams()
                            .withOperationName("OperationWithChecksumRequired")
                            .withProtocolMetadata(protocolMetadata)
                            .withResponseHandler(responseHandler)
                            .withErrorResponseHandler(errorResponseHandler)
                            .withRequestConfiguration(clientConfiguration)
                            .withInput(operationWithChecksumRequiredRequest)
                            .withMetricCollector(apiCallMetricCollector)
                            .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED,
                                    HttpChecksumRequired.create())
                            .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)));
        } finally {
            metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
        }
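The two checksum hunks above attach checksum behavior through execution attributes rather than through the marshaller: GetOperationWithChecksum builds an HttpChecksum trait, while OperationWithChecksumRequired only flags HTTP_CHECKSUM_REQUIRED. For reference, the trait the streaming upload variants build looks like the sketch below; package locations are assumptions, but the builder calls mirror the hunks.

    import software.amazon.awssdk.checksums.DefaultChecksumAlgorithm;
    import software.amazon.awssdk.core.interceptor.trait.HttpChecksum;

    // Sketch of the HttpChecksum trait the generated code attaches via
    // putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM, ...).
    final class ChecksumTraitSketch {
        static HttpChecksum uploadChecksum(String requestedValidationMode) {
            return HttpChecksum.builder()
                    .requestChecksumRequired(false)
                    .isRequestStreaming(true)
                    .requestValidationMode(requestedValidationMode)
                    .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, DefaultChecksumAlgorithm.CRC32,
                            DefaultChecksumAlgorithm.SHA1, DefaultChecksumAlgorithm.SHA256)
                    .build();
        }
    }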
@@ -393,30 +393,30 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired(
     */
    @Override
    public OperationWithContextParamResponse operationWithContextParam(
            OperationWithContextParamRequest operationWithContextParamRequest) throws AwsServiceException, SdkClientException,
            QueryException {
        HttpResponseHandler responseHandler = protocolFactory
                .createResponseHandler(OperationWithContextParamResponse::builder);
        HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithContextParamRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithContextParamRequest
                .overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithContextParam");
            return clientHandler
                    .execute(new ClientExecutionParams()
                            .withOperationName("OperationWithContextParam").withProtocolMetadata(protocolMetadata)
                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
                            .withRequestConfiguration(clientConfiguration).withInput(operationWithContextParamRequest)
                            .withMetricCollector(apiCallMetricCollector)
                            .withMarshaller(new OperationWithContextParamRequestMarshaller(protocolFactory)));
        } finally {
            metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
        }
@@ -440,31 +440,31 @@ public OperationWithContextParamResponse operationWithContextParam(
     */
    @Override
    public OperationWithCustomMemberResponse operationWithCustomMember(
            OperationWithCustomMemberRequest operationWithCustomMemberRequest) throws AwsServiceException, SdkClientException,
            QueryException {
        operationWithCustomMemberRequest = UtilsTest.dummyRequestModifier(operationWithCustomMemberRequest);
        HttpResponseHandler responseHandler = protocolFactory
                .createResponseHandler(OperationWithCustomMemberResponse::builder);
        HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithCustomMemberRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithCustomMemberRequest
                .overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithCustomMember");
            return clientHandler
                    .execute(new ClientExecutionParams()
                            .withOperationName("OperationWithCustomMember").withProtocolMetadata(protocolMetadata)
                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
                            .withRequestConfiguration(clientConfiguration).withInput(operationWithCustomMemberRequest)
                            .withMetricCollector(apiCallMetricCollector)
                            .withMarshaller(new OperationWithCustomMemberRequestMarshaller(protocolFactory)));
        } finally {
            metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
        }
@@ -489,31 +489,31 @@ public OperationWithCustomMemberResponse operationWithCustomMember(
     */
    @Override
    public OperationWithCustomizedOperationContextParamResponse operationWithCustomizedOperationContextParam(
            OperationWithCustomizedOperationContextParamRequest operationWithCustomizedOperationContextParamRequest)
            throws AwsServiceException, SdkClientException, QueryException {
        HttpResponseHandler responseHandler = protocolFactory
                .createResponseHandler(OperationWithCustomizedOperationContextParamResponse::builder);
        HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(
                operationWithCustomizedOperationContextParamRequest, this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration,
                operationWithCustomizedOperationContextParamRequest.overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithCustomizedOperationContextParam");
            return clientHandler
                    .execute(new ClientExecutionParams()
                            .withOperationName("OperationWithCustomizedOperationContextParam")
                            .withProtocolMetadata(protocolMetadata).withResponseHandler(responseHandler)
                            .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration)
                            .withInput(operationWithCustomizedOperationContextParamRequest)
                            .withMetricCollector(apiCallMetricCollector)
                            .withMarshaller(new OperationWithCustomizedOperationContextParamRequestMarshaller(protocolFactory)));
        } finally {
            metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
        }
@@ -538,30 +538,30 @@ public OperationWithCustomizedOperationContextParamResponse operationWithCustomi
     */
    @Override
    public OperationWithMapOperationContextParamResponse operationWithMapOperationContextParam(
            OperationWithMapOperationContextParamRequest operationWithMapOperationContextParamRequest)
            throws AwsServiceException, SdkClientException, QueryException {
        HttpResponseHandler responseHandler = protocolFactory
                .createResponseHandler(OperationWithMapOperationContextParamResponse::builder);
        HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithMapOperationContextParamRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration,
                operationWithMapOperationContextParamRequest.overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithMapOperationContextParam");
            return clientHandler
                    .execute(new ClientExecutionParams()
                            .withOperationName("OperationWithMapOperationContextParam").withProtocolMetadata(protocolMetadata)
                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
                            .withRequestConfiguration(clientConfiguration)
                            .withInput(operationWithMapOperationContextParamRequest).withMetricCollector(apiCallMetricCollector)
                            .withMarshaller(new OperationWithMapOperationContextParamRequestMarshaller(protocolFactory)));
        } finally {
            metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
        }
@@ -585,31 +585,31 @@ public OperationWithMapOperationContextParamResponse operationWithMapOperationCo
     */
    @Override
    public OperationWithNoneAuthTypeResponse operationWithNoneAuthType(
            OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) throws AwsServiceException, SdkClientException,
            QueryException {
        HttpResponseHandler responseHandler = protocolFactory
                .createResponseHandler(OperationWithNoneAuthTypeResponse::builder);
        HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest
                .overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType");
            return clientHandler
                    .execute(new ClientExecutionParams()
                            .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata)
                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
                            .withRequestConfiguration(clientConfiguration).withInput(operationWithNoneAuthTypeRequest)
                            .withMetricCollector(apiCallMetricCollector)
                            .putExecutionAttribute(SdkInternalExecutionAttribute.IS_NONE_AUTH_TYPE_REQUEST, false)
                            .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory)));
        } finally {
            metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
        }
@@ -634,30 +634,30 @@ public OperationWithNoneAuthTypeResponse operationWithNoneAuthType(
     */
    @Override
    public OperationWithOperationContextParamResponse operationWithOperationContextParam(
            OperationWithOperationContextParamRequest operationWithOperationContextParamRequest) throws AwsServiceException,
            SdkClientException, QueryException {
        HttpResponseHandler responseHandler = protocolFactory
                .createResponseHandler(OperationWithOperationContextParamResponse::builder);
        HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithOperationContextParamRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration,
                operationWithOperationContextParamRequest.overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithOperationContextParam");
            return clientHandler
                    .execute(new ClientExecutionParams()
                            .withOperationName("OperationWithOperationContextParam").withProtocolMetadata(protocolMetadata)
                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
                            .withRequestConfiguration(clientConfiguration).withInput(operationWithOperationContextParamRequest)
                            .withMetricCollector(apiCallMetricCollector)
                            .withMarshaller(new OperationWithOperationContextParamRequestMarshaller(protocolFactory)));
        } finally {
            metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
        }
@@ -681,35 +681,35 @@ public OperationWithOperationContextParamResponse operationWithOperationContextP
     */
    @Override
    public OperationWithRequestCompressionResponse operationWithRequestCompression(
            OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException,
            SdkClientException, QueryException {
        HttpResponseHandler responseHandler = protocolFactory
                .createResponseHandler(OperationWithRequestCompressionResponse::builder);
        HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration,
                operationWithRequestCompressionRequest.overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression");
            return clientHandler
                    .execute(new ClientExecutionParams()
                            .withOperationName("OperationWithRequestCompression")
                            .withProtocolMetadata(protocolMetadata)
                            .withResponseHandler(responseHandler)
                            .withErrorResponseHandler(errorResponseHandler)
                            .withRequestConfiguration(clientConfiguration)
                            .withInput(operationWithRequestCompressionRequest)
                            .withMetricCollector(apiCallMetricCollector)
                            .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION,
                                    RequestCompression.builder().encodings("gzip").isStreaming(false).build())
                            .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)));
        } finally {
            metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
        }
@@ -733,30 +733,30 @@ public OperationWithRequestCompressionResponse operationWithRequestCompression(
     */
    @Override
    public OperationWithStaticContextParamsResponse operationWithStaticContextParams(
            OperationWithStaticContextParamsRequest operationWithStaticContextParamsRequest) throws AwsServiceException,
            SdkClientException, QueryException {
        HttpResponseHandler responseHandler = protocolFactory
                .createResponseHandler(OperationWithStaticContextParamsResponse::builder);
        HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithStaticContextParamsRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration,
                operationWithStaticContextParamsRequest.overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
                .create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithStaticContextParams");
            return clientHandler
                    .execute(new ClientExecutionParams()
                            .withOperationName("OperationWithStaticContextParams").withProtocolMetadata(protocolMetadata)
                            .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
                            .withRequestConfiguration(clientConfiguration).withInput(operationWithStaticContextParamsRequest)
                            .withMetricCollector(apiCallMetricCollector)
                            .withMarshaller(new OperationWithStaticContextParamsRequestMarshaller(protocolFactory)));
        } finally {
            metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
        }
@@ -806,47 +806,48 @@ public OperationWithStaticContextParamsResponse operationWithStaticContextParams
     */
    @Override
    public ReturnT putOperationWithChecksum(PutOperationWithChecksumRequest putOperationWithChecksumRequest,
            RequestBody requestBody, ResponseTransformer responseTransformer)
            throws AwsServiceException, SdkClientException, QueryException {
        HttpResponseHandler responseHandler = protocolFactory
                .createResponseHandler(PutOperationWithChecksumResponse::builder);
        HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler();
        SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest,
                this.clientConfiguration);
        List metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest
                .overrideConfiguration().orElse(null));
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PutOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(putOperationWithChecksumRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum - .builder() - .requestChecksumRequired(false) - .isRequestStreaming(true) - .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) - .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, - DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, - DefaultChecksumAlgorithm.SHA256).build()) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("PutOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(putOperationWithChecksumRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum + .builder() + .requestChecksumRequired(false) + .isRequestStreaming(true) + .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) + .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, + DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, + DefaultChecksumAlgorithm.SHA256).build()) + .withResponseTransformer(responseTransformer) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -881,36 +882,36 @@ public ReturnT putOperationWithChecksum(PutOperationWithChecksumReques */ @Override public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, - RequestBody requestBody) throws AwsServiceException, SdkClientException, QueryException { + RequestBody requestBody) throws AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingInputOperationResponse::builder); + .createResponseHandler(StreamingInputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(streamingInputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(streamingInputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -941,30 +942,30 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe */ @Override public ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, QueryException { + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingOutputOperationResponse::builder); + .createResponseHandler(StreamingOutputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector).withResponseTransformer(responseTransformer) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -989,8 +990,8 @@ private T applySignerOverride(T request, Signer signer) } Consumer signerOverride = b -> b.signer(signer).build(); AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration() - .map(c -> c.toBuilder().applyMutation(signerOverride).build()) - .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); + .map(c -> c.toBuilder().applyMutation(signerOverride).build()) + .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build(); } @@ -1000,7 +1001,7 @@ public final String serviceName() { } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -1053,11 +1054,11 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, private AwsQueryProtocolFactory init() { return AwsQueryProtocolFactory - .builder() - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java index 4f777ed50154..5815d387f2fe 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java +++ 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java @@ -107,7 +107,7 @@ final class DefaultXmlAsyncClient implements XmlAsyncClient { private static final Logger log = LoggerFactory.getLogger(DefaultXmlAsyncClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.REST_XML).build(); + .serviceProtocol(AwsServiceProtocol.REST_XML).build(); private final AsyncClientHandler clientHandler; @@ -152,26 +152,26 @@ protected DefaultXmlAsyncClient(SdkClientConfiguration clientConfiguration) { public CompletableFuture aPostOperation(APostOperationRequest aPostOperationRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(APostOperationResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(APostOperationResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); String hostPrefix = "foo-"; String resolvedHostExpression = "foo-"; CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperation").withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler).hostPrefixExpression(resolvedHostExpression) - .withMetricCollector(apiCallMetricCollector).withInput(aPostOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperation").withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler).hostPrefixExpression(resolvedHostExpression) + .withMetricCollector(apiCallMetricCollector).withInput(aPostOperationRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -210,28 +210,28 @@ public CompletableFuture aPostOperation(APostOperationRe */ @Override public CompletableFuture aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(APostOperationWithOutputResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(APostOperationWithOutputResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .withInput(aPostOperationWithOutputRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .withInput(aPostOperationWithOutputRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -266,29 +266,29 @@ public CompletableFuture aPostOperationWithOut */ @Override public CompletableFuture bearerAuthOperation( - BearerAuthOperationRequest bearerAuthOperationRequest) { + BearerAuthOperationRequest bearerAuthOperationRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation"); bearerAuthOperationRequest = applySignerOverride(bearerAuthOperationRequest, BearerTokenSigner.create()); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(BearerAuthOperationResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(BearerAuthOperationResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("BearerAuthOperation").withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler).credentialType(CredentialType.TOKEN) - .withMetricCollector(apiCallMetricCollector).withInput(bearerAuthOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("BearerAuthOperation").withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler).credentialType(CredentialType.TOKEN) + .withMetricCollector(apiCallMetricCollector).withInput(bearerAuthOperationRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -323,51 +323,51 @@ public CompletableFuture bearerAuthOperation( */ @Override public CompletableFuture eventStreamOperation(EventStreamOperationRequest eventStreamOperationRequest, - EventStreamOperationResponseHandler asyncResponseHandler) { + EventStreamOperationResponseHandler asyncResponseHandler) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, eventStreamOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperation"); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - EventStreamOperationResponse::builder, XmlOperationMetadata.builder().hasStreamingSuccessResponse(true) - .build()); + EventStreamOperationResponse::builder, XmlOperationMetadata.builder().hasStreamingSuccessResponse(true) + .build()); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler( - EventStreamTaggedUnionPojoSupplier.builder() - .putSdkPojoSupplier("EventPayloadEvent", EventStream::eventPayloadEventBuilder) - .putSdkPojoSupplier("NonEventPayloadEvent", EventStream::nonEventPayloadEventBuilder) - .putSdkPojoSupplier("SecondEventPayloadEvent", EventStream::secondEventPayloadEventBuilder) - .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build(), XmlOperationMetadata - .builder().hasStreamingSuccessResponse(false).build()); + EventStreamTaggedUnionPojoSupplier.builder() + .putSdkPojoSupplier("EventPayloadEvent", EventStream::eventPayloadEventBuilder) + .putSdkPojoSupplier("NonEventPayloadEvent", EventStream::nonEventPayloadEventBuilder) + .putSdkPojoSupplier("SecondEventPayloadEvent", EventStream::secondEventPayloadEventBuilder) + .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build(), XmlOperationMetadata + .builder().hasStreamingSuccessResponse(false).build()); CompletableFuture eventStreamTransformFuture = new CompletableFuture<>(); EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer - . builder().eventStreamResponseHandler(asyncResponseHandler) - .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler) - .exceptionResponseHandler(errorResponseHandler).future(eventStreamTransformFuture).executor(executor) - .serviceName(serviceName()).build(); + . builder().eventStreamResponseHandler(asyncResponseHandler) + .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler) + .exceptionResponseHandler(errorResponseHandler).future(eventStreamTransformFuture).executor(executor) + .serviceName(serviceName()).build(); RestEventStreamAsyncResponseTransformer restAsyncResponseTransformer = RestEventStreamAsyncResponseTransformer - . builder() - .eventStreamAsyncResponseTransformer(asyncResponseTransformer) - .eventStreamResponseHandler(asyncResponseHandler).build(); + . 
builder() + .eventStreamAsyncResponseTransformer(asyncResponseTransformer) + .eventStreamResponseHandler(asyncResponseHandler).build(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("EventStreamOperation").withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationRequest), - restAsyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("EventStreamOperation").withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationRequest), + restAsyncResponseTransformer); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseHandler.exceptionOccurred(e)); + () -> asyncResponseHandler.exceptionOccurred(e)); eventStreamTransformFuture.completeExceptionally(e); } metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -376,7 +376,7 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest return CompletableFutureUtils.forwardExceptionTo(eventStreamTransformFuture, executeFuture); } catch (Throwable t) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseHandler.exceptionOccurred(t)); + () -> asyncResponseHandler.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -404,35 +404,35 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest */ @Override public CompletableFuture getOperationWithChecksum( - GetOperationWithChecksumRequest getOperationWithChecksumRequest) { + GetOperationWithChecksumRequest getOperationWithChecksumRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(GetOperationWithChecksumResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(GetOperationWithChecksumResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetOperationWithChecksum") - .withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) - .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) - .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) - .withInput(getOperationWithChecksumRequest)); + .execute(new ClientExecutionParams() + .withOperationName("GetOperationWithChecksum") + .withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) + .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) + .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) + .withInput(getOperationWithChecksumRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -468,31 +468,31 @@ public CompletableFuture getOperationWithCheck */ @Override public CompletableFuture operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithChecksumRequiredResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithChecksumRequiredResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -527,29 +527,29 @@ public CompletableFuture operationWithChe */ @Override public CompletableFuture operationWithNoneAuthType( - OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) { + OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithNoneAuthTypeResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithNoneAuthTypeResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithNoneAuthType").withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.IS_NONE_AUTH_TYPE_REQUEST, false) - .withInput(operationWithNoneAuthTypeRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithNoneAuthType").withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.IS_NONE_AUTH_TYPE_REQUEST, false) + .withInput(operationWithNoneAuthTypeRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -585,32 +585,32 @@ public CompletableFuture operationWithNoneAut */ @Override public CompletableFuture operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithRequestCompression") - .withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withInput(operationWithRequestCompressionRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withInput(operationWithRequestCompressionRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -663,19 +663,19 @@ public CompletableFuture operationWithR */ @Override public CompletableFuture putOperationWithChecksum( - PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody, - AsyncResponseTransformer asyncResponseTransformer) { + PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); if (!isSignerOverridden(clientConfiguration)) { @@ -683,39 +683,40 @@ public CompletableFuture putOperationWithChecksum( } HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - PutOperationWithChecksumResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); + PutOperationWithChecksumResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("PutOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum - .builder() - .requestChecksumRequired(false) - .isRequestStreaming(true) - .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) - .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, - DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, - DefaultChecksumAlgorithm.SHA256).build()).withAsyncRequestBody(requestBody) - .withInput(putOperationWithChecksumRequest), asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("PutOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum + .builder() + .requestChecksumRequired(false) + .isRequestStreaming(true) + .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) + .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, + DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, + DefaultChecksumAlgorithm.SHA256).build()) + .withAsyncResponseTransformer(asyncResponseTransformer).withAsyncRequestBody(requestBody) + .withInput(putOperationWithChecksumRequest), asyncResponseTransformer); CompletableFuture whenCompleteFuture = null; AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> 
finalAsyncResponseTransformer.exceptionOccurred(e)); } endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -725,7 +726,7 @@ public CompletableFuture putOperationWithChecksum( } catch (Throwable t) { AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -758,13 +759,13 @@ public CompletableFuture putOperationWithChecksum( */ @Override public CompletableFuture streamingInputOperation( - StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { + StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); @@ -773,20 +774,20 @@ public CompletableFuture streamingInputOperatio } HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(StreamingInputOperationResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(StreamingInputOperationResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()).withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) - .withInput(streamingInputOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()).withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) + .withInput(streamingInputOperationRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -826,40 +827,41 @@ public CompletableFuture streamingInputOperatio */ @Override public CompletableFuture streamingOutputOperation( 
- StreamingOutputOperationRequest streamingOutputOperationRequest, - AsyncResponseTransformer asyncResponseTransformer) { + StreamingOutputOperationRequest streamingOutputOperationRequest, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - StreamingOutputOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); + StreamingOutputOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(streamingOutputOperationRequest), asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); CompletableFuture whenCompleteFuture = null; AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -869,7 +871,7 @@ public CompletableFuture streamingOutputOperation( } catch (Throwable t) { AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> 
p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -887,15 +889,15 @@ public final String serviceName() { private AwsXmlProtocolFactory init() { return AwsXmlProtocolFactory - .builder() - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(XmlException::builder).build(); + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(XmlException::builder).build(); } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -915,8 +917,8 @@ private T applySignerOverride(T request, Signer signer) { } Consumer signerOverride = b -> b.signer(signer).build(); AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration() - .map(c -> c.toBuilder().applyMutation(signerOverride).build()) - .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); + .map(c -> c.toBuilder().applyMutation(signerOverride).build()) + .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java index 9bad15fd6e47..bb35c8751301 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java @@ -89,7 +89,7 @@ final class DefaultXmlClient implements XmlClient { private static final Logger log = Logger.loggerFor(DefaultXmlClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.REST_XML).build(); + .serviceProtocol(AwsServiceProtocol.REST_XML).build(); private final SyncClientHandler clientHandler; @@ -125,15 +125,15 @@ protected DefaultXmlClient(SdkClientConfiguration clientConfiguration) { */ @Override public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException, - AwsServiceException, SdkClientException, XmlException { + AwsServiceException, SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory.createCombinedResponseHandler( - APostOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + APostOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + 
.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); @@ -141,10 +141,10 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio String resolvedHostExpression = "foo-"; return clientHandler.execute(new ClientExecutionParams() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) - .withInput(aPostOperationRequest).withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) + .withInput(aPostOperationRequest).withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -172,28 +172,28 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio */ @Override public APostOperationWithOutputResponse aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, XmlException { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(APostOperationWithOutputResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(APostOperationWithOutputResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -217,28 +217,28 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( */ @Override public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationRequest bearerAuthOperationRequest) - throws AwsServiceException, SdkClientException, XmlException { + throws AwsServiceException, SdkClientException, XmlException { bearerAuthOperationRequest = applySignerOverride(bearerAuthOperationRequest, BearerTokenSigner.create()); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(BearerAuthOperationResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(BearerAuthOperationResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) - .withInput(bearerAuthOperationRequest) - .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); + .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) + .withInput(bearerAuthOperationRequest) + .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -262,36 +262,36 @@ public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationReques */ @Override public GetOperationWithChecksumResponse getOperationWithChecksum( - GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, - XmlException { + GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, + XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(GetOperationWithChecksumResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(GetOperationWithChecksumResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration) - .withInput(getOperationWithChecksumRequest) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) - .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) - .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) - .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("GetOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration) + .withInput(getOperationWithChecksumRequest) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) + .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) + .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) + .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -315,33 +315,33 @@ public GetOperationWithChecksumResponse getOperationWithChecksum( */ @Override public OperationWithChecksumRequiredResponse operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, - SdkClientException, XmlException { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, + SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithChecksumRequiredResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithChecksumRequiredResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithChecksumRequiredRequest) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithChecksumRequiredRequest) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -365,29 +365,29 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( */ @Override public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( - OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) throws AwsServiceException, SdkClientException, - XmlException { + OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) throws AwsServiceException, SdkClientException, + XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithNoneAuthTypeResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithNoneAuthTypeResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration).withInput(operationWithNoneAuthTypeRequest) - .putExecutionAttribute(SdkInternalExecutionAttribute.IS_NONE_AUTH_TYPE_REQUEST, false) - .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration).withInput(operationWithNoneAuthTypeRequest) + .putExecutionAttribute(SdkInternalExecutionAttribute.IS_NONE_AUTH_TYPE_REQUEST, false) + .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -411,33 +411,33 @@ public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( */ @Override public OperationWithRequestCompressionResponse operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, - SdkClientException, XmlException { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithRequestCompression") - .withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithRequestCompressionRequest) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithRequestCompressionRequest) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -487,47 +487,48 @@ public OperationWithRequestCompressionResponse operationWithRequestCompression( */ @Override public ReturnT putOperationWithChecksum(PutOperationWithChecksumRequest putOperationWithChecksumRequest, - RequestBody requestBody, ResponseTransformer responseTransformer) - throws AwsServiceException, SdkClientException, XmlException { + RequestBody requestBody, ResponseTransformer responseTransformer) + throws AwsServiceException, SdkClientException, XmlException { HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - PutOperationWithChecksumResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); + PutOperationWithChecksumResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PutOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(putOperationWithChecksumRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum - .builder() - .requestChecksumRequired(false) - .isRequestStreaming(true) - .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) - .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, - DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, - DefaultChecksumAlgorithm.SHA256).build()) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("PutOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(putOperationWithChecksumRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum + .builder() + .requestChecksumRequired(false) + .isRequestStreaming(true) + .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) + .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, + DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, + DefaultChecksumAlgorithm.SHA256).build()) + .withResponseTransformer(responseTransformer) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -562,34 +563,34 @@ public ReturnT putOperationWithChecksum(PutOperationWithChecksumReques */ @Override public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, - RequestBody requestBody) throws AwsServiceException, SdkClientException, XmlException { + RequestBody requestBody) throws AwsServiceException, SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(StreamingInputOperationResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(StreamingInputOperationResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = 
metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration) - .withInput(streamingInputOperationRequest) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration) + .withInput(streamingInputOperationRequest) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -620,30 +621,30 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe */ @Override public ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, XmlException { + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, XmlException { HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - StreamingOutputOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); + StreamingOutputOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector).withResponseTransformer(responseTransformer) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -655,8 +656,8 @@ private T applySignerOverride(T request, Signer signer) { } Consumer signerOverride = b -> b.signer(signer).build(); AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration() - .map(c -> c.toBuilder().applyMutation(signerOverride).build()) - .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); + .map(c -> c.toBuilder().applyMutation(signerOverride).build()) + .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build(); } @@ -666,7 +667,7 @@ public final String serviceName() { } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -719,11 +720,11 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, private AwsXmlProtocolFactory init() { return AwsXmlProtocolFactory - .builder() - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(XmlException::builder).build(); + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(XmlException::builder).build(); } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/serviceclientconfiguration-withchecksum-builder.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/serviceclientconfiguration-withchecksum-builder.java index 3552d6abd9c3..f535c3d73c74 100644 --- 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/serviceclientconfiguration-withchecksum-builder.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/serviceclientconfiguration-withchecksum-builder.java @@ -18,6 +18,7 @@ import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeProvider; import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; import software.amazon.awssdk.identity.spi.IdentityProvider; +import software.amazon.awssdk.identity.spi.TokenIdentity; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.json.JsonServiceClientConfiguration; import software.amazon.awssdk.services.json.auth.scheme.JsonAuthSchemeProvider; @@ -167,6 +168,23 @@ public JsonAuthSchemeProvider authSchemeProvider() { + JsonAuthSchemeProvider.class.getSimpleName()); } + /** + * Sets the value for token provider + */ + @Override + public JsonServiceClientConfiguration.Builder tokenProvider(IdentityProvider tokenProvider) { + config.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, tokenProvider); + return this; + } + + /** + * Gets the value for token provider + */ + @Override + public IdentityProvider tokenProvider() { + return config.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER); + } + /** * Sets the value for client behavior for response checksum validation */ diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-class.java index 942a770b61a3..684823698760 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-class.java @@ -23,7 +23,9 @@ public final class DefaultQueryEndpointProvider implements QueryEndpointProvider public CompletableFuture resolveEndpoint(QueryEndpointParams params) { Validate.notNull(params.region(), "Parameter 'region' must not be null"); try { - RuleResult result = endpointRule0(params, new LocalState(params.region())); + Region region = params.region(); + String regionId = region == null ? 
null : region.id(); + RuleResult result = endpointRule0(params, regionId); if (result.canContinue()) { throw SdkClientException.create("Rule engine did not reach an error or endpoint result"); } @@ -40,170 +42,101 @@ public CompletableFuture resolveEndpoint(QueryEndpointParams params) { } } - private static RuleResult endpointRule0(QueryEndpointParams params, LocalState locals) { - return endpointRule1(params, locals); + private static RuleResult endpointRule0(QueryEndpointParams params, String region) { + return endpointRule1(params, region); } - private static RuleResult endpointRule1(QueryEndpointParams params, LocalState locals) { - RulePartition partitionResult = null; - if ((partitionResult = RulesFunctions.awsPartition(locals.region())) != null) { - locals = locals.toBuilder().partitionResult(partitionResult).build(); - RuleResult result = endpointRule2(params, locals); + private static RuleResult endpointRule1(QueryEndpointParams params, String region) { + RulePartition partitionResult = RulesFunctions.awsPartition(region); + if (partitionResult != null) { + RuleResult result = endpointRule2(params, partitionResult); if (result.isResolved()) { return result; } - result = endpointRule6(params, locals); + result = endpointRule6(params, region, partitionResult); if (result.isResolved()) { return result; } - result = endpointRule11(params, locals); - if (result.isResolved()) { - return result; + return RuleResult.error(region + " is not a valid HTTP host-label"); + if (params.useFipsEndpoint() == null && params.useDualStackEndpoint() != null && params.useDualStackEndpoint() + && params.arnList() != null) { + String firstArn = RulesFunctions.listAccess(params.arnList(), 0); + if (firstArn != null) { + RuleArn parsedArn = RulesFunctions.awsParseArn(firstArn); + if (parsedArn != null) { + return RuleResult.endpoint(Endpoint + .builder() + .url(URI.create("https://" + params.endpointId() + ".query." + + partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); + } + } } - return endpointRule12(params, locals); } return RuleResult.carryOn(); } - private static RuleResult endpointRule2(QueryEndpointParams params, LocalState locals) { + private static RuleResult endpointRule2(QueryEndpointParams params, RulePartition partitionResult) { if (params.endpointId() != null) { - RuleResult result = endpointRule3(params, locals); - if (result.isResolved()) { - return result; - } - result = endpointRule4(params, locals); - if (result.isResolved()) { - return result; - } - return endpointRule5(params, locals); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule3(QueryEndpointParams params, LocalState locals) { - if (params.useFipsEndpoint() != null && params.useFipsEndpoint()) { - return RuleResult.error("FIPS endpoints not supported with multi-region endpoints"); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule4(QueryEndpointParams params, LocalState locals) { - if (params.useFipsEndpoint() == null && params.useDualStackEndpoint() != null && params.useDualStackEndpoint()) { - return RuleResult - .endpoint(Endpoint - .builder() - .url(URI.create("https://" + params.endpointId() + ".query." 
- + locals.partitionResult().dualStackDnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query") - .signingRegionSet(Arrays.asList("*")).build())).build()); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule5(QueryEndpointParams params, LocalState locals) { - return RuleResult.endpoint(Endpoint - .builder() - .url(URI.create("https://" + params.endpointId() + ".query." + locals.partitionResult().dnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query").signingRegionSet(Arrays.asList("*")) - .build())).build()); - } - - private static RuleResult endpointRule6(QueryEndpointParams params, LocalState locals) { - if (RulesFunctions.isValidHostLabel(locals.region(), false)) { - RuleResult result = endpointRule7(params, locals); - if (result.isResolved()) { - return result; - } - result = endpointRule8(params, locals); - if (result.isResolved()) { - return result; + if (params.useFipsEndpoint() != null && params.useFipsEndpoint()) { + return RuleResult.error("FIPS endpoints not supported with multi-region endpoints"); } - result = endpointRule9(params, locals); - if (result.isResolved()) { - return result; + if (params.useFipsEndpoint() == null && params.useDualStackEndpoint() != null && params.useDualStackEndpoint()) { + return RuleResult.endpoint(Endpoint + .builder() + .url(URI.create("https://" + params.endpointId() + ".query." + partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); } - return endpointRule10(params, locals); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule7(QueryEndpointParams params, LocalState locals) { - if (params.useFipsEndpoint() != null && params.useFipsEndpoint() && params.useDualStackEndpoint() == null) { return RuleResult.endpoint(Endpoint - .builder() - .url(URI.create("https://query-fips." + locals.region() + "." + locals.partitionResult().dnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query").signingRegionSet(Arrays.asList("*")) - .build())).build()); + .builder() + .url(URI.create("https://" + params.endpointId() + ".query." + partitionResult.dnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query").signingRegionSet(Arrays.asList("*")) + .build())).build()); } return RuleResult.carryOn(); } - private static RuleResult endpointRule8(QueryEndpointParams params, LocalState locals) { - if (params.useDualStackEndpoint() != null && params.useDualStackEndpoint() && params.useFipsEndpoint() == null) { - return RuleResult.endpoint(Endpoint - .builder() - .url(URI.create("https://query." + locals.region() + "." 
+ locals.partitionResult().dualStackDnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query").signingRegionSet(Arrays.asList("*")) - .build(), SigV4AuthScheme.builder().signingName("query").signingRegion(locals.region()) - .build())).build()); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule9(QueryEndpointParams params, LocalState locals) { - if (params.useDualStackEndpoint() != null && params.useFipsEndpoint() != null && params.useDualStackEndpoint() - && params.useFipsEndpoint()) { - return RuleResult - .endpoint(Endpoint - .builder() - .url(URI.create("https://query-fips." + locals.region() + "." - + locals.partitionResult().dualStackDnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query") - .signingRegionSet(Arrays.asList("*")).build())).build()); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule10(QueryEndpointParams params, LocalState locals) { - return RuleResult.endpoint(Endpoint.builder() - .url(URI.create("https://query." + locals.region() + "." + locals.partitionResult().dnsSuffix())).build()); - } - - private static RuleResult endpointRule11(QueryEndpointParams params, LocalState locals) { - return RuleResult.error(locals.region() + " is not a valid HTTP host-label"); - } - - private static RuleResult endpointRule12(QueryEndpointParams params, LocalState locals) { - if (params.useFipsEndpoint() == null && params.useDualStackEndpoint() != null && params.useDualStackEndpoint() - && params.arnList() != null) { - String firstArn = null; - RuleArn parsedArn = null; - if ((firstArn = RulesFunctions.listAccess(params.arnList(), 0)) != null) { - locals = locals.toBuilder().firstArn(firstArn).build(); - } else { - return RuleResult.carryOn(); + private static RuleResult endpointRule6(QueryEndpointParams params, String region, RulePartition partitionResult) { + if (RulesFunctions.isValidHostLabel(region, false)) { + if (params.useFipsEndpoint() != null && params.useFipsEndpoint() && params.useDualStackEndpoint() == null) { + return RuleResult.endpoint(Endpoint + .builder() + .url(URI.create("https://query-fips." + region + "." + partitionResult.dnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); } - if ((parsedArn = RulesFunctions.awsParseArn(locals.firstArn())) != null) { - locals = locals.toBuilder().parsedArn(parsedArn).build(); + if (params.useDualStackEndpoint() != null && params.useDualStackEndpoint() && params.useFipsEndpoint() == null) { return RuleResult.endpoint(Endpoint - .builder() - .url(URI.create("https://" + params.endpointId() + ".query." - + locals.partitionResult().dualStackDnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query") - .signingRegionSet(Arrays.asList("*")).build())).build()); + .builder() + .url(URI.create("https://query." + region + "." 
+ partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build(), + SigV4AuthScheme.builder().signingName("query").signingRegion(region).build())).build()); } + if (params.useDualStackEndpoint() != null && params.useFipsEndpoint() != null && params.useDualStackEndpoint() + && params.useFipsEndpoint()) { + return RuleResult.endpoint(Endpoint + .builder() + .url(URI.create("https://query-fips." + region + "." + partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); + } + return RuleResult.endpoint(Endpoint.builder() + .url(URI.create("https://query." + region + "." + partitionResult.dnsSuffix())).build()); } return RuleResult.carryOn(); } @@ -217,107 +150,4 @@ public boolean equals(Object rhs) { public int hashCode() { return getClass().hashCode(); } - - private static final class LocalState { - private final String region; - - private final RulePartition partitionResult; - - private final String firstArn; - - private final RuleArn parsedArn; - - LocalState() { - this.region = null; - this.partitionResult = null; - this.firstArn = null; - this.parsedArn = null; - } - - LocalState(Region region) { - if (region != null) { - this.region = region.id(); - } else { - this.region = null; - } - this.partitionResult = null; - this.firstArn = null; - this.parsedArn = null; - } - - LocalState(LocalStateBuilder builder) { - this.region = builder.region; - this.partitionResult = builder.partitionResult; - this.firstArn = builder.firstArn; - this.parsedArn = builder.parsedArn; - } - - public String region() { - return this.region; - } - - public RulePartition partitionResult() { - return this.partitionResult; - } - - public String firstArn() { - return this.firstArn; - } - - public RuleArn parsedArn() { - return this.parsedArn; - } - - public LocalStateBuilder toBuilder() { - return new LocalStateBuilder(this); - } - } - - private static final class LocalStateBuilder { - private String region; - - private RulePartition partitionResult; - - private String firstArn; - - private RuleArn parsedArn; - - LocalStateBuilder() { - this.region = null; - this.partitionResult = null; - this.firstArn = null; - this.parsedArn = null; - } - - LocalStateBuilder(LocalState locals) { - this.region = locals.region; - this.partitionResult = locals.partitionResult; - this.firstArn = locals.firstArn; - this.parsedArn = locals.parsedArn; - } - - public LocalStateBuilder region(String value) { - this.region = value; - return this; - } - - public LocalStateBuilder partitionResult(RulePartition value) { - this.partitionResult = value; - return this; - } - - public LocalStateBuilder firstArn(String value) { - this.firstArn = value; - return this; - } - - public LocalStateBuilder parsedArn(RuleArn value) { - this.parsedArn = value; - return this; - } - - LocalState build() { - return new LocalState(this); - } - } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-know-prop-override-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-know-prop-override-class.java index 942a770b61a3..684823698760 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-know-prop-override-class.java +++ 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-know-prop-override-class.java @@ -23,7 +23,9 @@ public final class DefaultQueryEndpointProvider implements QueryEndpointProvider public CompletableFuture resolveEndpoint(QueryEndpointParams params) { Validate.notNull(params.region(), "Parameter 'region' must not be null"); try { - RuleResult result = endpointRule0(params, new LocalState(params.region())); + Region region = params.region(); + String regionId = region == null ? null : region.id(); + RuleResult result = endpointRule0(params, regionId); if (result.canContinue()) { throw SdkClientException.create("Rule engine did not reach an error or endpoint result"); } @@ -40,170 +42,101 @@ public CompletableFuture resolveEndpoint(QueryEndpointParams params) { } } - private static RuleResult endpointRule0(QueryEndpointParams params, LocalState locals) { - return endpointRule1(params, locals); + private static RuleResult endpointRule0(QueryEndpointParams params, String region) { + return endpointRule1(params, region); } - private static RuleResult endpointRule1(QueryEndpointParams params, LocalState locals) { - RulePartition partitionResult = null; - if ((partitionResult = RulesFunctions.awsPartition(locals.region())) != null) { - locals = locals.toBuilder().partitionResult(partitionResult).build(); - RuleResult result = endpointRule2(params, locals); + private static RuleResult endpointRule1(QueryEndpointParams params, String region) { + RulePartition partitionResult = RulesFunctions.awsPartition(region); + if (partitionResult != null) { + RuleResult result = endpointRule2(params, partitionResult); if (result.isResolved()) { return result; } - result = endpointRule6(params, locals); + result = endpointRule6(params, region, partitionResult); if (result.isResolved()) { return result; } - result = endpointRule11(params, locals); - if (result.isResolved()) { - return result; + return RuleResult.error(region + " is not a valid HTTP host-label"); + if (params.useFipsEndpoint() == null && params.useDualStackEndpoint() != null && params.useDualStackEndpoint() + && params.arnList() != null) { + String firstArn = RulesFunctions.listAccess(params.arnList(), 0); + if (firstArn != null) { + RuleArn parsedArn = RulesFunctions.awsParseArn(firstArn); + if (parsedArn != null) { + return RuleResult.endpoint(Endpoint + .builder() + .url(URI.create("https://" + params.endpointId() + ".query." 
+ + partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); + } + } } - return endpointRule12(params, locals); } return RuleResult.carryOn(); } - private static RuleResult endpointRule2(QueryEndpointParams params, LocalState locals) { + private static RuleResult endpointRule2(QueryEndpointParams params, RulePartition partitionResult) { if (params.endpointId() != null) { - RuleResult result = endpointRule3(params, locals); - if (result.isResolved()) { - return result; - } - result = endpointRule4(params, locals); - if (result.isResolved()) { - return result; - } - return endpointRule5(params, locals); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule3(QueryEndpointParams params, LocalState locals) { - if (params.useFipsEndpoint() != null && params.useFipsEndpoint()) { - return RuleResult.error("FIPS endpoints not supported with multi-region endpoints"); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule4(QueryEndpointParams params, LocalState locals) { - if (params.useFipsEndpoint() == null && params.useDualStackEndpoint() != null && params.useDualStackEndpoint()) { - return RuleResult - .endpoint(Endpoint - .builder() - .url(URI.create("https://" + params.endpointId() + ".query." - + locals.partitionResult().dualStackDnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query") - .signingRegionSet(Arrays.asList("*")).build())).build()); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule5(QueryEndpointParams params, LocalState locals) { - return RuleResult.endpoint(Endpoint - .builder() - .url(URI.create("https://" + params.endpointId() + ".query." + locals.partitionResult().dnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query").signingRegionSet(Arrays.asList("*")) - .build())).build()); - } - - private static RuleResult endpointRule6(QueryEndpointParams params, LocalState locals) { - if (RulesFunctions.isValidHostLabel(locals.region(), false)) { - RuleResult result = endpointRule7(params, locals); - if (result.isResolved()) { - return result; - } - result = endpointRule8(params, locals); - if (result.isResolved()) { - return result; + if (params.useFipsEndpoint() != null && params.useFipsEndpoint()) { + return RuleResult.error("FIPS endpoints not supported with multi-region endpoints"); } - result = endpointRule9(params, locals); - if (result.isResolved()) { - return result; + if (params.useFipsEndpoint() == null && params.useDualStackEndpoint() != null && params.useDualStackEndpoint()) { + return RuleResult.endpoint(Endpoint + .builder() + .url(URI.create("https://" + params.endpointId() + ".query." + partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); } - return endpointRule10(params, locals); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule7(QueryEndpointParams params, LocalState locals) { - if (params.useFipsEndpoint() != null && params.useFipsEndpoint() && params.useDualStackEndpoint() == null) { return RuleResult.endpoint(Endpoint - .builder() - .url(URI.create("https://query-fips." 
+ locals.region() + "." + locals.partitionResult().dnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query").signingRegionSet(Arrays.asList("*")) - .build())).build()); + .builder() + .url(URI.create("https://" + params.endpointId() + ".query." + partitionResult.dnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query").signingRegionSet(Arrays.asList("*")) + .build())).build()); } return RuleResult.carryOn(); } - private static RuleResult endpointRule8(QueryEndpointParams params, LocalState locals) { - if (params.useDualStackEndpoint() != null && params.useDualStackEndpoint() && params.useFipsEndpoint() == null) { - return RuleResult.endpoint(Endpoint - .builder() - .url(URI.create("https://query." + locals.region() + "." + locals.partitionResult().dualStackDnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query").signingRegionSet(Arrays.asList("*")) - .build(), SigV4AuthScheme.builder().signingName("query").signingRegion(locals.region()) - .build())).build()); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule9(QueryEndpointParams params, LocalState locals) { - if (params.useDualStackEndpoint() != null && params.useFipsEndpoint() != null && params.useDualStackEndpoint() - && params.useFipsEndpoint()) { - return RuleResult - .endpoint(Endpoint - .builder() - .url(URI.create("https://query-fips." + locals.region() + "." - + locals.partitionResult().dualStackDnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query") - .signingRegionSet(Arrays.asList("*")).build())).build()); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule10(QueryEndpointParams params, LocalState locals) { - return RuleResult.endpoint(Endpoint.builder() - .url(URI.create("https://query." + locals.region() + "." + locals.partitionResult().dnsSuffix())).build()); - } - - private static RuleResult endpointRule11(QueryEndpointParams params, LocalState locals) { - return RuleResult.error(locals.region() + " is not a valid HTTP host-label"); - } - - private static RuleResult endpointRule12(QueryEndpointParams params, LocalState locals) { - if (params.useFipsEndpoint() == null && params.useDualStackEndpoint() != null && params.useDualStackEndpoint() - && params.arnList() != null) { - String firstArn = null; - RuleArn parsedArn = null; - if ((firstArn = RulesFunctions.listAccess(params.arnList(), 0)) != null) { - locals = locals.toBuilder().firstArn(firstArn).build(); - } else { - return RuleResult.carryOn(); + private static RuleResult endpointRule6(QueryEndpointParams params, String region, RulePartition partitionResult) { + if (RulesFunctions.isValidHostLabel(region, false)) { + if (params.useFipsEndpoint() != null && params.useFipsEndpoint() && params.useDualStackEndpoint() == null) { + return RuleResult.endpoint(Endpoint + .builder() + .url(URI.create("https://query-fips." + region + "." 
+ partitionResult.dnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); } - if ((parsedArn = RulesFunctions.awsParseArn(locals.firstArn())) != null) { - locals = locals.toBuilder().parsedArn(parsedArn).build(); + if (params.useDualStackEndpoint() != null && params.useDualStackEndpoint() && params.useFipsEndpoint() == null) { return RuleResult.endpoint(Endpoint - .builder() - .url(URI.create("https://" + params.endpointId() + ".query." - + locals.partitionResult().dualStackDnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query") - .signingRegionSet(Arrays.asList("*")).build())).build()); + .builder() + .url(URI.create("https://query." + region + "." + partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build(), + SigV4AuthScheme.builder().signingName("query").signingRegion(region).build())).build()); } + if (params.useDualStackEndpoint() != null && params.useFipsEndpoint() != null && params.useDualStackEndpoint() + && params.useFipsEndpoint()) { + return RuleResult.endpoint(Endpoint + .builder() + .url(URI.create("https://query-fips." + region + "." + partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); + } + return RuleResult.endpoint(Endpoint.builder() + .url(URI.create("https://query." + region + "." + partitionResult.dnsSuffix())).build()); } return RuleResult.carryOn(); } @@ -217,107 +150,4 @@ public boolean equals(Object rhs) { public int hashCode() { return getClass().hashCode(); } - - private static final class LocalState { - private final String region; - - private final RulePartition partitionResult; - - private final String firstArn; - - private final RuleArn parsedArn; - - LocalState() { - this.region = null; - this.partitionResult = null; - this.firstArn = null; - this.parsedArn = null; - } - - LocalState(Region region) { - if (region != null) { - this.region = region.id(); - } else { - this.region = null; - } - this.partitionResult = null; - this.firstArn = null; - this.parsedArn = null; - } - - LocalState(LocalStateBuilder builder) { - this.region = builder.region; - this.partitionResult = builder.partitionResult; - this.firstArn = builder.firstArn; - this.parsedArn = builder.parsedArn; - } - - public String region() { - return this.region; - } - - public RulePartition partitionResult() { - return this.partitionResult; - } - - public String firstArn() { - return this.firstArn; - } - - public RuleArn parsedArn() { - return this.parsedArn; - } - - public LocalStateBuilder toBuilder() { - return new LocalStateBuilder(this); - } - } - - private static final class LocalStateBuilder { - private String region; - - private RulePartition partitionResult; - - private String firstArn; - - private RuleArn parsedArn; - - LocalStateBuilder() { - this.region = null; - this.partitionResult = null; - this.firstArn = null; - this.parsedArn = null; - } - - LocalStateBuilder(LocalState locals) { - this.region = locals.region; - this.partitionResult = locals.partitionResult; - this.firstArn = locals.firstArn; - this.parsedArn = locals.parsedArn; - } - - public LocalStateBuilder 
region(String value) { - this.region = value; - return this; - } - - public LocalStateBuilder partitionResult(RulePartition value) { - this.partitionResult = value; - return this; - } - - public LocalStateBuilder firstArn(String value) { - this.firstArn = value; - return this; - } - - public LocalStateBuilder parsedArn(RuleArn value) { - this.parsedArn = value; - return this; - } - - LocalState build() { - return new LocalState(this); - } - } } diff --git a/core/annotations/pom.xml b/core/annotations/pom.xml index e0bf9d33d946..e5e6ca742473 100644 --- a/core/annotations/pom.xml +++ b/core/annotations/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/core/arns/pom.xml b/core/arns/pom.xml index 85ce78967705..7b8d2e010fb8 100644 --- a/core/arns/pom.xml +++ b/core/arns/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/core/auth-crt/pom.xml b/core/auth-crt/pom.xml index 8c5ac2576624..ddaed044e16a 100644 --- a/core/auth-crt/pom.xml +++ b/core/auth-crt/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT auth-crt diff --git a/core/auth/pom.xml b/core/auth/pom.xml index 21f8dffeeef0..18e26a571a63 100644 --- a/core/auth/pom.xml +++ b/core/auth/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT auth diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/DefaultCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/DefaultCredentialsProvider.java index c0e46006c2a3..7b159a6a9489 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/DefaultCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/DefaultCredentialsProvider.java @@ -74,10 +74,15 @@ private DefaultCredentialsProvider(Builder builder) { } /** - * Returns the singleton instance of the {@link DefaultCredentialsProvider} using the default configuration. - * Configuration can be specified by creating an instance using the {@link #builder()}. If you want to + * Returns the singleton instance of the {@link DefaultCredentialsProvider} using the default configuration. + * Configuration can be specified by creating an instance using the {@link #builder()}. If you want to + * create a new instance, use {@link #builder()} instead. + * + * @deprecated The create() method returns a singleton instance, which can cause issues if one client closes the provider + * while others are still using it. Use {@code builder().build()} to create independent instances, which is the + * safer approach and is recommended for most use cases.
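+ * + * <p>For example, an illustrative migration sketch (not itself part of this patch): + * {@code try (DefaultCredentialsProvider provider = DefaultCredentialsProvider.builder().build()) { AwsCredentials credentials = provider.resolveCredentials(); }}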
*/ + @Deprecated public static DefaultCredentialsProvider create() { return DEFAULT_CREDENTIALS_PROVIDER; } diff --git a/core/aws-core/pom.xml b/core/aws-core/pom.xml index 0e56bf96f692..e88bd33d6c36 100644 --- a/core/aws-core/pom.xml +++ b/core/aws-core/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT aws-core diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsRequestOverrideConfiguration.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsRequestOverrideConfiguration.java index cde970af6402..a73691ab1df1 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsRequestOverrideConfiguration.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsRequestOverrideConfiguration.java @@ -23,6 +23,7 @@ import software.amazon.awssdk.core.RequestOverrideConfiguration; import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; import software.amazon.awssdk.identity.spi.IdentityProvider; +import software.amazon.awssdk.identity.spi.TokenIdentity; import software.amazon.awssdk.utils.builder.SdkBuilder; /** @@ -31,10 +32,12 @@ @SdkPublicApi public final class AwsRequestOverrideConfiguration extends RequestOverrideConfiguration { private final IdentityProvider<? extends AwsCredentialsIdentity> credentialsProvider; + private final IdentityProvider<? extends TokenIdentity> tokenIdentityProvider; private AwsRequestOverrideConfiguration(BuilderImpl builder) { super(builder); this.credentialsProvider = builder.awsCredentialsProvider; + this.tokenIdentityProvider = builder.tokenIdentityProvider; } /** @@ -75,6 +78,16 @@ public Optional<IdentityProvider<? extends AwsCredentialsIdentity>> credentialsI return Optional.ofNullable(credentialsProvider); } + /** + * The optional {@link IdentityProvider} that will provide a token identity to be used to + * authenticate this request. + * + * @return The optional {@link IdentityProvider}. + */ + public Optional<IdentityProvider<? extends TokenIdentity>> tokenIdentityProvider() { + return Optional.ofNullable(tokenIdentityProvider); + } + @Override public Builder toBuilder() { return new BuilderImpl(this); @@ -97,7 +110,8 @@ public boolean equals(Object o) { return false; } AwsRequestOverrideConfiguration that = (AwsRequestOverrideConfiguration) o; - return Objects.equals(credentialsProvider, that.credentialsProvider); + return Objects.equals(credentialsProvider, that.credentialsProvider) && + Objects.equals(tokenIdentityProvider, that.tokenIdentityProvider); } @Override @@ -105,6 +119,7 @@ public int hashCode() { int hashCode = 1; hashCode = 31 * hashCode + super.hashCode(); hashCode = 31 * hashCode + Objects.hashCode(credentialsProvider); + hashCode = 31 * hashCode + Objects.hashCode(tokenIdentityProvider); return hashCode; } @@ -139,6 +154,17 @@ default Builder credentialsProvider(IdentityProvider<? extends AwsCredentialsI + /** + * Sets the {@link IdentityProvider} that will provide a token identity to be used + * to authenticate this request. + * + * @param tokenIdentityProvider The {@link IdentityProvider}. + * @return This object for chaining.
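+ * + * <p>An illustrative sketch, mirroring this patch's tests ({@code StaticTokenProvider} supplies a fixed token): + * {@code AwsRequestOverrideConfiguration.builder().tokenIdentityProvider(StaticTokenProvider.create(() -> "my-token")).build()}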
*/ + default Builder tokenIdentityProvider(IdentityProvider<? extends TokenIdentity> tokenIdentityProvider) { + throw new UnsupportedOperationException(); + } + @Override AwsRequestOverrideConfiguration build(); } @@ -146,6 +172,7 @@ default Builder credentialsProvider(IdentityProvider<? extends AwsCredentialsI private static final class BuilderImpl extends RequestOverrideConfiguration.BuilderImpl<Builder> implements Builder { private IdentityProvider<? extends AwsCredentialsIdentity> awsCredentialsProvider; + private IdentityProvider<? extends TokenIdentity> tokenIdentityProvider; private BuilderImpl() { } @@ -157,6 +184,7 @@ private BuilderImpl(RequestOverrideConfiguration requestOverrideConfiguration) { private BuilderImpl(AwsRequestOverrideConfiguration awsRequestOverrideConfig) { super(awsRequestOverrideConfig); this.awsCredentialsProvider = awsRequestOverrideConfig.credentialsProvider; + this.tokenIdentityProvider = awsRequestOverrideConfig.tokenIdentityProvider; } @Override @@ -170,6 +198,12 @@ public AwsCredentialsProvider credentialsProvider() { return CredentialUtils.toCredentialsProvider(awsCredentialsProvider); } + @Override + public Builder tokenIdentityProvider(IdentityProvider<? extends TokenIdentity> tokenIdentityProvider) { + this.tokenIdentityProvider = tokenIdentityProvider; + return this; + } + @Override public AwsRequestOverrideConfiguration build() { return new AwsRequestOverrideConfiguration(this); diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/auth/AuthSchemePreferenceResolver.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/auth/AuthSchemePreferenceResolver.java new file mode 100644 index 000000000000..5e7fb58f0d96 --- /dev/null +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/auth/AuthSchemePreferenceResolver.java @@ -0,0 +1,123 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.awscore.auth; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.core.SdkSystemSetting; +import software.amazon.awssdk.profiles.Profile; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.profiles.ProfileFileSystemSetting; +import software.amazon.awssdk.profiles.ProfileProperty; +import software.amazon.awssdk.utils.CollectionUtils; +import software.amazon.awssdk.utils.StringUtils; +import software.amazon.awssdk.utils.Validate; + +/** + * A resolver for the default value of auth scheme preference. This checks environment variables, + * system properties, and the profile file for the relevant configuration options when + * {@link #resolveAuthSchemePreference()} is invoked.
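+ * + * <p>A minimal usage sketch (assuming the default profile file and profile name): + * {@code List<String> preferences = AuthSchemePreferenceResolver.builder().build().resolveAuthSchemePreference();}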
*/ +@SdkProtectedApi +public final class AuthSchemePreferenceResolver { + private final Supplier<ProfileFile> profileFile; + private final String profileName; + + private AuthSchemePreferenceResolver(Builder builder) { + this.profileFile = Validate.getOrDefault(builder.profileFile, () -> ProfileFile::defaultProfileFile); + this.profileName = Validate.getOrDefault(builder.profileName, + ProfileFileSystemSetting.AWS_PROFILE::getStringValueOrThrow); + } + + public static Builder builder() { + return new Builder(); + } + + /** + * Resolves the auth scheme preference based on the following order of precedence: + * 1. System settings (JVM and then environment). + * 2. Profile file. + * + * @return The resolved, ordered list of auth scheme preferences, or an empty list if no values are found. + */ + public List<String> resolveAuthSchemePreference() { + List<String> systemSettingList = fromSystemSetting(); + if (!CollectionUtils.isNullOrEmpty(systemSettingList)) { + return systemSettingList; + } + + List<String> profileFilePrefList = fromProfileFile(); + if (!CollectionUtils.isNullOrEmpty(profileFilePrefList)) { + return profileFilePrefList; + } + + return Collections.emptyList(); + } + + private List<String> fromSystemSetting() { + Optional<String> value = SdkSystemSetting.AWS_AUTH_SCHEME_PREFERENCE.getStringValue(); + if (value.isPresent()) { + return parseAuthSchemeList(value.get()); + } + return Collections.emptyList(); + } + + private List<String> fromProfileFile() { + ProfileFile profileFile = this.profileFile.get(); + + Optional<Profile> profile = profileFile.profile(profileName); + + String unformattedAuthSchemePreferenceList = + profile + .flatMap(p -> p.property(ProfileProperty.AUTH_SCHEME_PREFERENCE)) + .orElse(null); + + return unformattedAuthSchemePreferenceList != null + ? parseAuthSchemeList(unformattedAuthSchemePreferenceList) + : Collections.emptyList(); + } + + public static final class Builder { + private Supplier<ProfileFile> profileFile; + private String profileName; + + public AuthSchemePreferenceResolver.Builder profileFile(Supplier<ProfileFile> profileFile) { + this.profileFile = profileFile; + return this; + } + + public AuthSchemePreferenceResolver.Builder profileName(String profileName) { + this.profileName = profileName; + return this; + } + + public AuthSchemePreferenceResolver build() { + return new AuthSchemePreferenceResolver(this); + } + } + + private static List<String> parseAuthSchemeList(String unformattedList) { + if (StringUtils.isEmpty(unformattedList)) { + return Collections.emptyList(); + } + + return Arrays.asList(unformattedList.replaceAll("\\s+", "").split(",")); + } +} diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/AwsExecutionContextBuilder.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/AwsExecutionContextBuilder.java index 63bbcaa6cdb0..4a71ba7681fb 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/AwsExecutionContextBuilder.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/AwsExecutionContextBuilder.java @@ -21,6 +21,8 @@ import static software.amazon.awssdk.core.interceptor.SdkExecutionAttribute.RESOLVED_CHECKSUM_SPECS; import static software.amazon.awssdk.core.internal.useragent.BusinessMetricsUtils.resolveRetryMode; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import java.util.Optional; import software.amazon.awssdk.annotations.SdkInternalApi; @@ -36,6 +38,8 @@ import software.amazon.awssdk.core.SdkRequest; import software.amazon.awssdk.core.SdkResponse; import
software.amazon.awssdk.core.SelectedAuthScheme; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; @@ -49,8 +53,11 @@ import software.amazon.awssdk.core.internal.InternalCoreExecutionAttribute; import software.amazon.awssdk.core.internal.util.HttpChecksumResolver; import software.amazon.awssdk.core.signer.Signer; +import software.amazon.awssdk.core.sync.ResponseTransformer; +import software.amazon.awssdk.core.useragent.AdditionalMetadata; import software.amazon.awssdk.core.useragent.BusinessMetricCollection; import software.amazon.awssdk.endpoints.EndpointProvider; +import software.amazon.awssdk.http.ContentStreamProvider; import software.amazon.awssdk.http.auth.scheme.NoAuthAuthScheme; import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme; import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeProvider; @@ -69,7 +76,7 @@ private AwsExecutionContextBuilder() { */ public static ExecutionContext invokeInterceptorsAndCreateExecutionContext(ClientExecutionParams executionParams, - SdkClientConfiguration clientConfig) { + SdkClientConfiguration clientConfig) { // Note: This is currently copied to DefaultS3Presigner and other presigners. // Don't edit this without considering those @@ -134,13 +141,13 @@ private AwsExecutionContextBuilder() { putAuthSchemeResolutionAttributes(executionAttributes, clientConfig, originalRequest); ExecutionInterceptorChain executionInterceptorChain = - new ExecutionInterceptorChain(clientConfig.option(SdkClientOption.EXECUTION_INTERCEPTORS)); + new ExecutionInterceptorChain(clientConfig.option(SdkClientOption.EXECUTION_INTERCEPTORS)); InterceptorContext interceptorContext = InterceptorContext.builder() - .request(originalRequest) - .asyncRequestBody(executionParams.getAsyncRequestBody()) - .requestBody(executionParams.getRequestBody()) - .build(); + .request(originalRequest) + .asyncRequestBody(executionParams.getAsyncRequestBody()) + .requestBody(executionParams.getRequestBody()) + .build(); interceptorContext = runInitialInterceptors(interceptorContext, executionAttributes, executionInterceptorChain); SdkRequest modifiedRequests = interceptorContext.request(); @@ -159,6 +166,8 @@ private AwsExecutionContextBuilder() { signer, executionAttributes, executionAttributes.getOptionalAttribute( AwsSignerExecutionAttribute.AWS_CREDENTIALS).orElse(null))); + putStreamingInputOutputTypesMetadata(executionAttributes, executionParams); + return ExecutionContext.builder() .interceptorChain(executionInterceptorChain) .interceptorContext(interceptorContext) @@ -168,6 +177,57 @@ private AwsExecutionContextBuilder() { .build(); } + private static void putStreamingInputOutputTypesMetadata( + ExecutionAttributes executionAttributes, ClientExecutionParams executionParams) { + List userAgentMetadata = new ArrayList<>(); + + if (executionParams.getRequestBody() != null) { + userAgentMetadata.add( + AdditionalMetadata + .builder() + .name("rb") + .value(ContentStreamProvider.ProviderType.shortValueFromName( + executionParams.getRequestBody().contentStreamProvider().name()) + ) + .build()); + } + + if (executionParams.getAsyncRequestBody() != null) { + userAgentMetadata.add( + AdditionalMetadata + .builder() + .name("rb") + .value(AsyncRequestBody.BodyType.shortValueFromName( + 
executionParams.getAsyncRequestBody().body()) + ) + .build()); + } + + if (executionParams.getResponseTransformer() != null) { + userAgentMetadata.add( + AdditionalMetadata + .builder() + .name("rt") + .value(ResponseTransformer.TransformerType.shortValueFromName( + executionParams.getResponseTransformer().name()) + ) + .build()); + } + + if (executionParams.getAsyncResponseTransformer() != null) { + userAgentMetadata.add( + AdditionalMetadata + .builder() + .name("rt") + .value(AsyncResponseTransformer.TransformerType.shortValueFromName( + executionParams.getAsyncResponseTransformer().name()) + ) + .build()); + } + + executionAttributes.putAttribute(SdkInternalExecutionAttribute.USER_AGENT_METADATA, userAgentMetadata); + } + /** * We will load the old (non-SRA) signer if this client seems like an old version or the customer has provided a signer * override. We assume that if there's no auth schemes defined, we're on the old code path. @@ -217,9 +277,6 @@ private static void putAuthSchemeResolutionAttributes(ExecutionAttributes execut .putAttribute(SdkInternalExecutionAttribute.IDENTITY_PROVIDERS, identityProviders); } - // TODO(sra-identity-and-auth): This is hard coding the logic for the credentialsIdentityProvider from - // AwsRequestOverrideConfiguration. Currently, AwsRequestOverrideConfiguration does not support overriding the - // tokenIdentityProvider. When adding that support this method will need to be updated. private static IdentityProviders resolveIdentityProviders(SdkRequest originalRequest, SdkClientConfiguration clientConfig) { IdentityProviders identityProviders = @@ -232,13 +289,17 @@ private static IdentityProviders resolveIdentityProviders(SdkRequest originalReq return null; } - return originalRequest.overrideConfiguration() - .filter(c -> c instanceof AwsRequestOverrideConfiguration) - .map(c -> (AwsRequestOverrideConfiguration) c) - .flatMap(AwsRequestOverrideConfiguration::credentialsIdentityProvider) - .map(identityProvider -> - identityProviders.copy(b -> b.putIdentityProvider(identityProvider))) - .orElse(identityProviders); + return originalRequest + .overrideConfiguration() + .filter(c -> c instanceof AwsRequestOverrideConfiguration) + .map(c -> (AwsRequestOverrideConfiguration) c) + .map(c -> { + return identityProviders.copy(b -> { + c.credentialsIdentityProvider().ifPresent(b::putIdentityProvider); + c.tokenIdentityProvider().ifPresent(b::putIdentityProvider); + }); + }) + .orElse(identityProviders); } /** @@ -277,12 +338,13 @@ private static MetricCollector resolveMetricCollector(ClientExecutionParams tokenIdentityProvider = StaticTokenProvider.create(() -> "test-token"); + + AwsRequestOverrideConfiguration configuration1 = AwsRequestOverrideConfiguration + .builder().tokenIdentityProvider(tokenIdentityProvider).build(); + + assertThat(configuration1.tokenIdentityProvider().get().resolveIdentity().join().token()) + .isEqualTo(tokenIdentityProvider.resolveIdentity().join().token()); + } + private void assertCredentialsEqual(AwsCredentialsProvider credentialsProvider, IdentityProvider identityProvider) { AwsCredentials creds1 = credentialsProvider.resolveCredentials(); diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/internal/AwsExecutionContextBuilderTest.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/internal/AwsExecutionContextBuilderTest.java index 4f2bea548c95..e6ab211de5da 100644 --- a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/internal/AwsExecutionContextBuilderTest.java +++ 
b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/internal/AwsExecutionContextBuilderTest.java @@ -22,11 +22,13 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import java.io.File; +import java.io.IOException; import java.util.Arrays; import java.util.Collections; -import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.function.Supplier; import org.junit.Before; @@ -36,6 +38,7 @@ import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.auth.token.credentials.StaticTokenProvider; import software.amazon.awssdk.awscore.AwsRequest; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.awscore.client.config.AwsClientOption; @@ -43,6 +46,8 @@ import software.amazon.awssdk.core.SdkRequest; import software.amazon.awssdk.core.SdkResponse; import software.amazon.awssdk.core.SelectedAuthScheme; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.checksums.ChecksumSpecs; import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; @@ -58,17 +63,19 @@ import software.amazon.awssdk.core.internal.util.HttpChecksumUtils; import software.amazon.awssdk.core.signer.NoOpSigner; import software.amazon.awssdk.core.signer.Signer; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.core.sync.ResponseTransformer; +import software.amazon.awssdk.core.useragent.AdditionalMetadata; import software.amazon.awssdk.http.auth.aws.scheme.AwsV4AuthScheme; import software.amazon.awssdk.http.auth.scheme.NoAuthAuthScheme; import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme; import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; import software.amazon.awssdk.http.auth.spi.signer.HttpSigner; -import software.amazon.awssdk.http.auth.spi.signer.SignerProperty; import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; import software.amazon.awssdk.identity.spi.IdentityProvider; import software.amazon.awssdk.identity.spi.IdentityProviders; +import software.amazon.awssdk.identity.spi.TokenIdentity; import software.amazon.awssdk.profiles.ProfileFile; -import software.amazon.awssdk.regions.RegionScope; @RunWith(MockitoJUnitRunner.class) public class AwsExecutionContextBuilderTest { @@ -396,16 +403,24 @@ public void invokeInterceptorsAndCreateExecutionContext_withoutIdentityProviders public void invokeInterceptorsAndCreateExecutionContext_requestOverrideForIdentityProvider_updatesIdentityProviders() { IdentityProvider clientCredentialsProvider = StaticCredentialsProvider.create(AwsBasicCredentials.create("foo", "bar")); + IdentityProvider clientTokenProvider = StaticTokenProvider.create(() -> "client-token"); IdentityProviders identityProviders = - IdentityProviders.builder().putIdentityProvider(clientCredentialsProvider).build(); + IdentityProviders.builder() + .putIdentityProvider(clientCredentialsProvider) + .putIdentityProvider(clientTokenProvider) + .build(); SdkClientConfiguration clientConfig = testClientConfiguration() .option(SdkClientOption.IDENTITY_PROVIDERS, identityProviders) .build(); IdentityProvider 
requestCredentialsProvider = StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid")); + IdentityProvider requestTokenProvider = StaticTokenProvider.create(() -> "request-token"); Optional overrideConfiguration = - Optional.of(AwsRequestOverrideConfiguration.builder().credentialsProvider(requestCredentialsProvider).build()); + Optional.of(AwsRequestOverrideConfiguration.builder() + .credentialsProvider(requestCredentialsProvider) + .tokenIdentityProvider(requestTokenProvider) + .build()); when(sdkRequest.overrideConfiguration()).thenReturn(overrideConfiguration); ClientExecutionParams executionParams = clientExecutionParams(); @@ -420,6 +435,79 @@ public void invokeInterceptorsAndCreateExecutionContext_requestOverrideForIdenti actualIdentityProviders.identityProvider(AwsCredentialsIdentity.class); assertThat(actualIdentityProvider).isSameAs(requestCredentialsProvider); + + IdentityProvider actualTokenProvider = + actualIdentityProviders.identityProvider(TokenIdentity.class); + + assertThat(actualTokenProvider).isSameAs(requestTokenProvider); + } + + @Test + public void invokeInterceptorsAndCreateExecutionContext_withRequestBody_addsUserAgentMetadata() throws IOException { + ClientExecutionParams executionParams = clientExecutionParams(); + File testFile = File.createTempFile("testFile", UUID.randomUUID().toString()); + testFile.deleteOnExit(); + executionParams.withRequestBody(RequestBody.fromFile(testFile)); + + ExecutionContext executionContext = + AwsExecutionContextBuilder.invokeInterceptorsAndCreateExecutionContext(executionParams, + testClientConfiguration().build()); + + ExecutionAttributes executionAttributes = executionContext.executionAttributes(); + assertThat(executionAttributes.getAttribute(SdkInternalExecutionAttribute.USER_AGENT_METADATA)).isEqualTo( + Collections.singletonList(AdditionalMetadata.builder().name("rb").value("f").build()) + ); + } + + @Test + public void invokeInterceptorsAndCreateExecutionContext_withResponseTransformer_addsUserAgentMetadata() throws IOException { + ClientExecutionParams executionParams = clientExecutionParams(); + File testFile = File.createTempFile("testFile", UUID.randomUUID().toString()); + testFile.deleteOnExit(); + executionParams.withResponseTransformer(ResponseTransformer.toFile(testFile)); + + ExecutionContext executionContext = + AwsExecutionContextBuilder.invokeInterceptorsAndCreateExecutionContext(executionParams, + testClientConfiguration().build()); + + ExecutionAttributes executionAttributes = executionContext.executionAttributes(); + assertThat(executionAttributes.getAttribute(SdkInternalExecutionAttribute.USER_AGENT_METADATA)).isEqualTo( + Collections.singletonList(AdditionalMetadata.builder().name("rt").value("f").build()) + ); + } + + @Test + public void invokeInterceptorsAndCreateExecutionContext_withAsyncRequestBody_addsUserAgentMetadata() throws IOException { + ClientExecutionParams executionParams = clientExecutionParams(); + File testFile = File.createTempFile("testFile", UUID.randomUUID().toString()); + testFile.deleteOnExit(); + executionParams.withAsyncRequestBody(AsyncRequestBody.fromFile(testFile)); + + ExecutionContext executionContext = + AwsExecutionContextBuilder.invokeInterceptorsAndCreateExecutionContext(executionParams, + testClientConfiguration().build()); + + ExecutionAttributes executionAttributes = executionContext.executionAttributes(); + assertThat(executionAttributes.getAttribute(SdkInternalExecutionAttribute.USER_AGENT_METADATA)).isEqualTo( + 
Collections.singletonList(AdditionalMetadata.builder().name("rb").value("f").build()) + ); + } + + @Test + public void invokeInterceptorsAndCreateExecutionContext_withAsyncResponseTransformer_addsUserAgentMetadata() throws IOException { + ClientExecutionParams executionParams = clientExecutionParams(); + File testFile = File.createTempFile("testFile", UUID.randomUUID().toString()); + testFile.deleteOnExit(); + executionParams.withAsyncResponseTransformer(AsyncResponseTransformer.toFile(testFile)); + + ExecutionContext executionContext = + AwsExecutionContextBuilder.invokeInterceptorsAndCreateExecutionContext(executionParams, + testClientConfiguration().build()); + + ExecutionAttributes executionAttributes = executionContext.executionAttributes(); + assertThat(executionAttributes.getAttribute(SdkInternalExecutionAttribute.USER_AGENT_METADATA)).isEqualTo( + Collections.singletonList(AdditionalMetadata.builder().name("rt").value("f").build()) + ); } private ClientExecutionParams clientExecutionParams() { diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/internal/auth/AuthSchemePreferenceResolverTest.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/internal/auth/AuthSchemePreferenceResolverTest.java new file mode 100644 index 000000000000..1f8526009cf8 --- /dev/null +++ b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/internal/auth/AuthSchemePreferenceResolverTest.java @@ -0,0 +1,107 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
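The four tests above pin the wire format down: one `AdditionalMetadata` entry per streaming member, named `rb` for request bodies and `rt` for response transformers, whose value is a one-letter code derived from the implementation's reported name. A minimal sketch of that mapping, using the `BodyType`/`TransformerType` enums added later in this patch:

    import software.amazon.awssdk.core.async.AsyncRequestBody;
    import software.amazon.awssdk.core.async.AsyncResponseTransformer;

    public class ShortValueSketch {
        public static void main(String[] args) {
            // "File" -> "f", "Bytes" -> "b", "Stream" -> "s", "Publisher" -> "p".
            System.out.println(AsyncRequestBody.BodyType.shortValueFromName("File"));
            // Unrecognized names fall back to UNKNOWN's short value, "u".
            System.out.println(AsyncResponseTransformer.TransformerType.shortValueFromName("SomethingElse"));
        }
    }
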
+ */ + +package software.amazon.awssdk.awscore.internal.auth; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.stream.Stream; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; +import software.amazon.awssdk.core.SdkSystemSetting; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.profiles.ProfileProperty; +import software.amazon.awssdk.utils.StringInputStream; + +class AuthSchemePreferenceResolverTest { + + @AfterEach + void tearDown() { + System.clearProperty(SdkSystemSetting.AWS_AUTH_SCHEME_PREFERENCE.property()); + } + + @ParameterizedTest(name = "{0}") + @MethodSource("profileTestCases") + void profileParsingTests(String testName, String profileContent, String profileName, List expected) { + ProfileFile profileFile = ProfileFile.builder() + .type(ProfileFile.Type.CONFIGURATION) + .content(new StringInputStream(profileContent)) + .build(); + + AuthSchemePreferenceResolver.Builder resolverBuilder = AuthSchemePreferenceResolver.builder() + .profileFile(() -> profileFile); + if (profileName != null) { + resolverBuilder.profileName(profileName); + } + + assertThat(resolverBuilder.build().resolveAuthSchemePreference()).isEqualTo(expected); + } + + static Stream profileTestCases() { + return Stream.of( + Arguments.of( + "Default profile parsing", + "[default]\n" + ProfileProperty.AUTH_SCHEME_PREFERENCE + "=sigv4,bearer", + null, + Arrays.asList("sigv4", "bearer") + ), + Arguments.of( + "Custom profile parsing", + "[profile custom]\n" + ProfileProperty.AUTH_SCHEME_PREFERENCE + "=sigv4,bearer", + "custom", + Arrays.asList("sigv4", "bearer") + ), + Arguments.of( + "Profile with whitespace", + "[default]\n" + ProfileProperty.AUTH_SCHEME_PREFERENCE + "=sigv4, \tbearer \t", + null, + Arrays.asList("sigv4", "bearer") + ) + ); + } + + @ParameterizedTest(name = "{0}") + @MethodSource("systemSettingTestCases") + void systemSettingParsingTests(String testName, String systemSetting, List expected) { + if (systemSetting != null) { + System.setProperty(SdkSystemSetting.AWS_AUTH_SCHEME_PREFERENCE.property(), systemSetting); + } + + AuthSchemePreferenceResolver resolver = AuthSchemePreferenceResolver.builder().build(); + assertThat(resolver.resolveAuthSchemePreference()).isEqualTo(expected); + } + + static Stream systemSettingTestCases() { + return Stream.of( + Arguments.of("Basic system setting", "sigv4,bearer", Arrays.asList("sigv4", "bearer")), + Arguments.of("Empty system setting", "", Collections.emptyList()), + Arguments.of("No system setting", null, Collections.emptyList()), + + // Whitespace/formatting cases (from schemeParsingCases) + Arguments.of("Whitespace with tabs", "scheme1, scheme2 , \tscheme3 \t", + Arrays.asList("scheme1", "scheme2", "scheme3")), + Arguments.of("Whitespace with joined schemes", "scheme1, scheme2 \t scheme3 scheme4", + Arrays.asList("scheme1", "scheme2scheme3scheme4")), + Arguments.of("Whitespace in scheme names", "sigv4, sig v 4 a, bearer", + Arrays.asList("sigv4", "sigv4a", "bearer")) + ); + } +} \ No newline at end of file diff --git a/core/checksums-spi/pom.xml b/core/checksums-spi/pom.xml index e4ca12efd0b9..1c4885c4f731 100644 --- a/core/checksums-spi/pom.xml +++ b/core/checksums-spi/pom.xml @@ -22,7 
+22,7 @@ software.amazon.awssdk core - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT checksums-spi diff --git a/core/checksums/pom.xml b/core/checksums/pom.xml index dfb5df3927c6..b6ddf3cee445 100644 --- a/core/checksums/pom.xml +++ b/core/checksums/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT checksums diff --git a/core/checksums/src/main/java/software/amazon/awssdk/checksums/internal/ConstructorCache.java b/core/checksums/src/main/java/software/amazon/awssdk/checksums/internal/ConstructorCache.java index a710ed0a0ef4..af539cde94ae 100644 --- a/core/checksums/src/main/java/software/amazon/awssdk/checksums/internal/ConstructorCache.java +++ b/core/checksums/src/main/java/software/amazon/awssdk/checksums/internal/ConstructorCache.java @@ -63,7 +63,17 @@ private Optional> getClass(String className) { return Optional.empty(); } }); - return classRef.map(WeakReference::get); + + // if the WeakReference to the class has been garbage collected, remove it from the cache and try again + if (classRef.isPresent()) { + Class clazz = classRef.get().get(); + if (clazz != null) { + return Optional.of(clazz); + } + classesByClassLoader.remove(classLoader); + return getClass(className); + } + return Optional.empty(); } /** diff --git a/core/crt-core/pom.xml b/core/crt-core/pom.xml index 41726ded72c4..2fc542c0702f 100644 --- a/core/crt-core/pom.xml +++ b/core/crt-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT crt-core diff --git a/core/endpoints-spi/pom.xml b/core/endpoints-spi/pom.xml index 4910447c34c4..8f36519c9449 100644 --- a/core/endpoints-spi/pom.xml +++ b/core/endpoints-spi/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/core/http-auth-aws-crt/pom.xml b/core/http-auth-aws-crt/pom.xml index 4cf5761af916..e10d6f8af594 100644 --- a/core/http-auth-aws-crt/pom.xml +++ b/core/http-auth-aws-crt/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT http-auth-aws-crt diff --git a/core/http-auth-aws-eventstream/pom.xml b/core/http-auth-aws-eventstream/pom.xml index e98a8c3648a2..a84894fd9c37 100644 --- a/core/http-auth-aws-eventstream/pom.xml +++ b/core/http-auth-aws-eventstream/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT http-auth-aws-eventstream diff --git a/core/http-auth-aws/pom.xml b/core/http-auth-aws/pom.xml index 0649d892822f..1e8c30c72911 100644 --- a/core/http-auth-aws/pom.xml +++ b/core/http-auth-aws/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT http-auth-aws diff --git a/core/http-auth-spi/pom.xml b/core/http-auth-spi/pom.xml index 983f28e6c4ca..040b3e894e16 100644 --- a/core/http-auth-spi/pom.xml +++ b/core/http-auth-spi/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT http-auth-spi diff --git a/core/http-auth/pom.xml b/core/http-auth/pom.xml index b1c4b55d5aa4..5b2f58a3ea4b 100644 --- a/core/http-auth/pom.xml +++ b/core/http-auth/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT http-auth diff --git a/core/identity-spi/pom.xml b/core/identity-spi/pom.xml index 3b116af767ea..2a313d214ee3 100644 --- a/core/identity-spi/pom.xml +++ b/core/identity-spi/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT identity-spi diff --git a/core/imds/pom.xml b/core/imds/pom.xml index 7acec9267dd8..e74e1b87d394 
100644 --- a/core/imds/pom.xml +++ b/core/imds/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 imds diff --git a/core/json-utils/pom.xml b/core/json-utils/pom.xml index 3be77ca309cc..c2725b93af4a 100644 --- a/core/json-utils/pom.xml +++ b/core/json-utils/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/core/metrics-spi/pom.xml b/core/metrics-spi/pom.xml index 7f4953632ea9..61bc43307137 100644 --- a/core/metrics-spi/pom.xml +++ b/core/metrics-spi/pom.xml @@ -5,7 +5,7 @@ core software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/core/pom.xml b/core/pom.xml index 48e61cf6e602..4052a8e7b278 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT core diff --git a/core/profiles/pom.xml b/core/profiles/pom.xml index 6e0f5bf9bdb2..41a59ddcd903 100644 --- a/core/profiles/pom.xml +++ b/core/profiles/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT profiles diff --git a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFile.java b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFile.java index 89ffa2b64ad2..5199e729da8f 100644 --- a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFile.java +++ b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFile.java @@ -85,6 +85,13 @@ public static Aggregator aggregator() { return new Aggregator(); } + /** + * Create an empty profile file. + */ + static ProfileFile empty() { + return new ProfileFile(Collections.emptyMap()); + } + /** * Get the default profile file, using the credentials file from "~/.aws/credentials", the config file from "~/.aws/config" * and the "default" profile. This default behavior can be customized using the @@ -310,8 +317,10 @@ public void setType(Type type) { @Override public ProfileFile build() { + Validate.isTrue(content != null || contentLocation != null, + "content or contentLocation must be set."); InputStream stream = content != null ? 
content : - FunctionalUtils.invokeSafely(() -> Files.newInputStream(contentLocation)); + FunctionalUtils.invokeSafely(() -> Files.newInputStream(contentLocation)); Validate.paramNotNull(type, "type"); Validate.paramNotNull(stream, "content"); diff --git a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFileSupplier.java b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFileSupplier.java index 4dec2883c814..7046c300d339 100644 --- a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFileSupplier.java +++ b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFileSupplier.java @@ -55,13 +55,15 @@ static ProfileFileSupplier defaultSupplier() { = ProfileFileLocation.configurationFileLocation() .map(path -> reloadWhenModified(path, ProfileFile.Type.CONFIGURATION)); - ProfileFileSupplier supplier = () -> ProfileFile.builder().build(); + ProfileFileSupplier supplier; if (credentialsSupplierOptional.isPresent() && configurationSupplierOptional.isPresent()) { supplier = aggregate(credentialsSupplierOptional.get(), configurationSupplierOptional.get()); } else if (credentialsSupplierOptional.isPresent()) { supplier = credentialsSupplierOptional.get(); } else if (configurationSupplierOptional.isPresent()) { supplier = configurationSupplierOptional.get(); + } else { + supplier = fixedProfileFile(ProfileFile.empty()); } return supplier; diff --git a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java index 434e27b3b6f2..cd97c6047a55 100644 --- a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java +++ b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java @@ -151,6 +151,8 @@ public final class ProfileProperty { public static final String USE_DUALSTACK_ENDPOINT = "use_dualstack_endpoint"; + public static final String AUTH_SCHEME_PREFERENCE = "auth_scheme_preference"; + public static final String USE_FIPS_ENDPOINT = "use_fips_endpoint"; public static final String EC2_METADATA_SERVICE_ENDPOINT_MODE = "ec2_metadata_service_endpoint_mode"; diff --git a/core/profiles/src/main/java/software/amazon/awssdk/profiles/internal/ProfileFileRefresher.java b/core/profiles/src/main/java/software/amazon/awssdk/profiles/internal/ProfileFileRefresher.java index 799aa5880882..60a91a25527f 100644 --- a/core/profiles/src/main/java/software/amazon/awssdk/profiles/internal/ProfileFileRefresher.java +++ b/core/profiles/src/main/java/software/amazon/awssdk/profiles/internal/ProfileFileRefresher.java @@ -39,6 +39,7 @@ public final class ProfileFileRefresher { private static final ProfileFileRefreshRecord EMPTY_REFRESH_RECORD = ProfileFileRefreshRecord.builder() .refreshTime(Instant.MIN) .build(); + private static final long STALE_TIME_MS = 1000; private final CachedSupplier profileFileCache; private volatile ProfileFileRefreshRecord currentRefreshRecord; private final Supplier profileFile; @@ -96,7 +97,7 @@ private RefreshResult reloadAsRefreshResultIfStale() { refreshRecord = currentRefreshRecord; } - return wrapIntoRefreshResult(refreshRecord, now); + return wrapIntoRefreshResult(refreshRecord, now.plusMillis(STALE_TIME_MS)); } private RefreshResult wrapIntoRefreshResult(T value, Instant staleTime) { diff --git a/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileSupplierTest.java b/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileSupplierTest.java index 
bc39916da5d9..e5ca165840ae 100644 --- a/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileSupplierTest.java +++ b/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileSupplierTest.java @@ -48,6 +48,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.condition.EnabledForJreRange; import org.junit.jupiter.api.condition.JRE; +import software.amazon.awssdk.testutils.EnvironmentVariableHelper; import software.amazon.awssdk.utils.Pair; import software.amazon.awssdk.utils.StringInputStream; import software.amazon.awssdk.testutils.LogCaptor; @@ -581,6 +582,15 @@ public void checkPermission(Permission perm) { } } + @Test + public void defaultSupplier_noCredentialsFiles_returnsEmptyProvider() { + EnvironmentVariableHelper.run(environmentVariableHelper -> { + environmentVariableHelper.set(ProfileFileSystemSetting.AWS_SHARED_CREDENTIALS_FILE, "no-such-file"); + environmentVariableHelper.set(ProfileFileSystemSetting.AWS_CONFIG_FILE, "no-such-file"); + ProfileFileSupplier supplier = ProfileFileSupplier.defaultSupplier(); + assertThat(supplier.get().profiles()).isEmpty(); + }); + } private Path writeTestFile(String contents, Path path) { try { diff --git a/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileTest.java b/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileTest.java index e827e07dbd8b..9d2310c69d2b 100644 --- a/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileTest.java +++ b/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileTest.java @@ -570,6 +570,14 @@ public void returnsEmptyMap_when_AwsFilesDoNotExist() { assertThat(missingProfile.profiles()).isInstanceOf(Map.class); } + @Test + public void builderValidatesContentRequired() { + assertThatThrownBy(() -> ProfileFile.builder().type(ProfileFile.Type.CONFIGURATION).build()) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("content or contentLocation must be set."); + + } + private ProfileFile configFile(String configFile) { return ProfileFile.builder() .content(configFile) diff --git a/core/profiles/src/test/java/software/amazon/awssdk/profiles/internal/ProfileFileRefresherTest.java b/core/profiles/src/test/java/software/amazon/awssdk/profiles/internal/ProfileFileRefresherTest.java index 69e86f937484..97bf3e9aa707 100644 --- a/core/profiles/src/test/java/software/amazon/awssdk/profiles/internal/ProfileFileRefresherTest.java +++ b/core/profiles/src/test/java/software/amazon/awssdk/profiles/internal/ProfileFileRefresherTest.java @@ -29,11 +29,13 @@ import java.time.ZoneOffset; import java.time.temporal.TemporalAmount; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.logging.log4j.Level; import org.assertj.core.api.Assertions; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.testutils.LogCaptor; public class ProfileFileRefresherTest { @@ -63,43 +65,47 @@ void refreshIfStale_profileModifiedNoPathSpecified_doesNotReloadProfileFile() { ProfileFileRefresher refresher = refresherWithClock(clock) .profileFile(() -> profileFile(credentialsFilePath)) .build(); - Duration intervalWithinJitter = Duration.ofMillis(100); + Duration intervalWithinStale = Duration.ofMillis(100); ProfileFile file1 = refresher.refreshIfStale(); generateTestCredentialsFile("modifiedAccessKey", "modifiedSecretAccessKey"); 
updateModificationTime(credentialsFilePath, clock.instant().plusMillis(1)); - clock.tickForward(intervalWithinJitter); + clock.tickForward(intervalWithinStale); ProfileFile file2 = refresher.refreshIfStale(); Assertions.assertThat(file2).isSameAs(file1); } @Test - void refreshIfStale_profileModifiedWithinJitterPeriod_doesNotReloadProfileFile() { - Path credentialsFilePath = generateTestCredentialsFile("defaultAccessKey", "defaultSecretAccessKey"); + void refreshIfStale_profileModifiedWithinStalePeriod_doesNotReloadProfileFile() { + try (LogCaptor logCaptor = LogCaptor.create(Level.WARN)) { + Path credentialsFilePath = generateTestCredentialsFile("defaultAccessKey", "defaultSecretAccessKey"); - AdjustableClock clock = new AdjustableClock(); - ProfileFileRefresher refresher = refresherWithClock(clock) - .profileFile(() -> profileFile(credentialsFilePath)) - .profileFilePath(credentialsFilePath) - .build(); - Duration intervalWithinJitter = Duration.ofMillis(100); + AdjustableClock clock = new AdjustableClock(); + ProfileFileRefresher refresher = refresherWithClock(clock) + .profileFile(() -> profileFile(credentialsFilePath)) + .profileFilePath(credentialsFilePath) + .build(); + Duration intervalWithinStale = Duration.ofMillis(100); - ProfileFile file1 = refresher.refreshIfStale(); + ProfileFile file1 = refresher.refreshIfStale(); - clock.tickForward(intervalWithinJitter); - generateTestCredentialsFile("modifiedAccessKey", "modifiedSecretAccessKey"); - updateModificationTime(credentialsFilePath, clock.instant()); + clock.tickForward(intervalWithinStale); + generateTestCredentialsFile("modifiedAccessKey", "modifiedSecretAccessKey"); + updateModificationTime(credentialsFilePath, clock.instant()); - ProfileFile file2 = refresher.refreshIfStale(); + ProfileFile file2 = refresher.refreshIfStale(); - Assertions.assertThat(file2).isSameAs(file1); + Assertions.assertThat(file2).isSameAs(file1); + + Assertions.assertThat(logCaptor.loggedEvents()).isEmpty(); + } } @Test - void refreshIfStale_profileModifiedOutsideJitterPeriod_reloadsProfileFile() { + void refreshIfStale_profileModifiedOutsideStalePeriod_reloadsProfileFile() { Path credentialsFilePath = generateTestCredentialsFile("defaultAccessKey", "defaultSecretAccessKey"); AdjustableClock clock = new AdjustableClock(); diff --git a/core/protocols/aws-cbor-protocol/pom.xml b/core/protocols/aws-cbor-protocol/pom.xml index 5ed35e1353c5..8b58584a8f3b 100644 --- a/core/protocols/aws-cbor-protocol/pom.xml +++ b/core/protocols/aws-cbor-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-json-protocol/pom.xml b/core/protocols/aws-json-protocol/pom.xml index 2ecb51860694..8f7852a40288 100644 --- a/core/protocols/aws-json-protocol/pom.xml +++ b/core/protocols/aws-json-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-query-protocol/pom.xml b/core/protocols/aws-query-protocol/pom.xml index 2aa93deb9e4e..65f330af62f5 100644 --- a/core/protocols/aws-query-protocol/pom.xml +++ b/core/protocols/aws-query-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-xml-protocol/pom.xml b/core/protocols/aws-xml-protocol/pom.xml index 9185092b8827..93837a760bbf 100644 --- a/core/protocols/aws-xml-protocol/pom.xml +++ b/core/protocols/aws-xml-protocol/pom.xml @@ -20,7 +20,7 @@ protocols 
software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/core/protocols/pom.xml b/core/protocols/pom.xml index ed4568c00df7..23d07ed1c129 100644 --- a/core/protocols/pom.xml +++ b/core/protocols/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/core/protocols/protocol-core/pom.xml b/core/protocols/protocol-core/pom.xml index ed1e6b9bf97b..5656e9811ac3 100644 --- a/core/protocols/protocol-core/pom.xml +++ b/core/protocols/protocol-core/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/core/protocols/smithy-rpcv2-protocol/pom.xml b/core/protocols/smithy-rpcv2-protocol/pom.xml index bddbab75877b..c81c7fd93ead 100644 --- a/core/protocols/smithy-rpcv2-protocol/pom.xml +++ b/core/protocols/smithy-rpcv2-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/core/regions/pom.xml b/core/regions/pom.xml index e14c2e747d8a..2dff69bd1093 100644 --- a/core/regions/pom.xml +++ b/core/regions/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT regions diff --git a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json index 9b31643e033a..60495313a643 100644 --- a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json +++ b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json @@ -29,6 +29,9 @@ "ap-east-1" : { "description" : "Asia Pacific (Hong Kong)" }, + "ap-east-2" : { + "description" : "Asia Pacific (Taipei)" + }, "ap-northeast-1" : { "description" : "Asia Pacific (Tokyo)" }, @@ -135,6 +138,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "access-analyzer.ap-northeast-1.api.aws", @@ -411,6 +415,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -522,6 +527,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -649,6 +655,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -991,6 +998,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "credentialScope" : { "region" : "ap-northeast-1" @@ -1923,6 +1931,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -2045,6 +2054,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -2081,6 +2091,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -2188,6 +2199,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -2993,6 +3005,8 @@ "protocols" : [ "https" ] }, "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, "ap-northeast-1" : { "variants" : [ { "tags" : [ "dualstack" ] @@ -3018,17 
+3032,21 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "eu-central-1" : { "variants" : [ { "tags" : [ "dualstack" ] } ] }, + "eu-central-2" : { }, "eu-north-1" : { "variants" : [ { "tags" : [ "dualstack" ] } ] }, + "eu-south-1" : { }, "eu-west-1" : { "variants" : [ { "tags" : [ "dualstack" ] @@ -3044,6 +3062,7 @@ "tags" : [ "dualstack" ] } ] }, + "me-central-1" : { }, "sa-east-1" : { "variants" : [ { "tags" : [ "dualstack" ] @@ -3091,6 +3110,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -3458,6 +3478,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -3595,6 +3616,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -3662,6 +3684,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -4521,6 +4544,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "cloudcontrolapi.ap-northeast-1.api.aws", @@ -4798,6 +4822,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -5079,6 +5104,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -5392,6 +5418,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -6475,6 +6502,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -7158,6 +7186,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "datasync.ap-northeast-1.api.aws", @@ -7428,6 +7457,7 @@ } ] }, "endpoints" : { + "ap-east-2" : { }, "ap-northeast-1" : { "hostname" : "datazone.ap-northeast-1.api.aws" }, @@ -7641,6 +7671,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -7770,6 +7801,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "dlm.ap-northeast-1.api.aws", @@ -7974,6 +8006,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -8354,6 +8387,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -8469,6 +8503,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -8590,6 +8625,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "ec2.ap-northeast-1.api.aws", @@ -8778,6 +8814,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -8879,6 +8916,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { 
}, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -8975,6 +9013,7 @@ "ap-east-1" : { "hostname" : "eks-auth.ap-east-1.api.aws" }, + "ap-east-2" : { }, "ap-northeast-1" : { "hostname" : "eks-auth.ap-northeast-1.api.aws" }, @@ -9071,6 +9110,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -9366,6 +9406,7 @@ "tags" : [ "fips" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", @@ -9779,6 +9820,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -9867,6 +9909,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -10284,6 +10327,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "aos.ap-northeast-1.api.aws", @@ -10527,6 +10571,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "events.ap-northeast-1.api.aws", @@ -10829,6 +10874,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "firehose.ap-northeast-1.api.aws", @@ -11698,6 +11744,7 @@ "ap-east-1" : { "hostname" : "gameliftstreams.ap-east-1.api.aws" }, + "ap-east-2" : { }, "ap-northeast-1" : { "hostname" : "gameliftstreams.ap-northeast-1.api.aws" }, @@ -11922,6 +11969,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "glue.ap-northeast-1.api.aws", @@ -12531,6 +12579,7 @@ "endpoints" : { "ap-south-1" : { }, "ap-southeast-2" : { }, + "eu-west-1" : { }, "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -12626,6 +12675,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -12873,6 +12923,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "hostname" : "internetmonitor.ap-northeast-1.api.aws", "variants" : [ { @@ -13986,6 +14037,7 @@ "ap-east-1" : { "hostname" : "kendra-ranking.ap-east-1.api.aws" }, + "ap-east-2" : { }, "ap-northeast-1" : { "hostname" : "kendra-ranking.ap-northeast-1.api.aws" }, @@ -14092,6 +14144,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -14335,6 +14388,7 @@ "deprecated" : true, "hostname" : "kms-fips.ap-east-1.amazonaws.com" }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "kms-fips.ap-northeast-1.amazonaws.com", @@ -14989,6 +15043,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "lambda.ap-northeast-1.api.aws", @@ -15495,6 +15550,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "logs.ap-northeast-1.api.aws", @@ -16762,6 +16818,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -17051,6 +17108,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -17477,6 +17535,7 @@ 
"ap-east-1" : { "hostname" : "notifications.ap-east-1.api.aws" }, + "ap-east-2" : { }, "ap-northeast-1" : { "hostname" : "notifications.ap-northeast-1.api.aws" }, @@ -17585,6 +17644,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -18086,6 +18146,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "protocols" : [ "https" ], "variants" : [ { @@ -18957,6 +19018,7 @@ "ap-east-1" : { "hostname" : "qbusiness.ap-east-1.api.aws" }, + "ap-east-2" : { }, "ap-northeast-1" : { "hostname" : "qbusiness.ap-northeast-1.api.aws" }, @@ -19160,6 +19222,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "tags" : [ "dualstack" ] @@ -19404,6 +19467,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "rbin.ap-northeast-1.api.aws", @@ -19658,6 +19722,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -19935,6 +20000,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -20626,6 +20692,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -20721,6 +20788,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -21055,6 +21123,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "route53resolver.ap-northeast-1.api.aws", @@ -21524,6 +21593,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "hostname" : "s3.ap-northeast-1.amazonaws.com", "signatureVersions" : [ "s3", "s3v4" ], @@ -22421,6 +22491,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -22498,6 +22569,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "tags" : [ "dualstack" ] @@ -23312,6 +23384,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "servicediscovery.ap-northeast-1.api.aws", @@ -23579,6 +23652,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -24643,6 +24717,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "sns.ap-northeast-1.api.aws", @@ -24893,6 +24968,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "sqs.ap-northeast-1.api.aws", @@ -25140,6 +25216,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -25832,6 +25909,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -25940,6 +26018,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -26054,6 +26133,7 @@ "endpoints" : { "af-south-1" : { }, 
"ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -26097,6 +26177,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -26206,6 +26287,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -26324,6 +26406,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "synthetics.ap-northeast-1.api.aws", @@ -26588,6 +26671,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -28921,6 +29005,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -30435,6 +30520,12 @@ }, "isRegionalized" : true }, + "scheduler" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "schemas" : { "endpoints" : { "cn-north-1" : { }, @@ -37789,6 +37880,11 @@ "us-isob-east-1" : { } } }, + "athena" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "autoscaling" : { "defaults" : { "protocols" : [ "http", "https" ] @@ -38136,6 +38232,11 @@ } } }, + "lakeformation" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "lambda" : { "endpoints" : { "us-isob-east-1" : { } @@ -38882,6 +38983,11 @@ } } }, + "license-manager" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, "logs" : { "endpoints" : { "eu-isoe-west-1" : { } @@ -38963,6 +39069,11 @@ "isRegionalized" : false, "partitionEndpoint" : "aws-iso-e-global" }, + "route53profiles" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, "route53resolver" : { "endpoints" : { "eu-isoe-west-1" : { } diff --git a/core/regions/src/test/java/software/amazon/awssdk/regions/RegionTest.java b/core/regions/src/test/java/software/amazon/awssdk/regions/RegionTest.java index 49f057b746c2..d6d5a94deb97 100644 --- a/core/regions/src/test/java/software/amazon/awssdk/regions/RegionTest.java +++ b/core/regions/src/test/java/software/amazon/awssdk/regions/RegionTest.java @@ -66,4 +66,33 @@ public void idIsUrlEncoded() { Region region = Region.of("http://my-host.com/?"); assertThat(region.id()).isEqualTo("http%3A%2F%2Fmy-host.com%2F%3F"); } + + @Test + public void globalRegionIsRecognized() { + Region globalRegion = Region.of("aws-global"); + assertThat(globalRegion.id()).isEqualTo("aws-global"); + assertSame(Region.AWS_GLOBAL, globalRegion); + } + + @Test + public void multipleGlobalRegionsAreSupported() { + Region awsGlobal = Region.of("aws-global"); + Region s3Global = Region.of("s3-global"); + + assertThat(awsGlobal.id()).isEqualTo("aws-global"); + assertThat(s3Global.id()).isEqualTo("s3-global"); + + assertSame(Region.of("aws-global"), awsGlobal); + assertSame(Region.of("s3-global"), s3Global); + } + + @Test + public void allPartitionGlobalRegionsAreRecognized() { + assertThat(Region.of("aws-global").id()).isEqualTo("aws-global"); + assertThat(Region.of("aws-cn-global").id()).isEqualTo("aws-cn-global"); + assertThat(Region.of("aws-us-gov-global").id()).isEqualTo("aws-us-gov-global"); + assertThat(Region.of("aws-iso-global").id()).isEqualTo("aws-iso-global"); + assertThat(Region.of("aws-iso-b-global").id()).isEqualTo("aws-iso-b-global"); + assertThat(Region.of("aws-iso-f-global").id()).isEqualTo("aws-iso-f-global"); + } } diff --git 
a/core/retries-spi/pom.xml b/core/retries-spi/pom.xml index 79bd154cee59..5a44ceb3e010 100644 --- a/core/retries-spi/pom.xml +++ b/core/retries-spi/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/core/retries/pom.xml b/core/retries/pom.xml index 2b664bdc8daf..edc0f7e54ade 100644 --- a/core/retries/pom.xml +++ b/core/retries/pom.xml @@ -21,7 +21,7 @@ core software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/core/sdk-core/pom.xml b/core/sdk-core/pom.xml index b6afbe5b7a00..12cbe06d2d8d 100644 --- a/core/sdk-core/pom.xml +++ b/core/sdk-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT sdk-core AWS Java SDK :: SDK Core diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java index f55eb73cbc7f..65889c2d08fd 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java @@ -256,8 +256,14 @@ public enum SdkSystemSetting implements SystemSetting { * Configure the SIGV4A signing region set. * This is a non-empty, comma-delimited list of AWS region names used during signing. */ - AWS_SIGV4A_SIGNING_REGION_SET("aws.sigv4a.signing.region.set", null) - ; + AWS_SIGV4A_SIGNING_REGION_SET("aws.sigv4a.signing.region.set", null), + + + /** + * Configure the preferred auth scheme to use. + * This is a comma-delimited list of AWS auth scheme names used during signing. + */ + AWS_AUTH_SCHEME_PREFERENCE("aws.authSchemePreference", null); private final String systemProperty; private final String defaultValue; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java index 752e0032958f..6a3bbfee3caf 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java @@ -23,11 +23,13 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Path; import java.util.Arrays; +import java.util.Map; import java.util.Optional; import java.util.concurrent.ExecutorService; import java.util.function.Consumer; import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; +import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.core.FileRequestBodyConfiguration; import software.amazon.awssdk.core.internal.async.ByteBuffersAsyncRequestBody; @@ -37,6 +39,7 @@ import software.amazon.awssdk.core.internal.util.Mimetype; import software.amazon.awssdk.utils.BinaryUtils; import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.internal.EnumUtils; /** * Interface to allow non-blocking streaming of request content. This follows the reactive streams pattern where this interface is @@ -74,6 +77,16 @@ default String contentType() { return Mimetype.MIMETYPE_OCTET_STREAM; } + /** + * Each AsyncRequestBody should return a well-formed name that can be used to identify the implementation. + * The body name should only include alphanumeric characters. + * + * @return String containing the identifying name of this AsyncRequestBody implementation. 
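Putting the new system setting together with `AuthSchemePreferenceResolver` from earlier in the patch, a minimal sketch of the precedence behavior (scheme names as in the resolver tests; only the `aws.authSchemePreference` property name and the whitespace stripping come from this change):

    import java.util.List;
    import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver;

    public class AuthSchemePreferenceSketch {
        public static void main(String[] args) {
            // The JVM system property wins; the environment variable and the
            // profile property are consulted only when it is absent.
            System.setProperty("aws.authSchemePreference", "sigv4, \tbearer");

            AuthSchemePreferenceResolver resolver = AuthSchemePreferenceResolver.builder().build();

            // Whitespace is removed before splitting on commas: prints [sigv4, bearer].
            List<String> preference = resolver.resolveAuthSchemePreference();
            System.out.println(preference);
        }
    }
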
+ */ + default String body() { + return BodyType.UNKNOWN.getName(); + } + /** * Creates an {@link AsyncRequestBody} that produces data from the input ByteBuffer publisher. The data is delivered when the * publisher publishes the data. @@ -96,6 +109,11 @@ public Optional<Long> contentLength() { public void subscribe(Subscriber<? super ByteBuffer> s) { publisher.subscribe(s); } + + @Override + public String body() { + return BodyType.PUBLISHER.getName(); + } }; } @@ -513,4 +531,36 @@ default SdkPublisher<AsyncRequestBody> split(Consumer<AsyncRequestBodySplitConfiguration.Builder> splitConfigurator) { + + @SdkProtectedApi + enum BodyType { + FILE("File", "f"), + BYTES("Bytes", "b"), + STREAM("Stream", "s"), + PUBLISHER("Publisher", "p"), + UNKNOWN("Unknown", "u"); + + private static final Map<String, BodyType> VALUE_MAP = + EnumUtils.uniqueIndex(BodyType.class, BodyType::getName); + + private final String name; + private final String shortValue; + + BodyType(String name, String shortValue) { + this.name = name; + this.shortValue = shortValue; + } + + public String getName() { + return name; + } + + public String getShortValue() { + return shortValue; + } + + public static String shortValueFromName(String name) { + return VALUE_MAP.getOrDefault(name, UNKNOWN).getShortValue(); + } + } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncResponseTransformer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncResponseTransformer.java index 6550497d52ed..d7c872d89289 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncResponseTransformer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncResponseTransformer.java @@ -19,8 +19,10 @@ import java.io.InputStream; import java.nio.ByteBuffer; import java.nio.file.Path; +import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.core.FileTransformerConfiguration; import software.amazon.awssdk.core.ResponseBytes; @@ -36,6 +38,7 @@ import software.amazon.awssdk.utils.Validate; import software.amazon.awssdk.utils.builder.CopyableBuilder; import software.amazon.awssdk.utils.builder.ToCopyableBuilder; +import software.amazon.awssdk.utils.internal.EnumUtils; /** * Callback interface to handle a streaming asynchronous response.
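* <p>Illustrative only, not part of this patch: the built-in transformers returned by the static factories now
* identify themselves through {@code name()}, which feeds the short-value mapping used for metrics. A minimal
* sketch (using {@code SdkResponse} as a stand-in response type):
* <pre>{@code
* AsyncResponseTransformer<SdkResponse, ResponseBytes<SdkResponse>> transformer =
*     AsyncResponseTransformer.toBytes();
* // transformer.name() returns "Bytes"; TransformerType.shortValueFromName("Bytes") returns "b".
* }</pre>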
@@ -158,6 +161,16 @@ default SplitResult<ResponseT, ResultT> split(Consumer<SplittingTransformerConfiguration.Builder> splitConfig) { + /** + * Each AsyncResponseTransformer should return a well-formed name that can be used to identify the implementation. + * The Transformer name should only include alphanumeric characters. + * + * @return String containing the identifying name of this AsyncResponseTransformer implementation. + */ + default String name() { + return TransformerType.UNKNOWN.getName(); + } Builder resultFuture(CompletableFuture<ResultT> future); } } + + @SdkProtectedApi + enum TransformerType { + FILE("File", "f"), + BYTES("Bytes", "b"), + STREAM("Stream", "s"), + PUBLISHER("Publisher", "p"), + UNKNOWN("Unknown", "u"); + + private static final Map<String, TransformerType> VALUE_MAP = + EnumUtils.uniqueIndex(TransformerType.class, TransformerType::getName); + + private final String name; + private final String shortValue; + + TransformerType(String name, String shortValue) { + this.name = name; + this.shortValue = shortValue; + } + + public String getName() { + return name; + } + + public String getShortValue() { + return shortValue; + } + + public static String shortValueFromName(String name) { + return VALUE_MAP.getOrDefault(name, UNKNOWN).getShortValue(); + } + } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/BlockingInputStreamAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/BlockingInputStreamAsyncRequestBody.java index 3639d82c04c1..deb354d276dd 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/BlockingInputStreamAsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/BlockingInputStreamAsyncRequestBody.java @@ -120,6 +120,11 @@ public void subscribe(Subscriber<? super ByteBuffer> s) { } } + @Override + public String body() { + return BodyType.STREAM.getName(); + } + private void waitForSubscriptionIfNeeded() throws InterruptedException { long timeoutSeconds = subscribeTimeout.getSeconds(); if (!subscribedLatch.await(timeoutSeconds, TimeUnit.SECONDS)) { diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/BlockingOutputStreamAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/BlockingOutputStreamAsyncRequestBody.java index c0a044ffa3ba..f6bc15844729 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/BlockingOutputStreamAsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/BlockingOutputStreamAsyncRequestBody.java @@ -92,6 +92,11 @@ public void subscribe(Subscriber<? super ByteBuffer> s) { } } + @Override + public String body() { + return BodyType.STREAM.getName(); + } + private void waitForSubscriptionIfNeeded() { try { long timeoutSeconds = subscribeTimeout.getSeconds(); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/listener/AsyncResponseTransformerListener.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/listener/AsyncResponseTransformerListener.java index b189d51ec7ef..c7ee37690ca0 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/listener/AsyncResponseTransformerListener.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/listener/AsyncResponseTransformerListener.java @@ -76,6 +76,10 @@ final class NotifyingAsyncResponseTransformer<ResponseT, ResultT> implements AsyncResponseTransformer<ResponseT, ResultT> { this.listener = Validate.notNull(listener, "listener"); } + public AsyncResponseTransformer<ResponseT, ResultT> getDelegate() { + return delegate; + } + @Override public CompletableFuture<ResultT> prepare() { return delegate.prepare(); @@ -99,6 +103,11 @@ public void exceptionOccurred(Throwable error) { delegate.exceptionOccurred(error); } + @Override + public String name() { + return delegate.name(); + } + static void invoke(Runnable runnable, String callbackName) { try { runnable.run(); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/handler/ClientExecutionParams.java 
b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/handler/ClientExecutionParams.java index 81e9d4209ed8..e307f5857ce7 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/handler/ClientExecutionParams.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/handler/ClientExecutionParams.java @@ -23,6 +23,7 @@ import software.amazon.awssdk.core.SdkProtocolMetadata; import software.amazon.awssdk.core.SdkRequest; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.core.http.HttpResponseHandler; @@ -30,6 +31,7 @@ import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.runtime.transform.Marshaller; import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.core.sync.ResponseTransformer; import software.amazon.awssdk.metrics.MetricCollector; /** @@ -49,6 +51,8 @@ public final class ClientExecutionParams<InputT extends SdkRequest, OutputT> { private HttpResponseHandler<OutputT> responseHandler; private HttpResponseHandler<? extends SdkException> errorResponseHandler; private HttpResponseHandler<Response<OutputT>> combinedResponseHandler; + private ResponseTransformer<OutputT, ?> responseTransformer; + private AsyncResponseTransformer<OutputT, ?> asyncResponseTransformer; private boolean fullDuplex; private boolean hasInitialRequestEvent; private String hostPrefixExpression; @@ -133,6 +137,25 @@ public ClientExecutionParams<InputT, OutputT> withAsyncRequestBody(AsyncRequestBody asyncRequestBody) { return this; } + public ResponseTransformer<OutputT, ?> getResponseTransformer() { + return responseTransformer; + } + + public ClientExecutionParams<InputT, OutputT> withResponseTransformer(ResponseTransformer<OutputT, ?> responseTransformer) { + this.responseTransformer = responseTransformer; + return this; + } + + public AsyncResponseTransformer<OutputT, ?> getAsyncResponseTransformer() { + return asyncResponseTransformer; + } + + public ClientExecutionParams<InputT, OutputT> withAsyncResponseTransformer( + AsyncResponseTransformer<OutputT, ?> asyncResponseTransformer) { + this.asyncResponseTransformer = asyncResponseTransformer; + return this; + } + public boolean isFullDuplex() { return fullDuplex; } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java index 08f890bd8333..bd09c48d9f43 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.core.interceptor; +import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; import software.amazon.awssdk.annotations.SdkProtectedApi; @@ -28,6 +29,7 @@ import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; +import software.amazon.awssdk.core.useragent.AdditionalMetadata; import software.amazon.awssdk.core.useragent.BusinessMetricCollection; import software.amazon.awssdk.endpoints.Endpoint; import software.amazon.awssdk.endpoints.EndpointProvider; @@ -55,6 +57,12 @@ public final class SdkInternalExecutionAttribute extends SdkExecutionAttribute { public 
static final ExecutionAttribute<BusinessMetricCollection> BUSINESS_METRICS = new ExecutionAttribute<>("BusinessMetricsCollection"); + /** + * A collection of metadata to be added to the UserAgent. + */ + public static final ExecutionAttribute<List<AdditionalMetadata>> USER_AGENT_METADATA = + new ExecutionAttribute<>("UserAgentMetadata"); + /** * If true, indicates that this is an event streaming request being sent over RPC, and therefore the serialized * request object is encapsulated as an event of type {@code initial-request}. @@ -189,6 +197,13 @@ public final class SdkInternalExecutionAttribute extends SdkExecutionAttribute { public static final ExecutionAttribute<ResponseChecksumValidation> RESPONSE_CHECKSUM_VALIDATION = new ExecutionAttribute<>( "ResponseChecksumValidation"); + /** + * The token configured from the environment or system properties, used to determine if the BEARER_SERVICE_ENV_VARS + * business metric should be set. + */ + public static final ExecutionAttribute TOKEN_CONFIGURED_FROM_ENV = new ExecutionAttribute<>( + "TokenConfiguredFromEnv"); + /** * The backing attribute for RESOLVED_CHECKSUM_SPECS. * This holds the real ChecksumSpecs value, and is used to map to the ChecksumAlgorithm signer property diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncResponseTransformer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncResponseTransformer.java index 90d587cd5a36..d1103ea2a2de 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncResponseTransformer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncResponseTransformer.java @@ -65,6 +65,11 @@ public void exceptionOccurred(Throwable throwable) { cf.completeExceptionally(throwable); } + @Override + public String name() { + return TransformerType.BYTES.getName(); + } + static class BaosSubscriber implements Subscriber<ByteBuffer> { private final CompletableFuture<byte[]> resultFuture; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBody.java index 87540e5363ba..a4ee21fe0238 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBody.java @@ -118,6 +118,11 @@ public void cancel() { } } + @Override + public String body() { + return BodyType.BYTES.getName(); + } + public static ByteBuffersAsyncRequestBody of(ByteBuffer... buffers) { long length = Arrays.stream(buffers) .mapToLong(ByteBuffer::remaining) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBody.java index f8bbdd552088..f5dcc164f61c 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBody.java @@ -141,6 +141,11 @@ public void subscribe(Subscriber<? super ByteBuffer> s) { } } + @Override + public String body() { + return BodyType.FILE.getName(); + } + /** * @return Builder instance to construct a {@link FileAsyncRequestBody}. 
*/ diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncResponseTransformer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncResponseTransformer.java index 9d0bdf560af2..4348355fa5d8 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncResponseTransformer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncResponseTransformer.java @@ -169,6 +169,11 @@ public void exceptionOccurred(Throwable throwable) { } } + @Override + public String name() { + return TransformerType.FILE.getName(); + } + /** * {@link Subscriber} implementation that writes chunks to a file. */ diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamResponseTransformer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamResponseTransformer.java index 434894a44c8c..72ece7a26575 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamResponseTransformer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamResponseTransformer.java @@ -59,4 +59,9 @@ public void onStream(SdkPublisher publisher) { public void exceptionOccurred(Throwable error) { future.completeExceptionally(error); } + + @Override + public String name() { + return TransformerType.STREAM.getName(); + } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamWithExecutorAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamWithExecutorAsyncRequestBody.java index fc742536ec88..7ee81817ed11 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamWithExecutorAsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamWithExecutorAsyncRequestBody.java @@ -86,6 +86,11 @@ public void subscribe(Subscriber s) { } } + @Override + public String body() { + return BodyType.STREAM.getName(); + } + private void tryReset(InputStream inputStream) { try { inputStream.reset(); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/PublisherAsyncResponseTransformer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/PublisherAsyncResponseTransformer.java index d5448a5addcd..3f0f9fa19fce 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/PublisherAsyncResponseTransformer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/PublisherAsyncResponseTransformer.java @@ -57,4 +57,9 @@ public void onStream(SdkPublisher publisher) { public void exceptionOccurred(Throwable error) { future.completeExceptionally(error); } + + @Override + public String name() { + return TransformerType.PUBLISHER.getName(); + } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStage.java index 1f6e2b9949d3..ef1e3fb2cc9d 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStage.java @@ -41,6 +41,7 @@ import 
software.amazon.awssdk.core.internal.http.RequestExecutionContext; import software.amazon.awssdk.core.internal.http.pipeline.MutableRequestToRequestPipeline; import software.amazon.awssdk.core.internal.useragent.IdentityProviderNameMapping; +import software.amazon.awssdk.core.useragent.AdditionalMetadata; import software.amazon.awssdk.core.useragent.BusinessMetricCollection; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.identity.spi.Identity; @@ -110,6 +111,13 @@ private String finalizeUserAgent(RequestExecutionContext context) { javaUserAgent.append(clientUserAgent); + //add useragent metadata from execution context + List userAgentMetadata = + context.executionAttributes().getAttribute(SdkInternalExecutionAttribute.USER_AGENT_METADATA); + if (userAgentMetadata != null) { + userAgentMetadata.forEach(s -> javaUserAgent.append(SPACE).append(s)); + } + //add remaining SDK user agent properties identityProviderName(context.executionAttributes()).ifPresent( authSource -> appendSpaceAndField(javaUserAgent, CONFIG_METADATA, uaPair(AUTH_SOURCE, authSource))); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/BufferingContentStreamProvider.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/BufferingContentStreamProvider.java index 856f528eadb5..1072ce98caa9 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/BufferingContentStreamProvider.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/BufferingContentStreamProvider.java @@ -61,6 +61,11 @@ public InputStream newStream() { return bufferedStream; } + @Override + public String name() { + return ProviderType.STREAM.getName(); + } + class ByteArrayStream extends ByteArrayInputStream { ByteArrayStream(byte[] buf, int offset, int length) { diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/FileContentStreamProvider.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/FileContentStreamProvider.java index 69d277960d4c..fb6262b28c26 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/FileContentStreamProvider.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/FileContentStreamProvider.java @@ -42,6 +42,11 @@ public InputStream newStream() { return currentStream; } + @Override + public String name() { + return ProviderType.FILE.getName(); + } + private void closeCurrentStream() { if (currentStream != null) { invokeSafely(currentStream::close); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/sync/RequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/sync/RequestBody.java index e751a8e0d631..ae30c95e615c 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/sync/RequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/sync/RequestBody.java @@ -20,7 +20,6 @@ import static software.amazon.awssdk.utils.Validate.paramNotNull; import static software.amazon.awssdk.utils.Validate.validState; -import java.io.ByteArrayInputStream; import java.io.File; import java.io.InputStream; import java.nio.ByteBuffer; @@ -138,12 +137,21 @@ public static RequestBody fromFile(File file) { public static RequestBody fromInputStream(InputStream inputStream, long contentLength) { IoUtils.markStreamWithMaxReadLimit(inputStream); InputStream nonCloseable = nonCloseableInputStream(inputStream); - return fromContentProvider(() -> { - if 
(nonCloseable.markSupported()) { - invokeSafely(nonCloseable::reset); + ContentStreamProvider provider = new ContentStreamProvider() { + @Override + public InputStream newStream() { + if (nonCloseable.markSupported()) { + invokeSafely(nonCloseable::reset); + } + return nonCloseable; } - return nonCloseable; - }, contentLength, Mimetype.MIMETYPE_OCTET_STREAM); + + @Override + public String name() { + return ProviderType.STREAM.getName(); + } + }; + return fromContentProvider(provider, contentLength, Mimetype.MIMETYPE_OCTET_STREAM); } /** @@ -268,7 +276,7 @@ private static RequestBody fromBytesDirect(byte[] bytes) { * Creates a {@link RequestBody} using the specified bytes (without copying). */ private static RequestBody fromBytesDirect(byte[] bytes, String mimetype) { - return new RequestBody(() -> new ByteArrayInputStream(bytes), (long) bytes.length, mimetype); + return new RequestBody(ContentStreamProvider.fromByteArrayUnsafe(bytes), (long) bytes.length, mimetype); } private static InputStream nonCloseableInputStream(InputStream inputStream) { diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/sync/ResponseTransformer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/sync/ResponseTransformer.java index 3aa8ecac2698..f883b591829e 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/sync/ResponseTransformer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/sync/ResponseTransformer.java @@ -26,6 +26,8 @@ import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.core.ResponseBytes; import software.amazon.awssdk.core.ResponseInputStream; @@ -37,6 +39,7 @@ import software.amazon.awssdk.http.AbortableInputStream; import software.amazon.awssdk.utils.IoUtils; import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.internal.EnumUtils; /** * Interface for processing a streaming response from a service in a synchronous fashion. This interfaces gives @@ -93,6 +96,16 @@ default boolean needsConnectionLeftOpen() { return false; } + /** + * Each ResponseTransformer should return a well-formed name that can be used to identify the implementation. + * The Transformer name should only include alphanumeric characters. + * + * @return String containing the identifying name of this ResponseTransformer implementation. + */ + default String name() { + return TransformerType.UNKNOWN.getName(); + } + /** * Creates a response transformer that writes all response content to the specified file. If the file already exists * then a {@link java.nio.file.FileAlreadyExistsException} will be thrown. @@ -102,34 +115,42 @@ default boolean needsConnectionLeftOpen() { * @return ResponseTransformer instance. 
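* <p>Illustrative only, not part of this patch: a transformer obtained from {@code toFile(Paths.get("out.dat"))}
* (a hypothetical path) now reports {@code name()} as "File", which
* {@code TransformerType.shortValueFromName("File")} maps to the short value "f" for metrics reporting.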
*/ static <ResponseT> ResponseTransformer<ResponseT, ResponseT> toFile(Path path) { - return (resp, in) -> { - try { - InterruptMonitor.checkInterrupted(); - Files.copy(in, path); - return resp; - } catch (IOException copyException) { - String copyError = "Failed to read response into file: " + path; + return new ResponseTransformer<ResponseT, ResponseT>() { + @Override + public ResponseT transform(ResponseT response, AbortableInputStream inputStream) throws Exception { + try { + InterruptMonitor.checkInterrupted(); + Files.copy(inputStream, path); + return response; + } catch (IOException copyException) { + String copyError = "Failed to read response into file: " + path; - if (shouldThrowIOException(copyException)) { - throw new IOException(copyError, copyException); - } + if (shouldThrowIOException(copyException)) { + throw new IOException(copyError, copyException); + } - // Try to clean up the file so that we can retry the request. If we can't delete it, don't retry the request. - try { - Files.deleteIfExists(path); - } catch (IOException deletionException) { - Logger.loggerFor(ResponseTransformer.class) - .error(() -> "Failed to delete destination file '" + path + - "' after reading the service response " + - "failed.", deletionException); - - throw new IOException(copyError + ". Additionally, the file could not be cleaned up (" + - deletionException.getMessage() + "), so the request will not be retried.", - copyException); + // Try to clean up the file so that we can retry the request. If we can't delete it, don't retry the request. + try { + Files.deleteIfExists(path); + } catch (IOException deletionException) { + Logger.loggerFor(ResponseTransformer.class) + .error(() -> "Failed to delete destination file '" + path + + "' after reading the service response " + + "failed.", deletionException); + + throw new IOException(copyError + ". Additionally, the file could not be cleaned up (" + + deletionException.getMessage() + "), so the request will not be retried.", + copyException); + } + + // Retry the request + throw RetryableException.builder().message(copyError).cause(copyException).build(); } + } - // Retry the request - throw RetryableException.builder().message(copyError).cause(copyException).build(); + @Override + public String name() { + return TransformerType.FILE.getName(); } }; } @@ -166,10 +187,18 @@ static <ResponseT> ResponseTransformer<ResponseT, ResponseT> toFile(File file) { * @return ResponseTransformer instance. */ static <ResponseT> ResponseTransformer<ResponseT, ResponseT> toOutputStream(OutputStream outputStream) { - return (resp, in) -> { - InterruptMonitor.checkInterrupted(); - IoUtils.copy(in, outputStream); - return resp; + return new ResponseTransformer<ResponseT, ResponseT>() { + @Override + public ResponseT transform(ResponseT response, AbortableInputStream inputStream) throws Exception { + InterruptMonitor.checkInterrupted(); + IoUtils.copy(inputStream, outputStream); + return response; + } + + @Override + public String name() { + return TransformerType.STREAM.getName(); + } }; } @@ -181,12 +210,20 @@ static <ResponseT> ResponseTransformer<ResponseT, ResponseT> toOutputStream(OutputStream outputStream) { * @return The streaming response transformer that can be used on the client streaming method. 
*/ static <ResponseT> ResponseTransformer<ResponseT, ResponseBytes<ResponseT>> toBytes() { - return (response, inputStream) -> { - try { - InterruptMonitor.checkInterrupted(); - return ResponseBytes.fromByteArrayUnsafe(response, IoUtils.toByteArray(inputStream)); - } catch (IOException e) { - throw RetryableException.builder().message("Failed to read response.").cause(e).build(); + return new ResponseTransformer<ResponseT, ResponseBytes<ResponseT>>() { + @Override + public ResponseBytes<ResponseT> transform(ResponseT response, AbortableInputStream inputStream) throws Exception { + try { + InterruptMonitor.checkInterrupted(); + return ResponseBytes.fromByteArrayUnsafe(response, IoUtils.toByteArray(inputStream)); + } catch (IOException e) { + throw RetryableException.builder().message("Failed to read response.").cause(e).build(); + } + } + + @Override + public String name() { + return TransformerType.BYTES.getName(); } }; } @@ -194,7 +231,7 @@ static <ResponseT> ResponseTransformer<ResponseT, ResponseBytes<ResponseT>> toBytes() { /** * Creates a response transformer that returns an unmanaged input stream with the response content. This input stream must * be explicitly closed to release the connection. The unmarshalled response object can be obtained via the {@link - * ResponseInputStream#response} method. + * ResponseInputStream#response()} method. *

    * Note that the returned stream is not subject to the retry policy or timeout settings (except for socket timeout) * of the client. No retries will be performed in the event of a socket read failure or connection reset. @@ -203,7 +240,17 @@ static <ResponseT> ResponseTransformer<ResponseT, ResponseBytes<ResponseT>> toBytes() { * @return ResponseTransformer instance. */ static <ResponseT> ResponseTransformer<ResponseT, ResponseInputStream<ResponseT>> toInputStream() { - return unmanaged(ResponseInputStream::new); + return unmanaged(new ResponseTransformer<ResponseT, ResponseInputStream<ResponseT>>() { + @Override + public ResponseInputStream<ResponseT> transform(ResponseT response, AbortableInputStream inputStream) { + return new ResponseInputStream<>(response, inputStream); + } + + @Override + public String name() { + return TransformerType.STREAM.getName(); + } + }); } /** @@ -228,7 +275,43 @@ public ReturnT transform(ResponseT response, AbortableInputStream inputStream) throws Exception { public boolean needsConnectionLeftOpen() { return true; } + + @Override + public String name() { + return transformer.name(); + } }; + } + + @SdkProtectedApi + enum TransformerType { + FILE("File", "f"), + BYTES("Bytes", "b"), + STREAM("Stream", "s"), + UNKNOWN("Unknown", "u"); + + private static final Map<String, TransformerType> VALUE_MAP = + EnumUtils.uniqueIndex(TransformerType.class, TransformerType::getName); + + private final String name; + private final String shortValue; + + TransformerType(String name, String shortValue) { + this.name = name; + this.shortValue = shortValue; + } + + public String getName() { + return name; + } + + public String getShortValue() { + return shortValue; + } + + public static String shortValueFromName(String name) { + return VALUE_MAP.getOrDefault(name, UNKNOWN).getShortValue(); + } } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/useragent/AdditionalMetadata.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/useragent/AdditionalMetadata.java new file mode 100644 index 000000000000..dc9eff087bdf --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/useragent/AdditionalMetadata.java @@ -0,0 +1,118 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.useragent; + +import static software.amazon.awssdk.utils.Validate.notNull; + +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.core.internal.useragent.UserAgentConstant; + +/** + * Represents UserAgent additional metadata following the format: md/[name]#[value] + */ +@SdkProtectedApi +public final class AdditionalMetadata { + private final String name; + private final String value; + + private AdditionalMetadata(BuilderImpl b) { + this.name = notNull(b.name, "name must not be null"); + this.value = notNull(b.value, "value must not be null"); + } + + public String name() { + return name; + } + + public String value() { + return value; + } + + @Override + public String toString() { + // Format "md/{name}#{value}" + return UserAgentConstant.field( + UserAgentConstant.METADATA, + UserAgentConstant.uaPair(name, value)); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + AdditionalMetadata that = (AdditionalMetadata) o; + return Objects.equals(name, that.name) && + Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + int result = name != null ? name.hashCode() : 0; + result = 31 * result + (value != null ? value.hashCode() : 0); + return result; + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public interface Builder { + /** + * Set the name of the additional metadata. + * + * @param name The name. + * @return This object for method chaining. + */ + Builder name(String name); + + /** + * Set the value of the additional metadata. + * + * @param value The value. + * @return This object for method chaining. 
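+ * <p>Illustrative example, not part of the original javadoc:
+ * {@code AdditionalMetadata.builder().name("ft").value("paginator").build().toString()} renders as
+ * {@code md/ft#paginator}; the name/value pair here is hypothetical.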
+ */ + Builder value(String value); + + AdditionalMetadata build(); + } + + private static final class BuilderImpl implements Builder { + private String name; + private String value; + + @Override + public Builder name(String name) { + this.name = name; + return this; + } + + @Override + public Builder value(String value) { + this.value = value; + return this; + } + + @Override + public AdditionalMetadata build() { + return new AdditionalMetadata(this); + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/useragent/BusinessMetricFeatureId.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/useragent/BusinessMetricFeatureId.java index 3779726894da..7f1483d56895 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/useragent/BusinessMetricFeatureId.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/useragent/BusinessMetricFeatureId.java @@ -41,6 +41,7 @@ public enum BusinessMetricFeatureId { ACCOUNT_ID_MODE_REQUIRED("R"), RESOLVED_ACCOUNT_ID("T"), DDB_MAPPER("d"), + BEARER_SERVICE_ENV_VARS("3"), UNKNOWN("Unknown"); private static final Map VALUE_MAP = diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStageTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStageTest.java index 68c72afe762b..d02654a78071 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStageTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStageTest.java @@ -24,6 +24,7 @@ import static software.amazon.awssdk.core.internal.useragent.UserAgentConstant.RETRY_MODE; import static software.amazon.awssdk.core.internal.useragent.UserAgentConstant.SPACE; +import java.util.Arrays; import java.util.List; import java.util.concurrent.CompletableFuture; import org.junit.Test; @@ -43,6 +44,7 @@ import software.amazon.awssdk.core.internal.http.RequestExecutionContext; import software.amazon.awssdk.core.internal.useragent.SdkClientUserAgentProperties; import software.amazon.awssdk.core.internal.useragent.SdkUserAgentBuilder; +import software.amazon.awssdk.core.useragent.AdditionalMetadata; import software.amazon.awssdk.core.util.SystemUserAgent; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; @@ -120,6 +122,24 @@ public void when_requestContainsApiName_apiNamesArePresent() throws Exception { assertThat(userAgentHeaders.get(0)).contains("myLib/1.0"); } + @Test + public void when_requestContainsMetadata_metadataIsPresent() throws Exception { + ApplyUserAgentStage stage = new ApplyUserAgentStage(dependencies(clientUserAgent())); + + RequestExecutionContext ctx = requestExecutionContext( + executionAttributes(Arrays.asList( + AdditionalMetadata.builder().name("name1").value("value1").build(), + AdditionalMetadata.builder().name("name2").value("value2").build() + )), + noOpRequest()); + SdkHttpFullRequest.Builder request = stage.execute(SdkHttpFullRequest.builder(), ctx); + + List userAgentHeaders = request.headers().get(HEADER_USER_AGENT); + assertThat(userAgentHeaders).isNotNull().hasSize(1); + assertThat(userAgentHeaders.get(0)).contains("md/name1#value1"); + assertThat(userAgentHeaders.get(0)).contains("md/name2#value2"); + } + @Test public void when_identityContainsProvider_authSourceIsPresent() throws Exception { ApplyUserAgentStage stage = new 
ApplyUserAgentStage(dependencies(clientUserAgent())); @@ -185,6 +205,12 @@ private static ExecutionAttributes executionAttributes(AwsCredentialsIdentity id return executionAttributes; } + private static ExecutionAttributes executionAttributes(List metadata) { + ExecutionAttributes executionAttributes = new ExecutionAttributes(); + executionAttributes.putAttribute(SdkInternalExecutionAttribute.USER_AGENT_METADATA, metadata); + return executionAttributes; + } + private RequestExecutionContext requestExecutionContext(ExecutionAttributes executionAttributes, SdkRequest request) { ExecutionContext executionContext = ExecutionContext.builder() diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/useragent/AdditionalMetadataTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/useragent/AdditionalMetadataTest.java new file mode 100644 index 000000000000..fa201ad0d0a1 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/useragent/AdditionalMetadataTest.java @@ -0,0 +1,41 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.useragent; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class AdditionalMetadataTest { + + @Test + public void toString_formatsCorrectly() { + AdditionalMetadata metadata = AdditionalMetadata.builder() + .name("name") + .value("value") + .build(); + assertEquals("md/name#value", metadata.toString()); + } + @Test + public void equalsHashCode() { + EqualsVerifier.forClass(AdditionalMetadata.class) + .withNonnullFields("name", "value") + .verify(); + } +} + + diff --git a/http-client-spi/pom.xml b/http-client-spi/pom.xml index 90a0bf8925c5..de8abe3f542e 100644 --- a/http-client-spi/pom.xml +++ b/http-client-spi/pom.xml @@ -22,7 +22,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT http-client-spi AWS Java SDK :: HTTP Client Interface diff --git a/http-client-spi/src/main/java/software/amazon/awssdk/http/ContentStreamProvider.java b/http-client-spi/src/main/java/software/amazon/awssdk/http/ContentStreamProvider.java index 59493fd33e4d..4ae96838d8b4 100644 --- a/http-client-spi/src/main/java/software/amazon/awssdk/http/ContentStreamProvider.java +++ b/http-client-spi/src/main/java/software/amazon/awssdk/http/ContentStreamProvider.java @@ -22,11 +22,14 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.util.Arrays; +import java.util.Map; import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.utils.IoUtils; import software.amazon.awssdk.utils.StringInputStream; import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.internal.EnumUtils; /** * Provides the content stream of a request. 
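 * <p>Illustrative only, not part of this patch: the factory methods below now attach an identifying name, e.g.
 * <pre>{@code
 * ContentStreamProvider provider = ContentStreamProvider.fromString("hello", StandardCharsets.UTF_8);
 * provider.name();                                                 // returns "String"
 * ContentStreamProvider.ProviderType.shortValueFromName("String"); // returns "c"
 * }</pre>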
@@ -45,7 +48,7 @@ public interface ContentStreamProvider { static ContentStreamProvider fromByteArray(byte[] bytes) { Validate.paramNotNull(bytes, "bytes"); byte[] copy = Arrays.copyOf(bytes, bytes.length); - return () -> new ByteArrayInputStream(copy); + return fromByteArrayUnsafe(copy); } /** @@ -58,7 +61,17 @@ static ContentStreamProvider fromByteArray(byte[] bytes) { */ static ContentStreamProvider fromByteArrayUnsafe(byte[] bytes) { Validate.paramNotNull(bytes, "bytes"); - return () -> new ByteArrayInputStream(bytes); + return new ContentStreamProvider() { + @Override + public InputStream newStream() { + return new ByteArrayInputStream(bytes); + } + + @Override + public String name() { + return ProviderType.BYTES.getName(); + } + }; } /** @@ -67,7 +80,17 @@ static ContentStreamProvider fromByteArrayUnsafe(byte[] bytes) { static ContentStreamProvider fromString(String string, Charset charset) { Validate.paramNotNull(string, "string"); Validate.paramNotNull(charset, "charset"); - return () -> new StringInputStream(string, charset); + return new ContentStreamProvider() { + @Override + public InputStream newStream() { + return new StringInputStream(string, charset); + } + + @Override + public String name() { + return ProviderType.STRING.getName(); + } + }; } /** @@ -105,6 +128,11 @@ public InputStream newStream() { throw new IllegalStateException("Content input stream does not support mark/reset, " + "and was already read once."); } + + @Override + public String name() { + return ProviderType.STREAM.getName(); + } }; } @@ -125,6 +153,11 @@ public InputStream newStream() { lastStream = inputStreamSupplier.get(); return lastStream; } + + @Override + public String name() { + return ProviderType.STREAM.getName(); + } }; } @@ -132,4 +165,47 @@ public InputStream newStream() { * @return The content stream. */ InputStream newStream(); + + /** + * Each ContentStreamProvider should return a well-formed name that can be used to identify the implementation. + * The stream name should only include alphanumeric characters. + * + * @return String containing the identifying name of this ContentStreamProvider implementation. 
+ */ + default String name() { + return ProviderType.UNKNOWN.getName(); + } + + @SdkProtectedApi + enum ProviderType { + FILE("File", "f"), + BYTES("Bytes", "b"), + STRING("String", "c"), + STREAM("Stream", "s"), + UNKNOWN("Unknown", "u"); + + private static final Map VALUE_MAP = + EnumUtils.uniqueIndex(ProviderType.class, ProviderType::getName); + + private final String name; + private final String shortValue; + + + ProviderType(String name, String shortValue) { + this.name = name; + this.shortValue = shortValue; + } + + public String getName() { + return name; + } + + public String getShortValue() { + return shortValue; + } + + public static String shortValueFromName(String name) { + return VALUE_MAP.getOrDefault(name, UNKNOWN).getShortValue(); + } + } } diff --git a/http-clients/apache-client/pom.xml b/http-clients/apache-client/pom.xml index 67e732e4ed47..97cf9a73b52f 100644 --- a/http-clients/apache-client/pom.xml +++ b/http-clients/apache-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT apache-client diff --git a/http-clients/aws-crt-client/pom.xml b/http-clients/aws-crt-client/pom.xml index 0daf3595a8db..7e443c089d0b 100644 --- a/http-clients/aws-crt-client/pom.xml +++ b/http-clients/aws-crt-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/http-clients/netty-nio-client/pom.xml b/http-clients/netty-nio-client/pom.xml index fec6f3778bb2..973170d4b266 100644 --- a/http-clients/netty-nio-client/pom.xml +++ b/http-clients/netty-nio-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/http-clients/pom.xml b/http-clients/pom.xml index ba4fbe20132d..c8ae11414a49 100644 --- a/http-clients/pom.xml +++ b/http-clients/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 diff --git a/http-clients/url-connection-client/pom.xml b/http-clients/url-connection-client/pom.xml index 0920cd0cf322..77838ece32b4 100644 --- a/http-clients/url-connection-client/pom.xml +++ b/http-clients/url-connection-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 @@ -60,18 +60,6 @@ junit-vintage-engine test - - software.amazon.awssdk - s3 - ${awsjavasdk.version} - test - - - service-test-utils - software.amazon.awssdk - ${awsjavasdk.version} - test - org.assertj assertj-core @@ -83,12 +71,6 @@ ${awsjavasdk.version} test - - software.amazon.awssdk - regions - ${awsjavasdk.version} - test - software.amazon.awssdk test-utils diff --git a/metric-publishers/cloudwatch-metric-publisher/pom.xml b/metric-publishers/cloudwatch-metric-publisher/pom.xml index d7b8f408a29f..805b4428b91c 100644 --- a/metric-publishers/cloudwatch-metric-publisher/pom.xml +++ b/metric-publishers/cloudwatch-metric-publisher/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk metric-publishers - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT cloudwatch-metric-publisher diff --git a/metric-publishers/emf-metric-logging-publisher/pom.xml b/metric-publishers/emf-metric-logging-publisher/pom.xml index 42e46af14dd2..8d557c11d0c1 100644 --- a/metric-publishers/emf-metric-logging-publisher/pom.xml +++ b/metric-publishers/emf-metric-logging-publisher/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk metric-publishers - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT emf-metric-logging-publisher diff --git a/metric-publishers/pom.xml b/metric-publishers/pom.xml 
index d6cdbc54a98d..7a40be4ead53 100644 --- a/metric-publishers/pom.xml +++ b/metric-publishers/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT metric-publishers diff --git a/pom.xml b/pom.xml index 9320b9157a9d..71ee1b20c6ba 100644 --- a/pom.xml +++ b/pom.xml @@ -20,7 +20,7 @@ 4.0.0 software.amazon.awssdk aws-sdk-java-pom - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT pom AWS Java SDK :: Parent The Amazon Web Services SDK for Java provides Java APIs @@ -101,8 +101,8 @@ ${project.version} - 2.31.51 - 2.31.50 + 2.31.64 + 2.31.63 2.15.2 2.15.2 2.17.3 diff --git a/release-scripts/pom.xml b/release-scripts/pom.xml index 84bc7a5874d1..a98b09f50ed4 100644 --- a/release-scripts/pom.xml +++ b/release-scripts/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ../pom.xml release-scripts diff --git a/scripts/changelog/__init__.py b/scripts/changelog/__init__.py index 8b137891791f..160cebd9c3a9 100644 --- a/scripts/changelog/__init__.py +++ b/scripts/changelog/__init__.py @@ -1 +1,2 @@ - +#!/usr/bin/env python3 +# Package initialization file diff --git a/scripts/changelog/git.py b/scripts/changelog/git.py index 0a8e5e8646f7..9eaedf69c2eb 100644 --- a/scripts/changelog/git.py +++ b/scripts/changelog/git.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 import subprocess def stage_file(filename): return subprocess.call(["git", "add", "-A", filename]) \ No newline at end of file diff --git a/scripts/changelog/model.py b/scripts/changelog/model.py index 0b21fedd415f..12d334d7d02a 100644 --- a/scripts/changelog/model.py +++ b/scripts/changelog/model.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 class ReleaseChanges(object): def __init__(self, version, date, entries): self.version = version @@ -32,4 +33,5 @@ def prerelease_version_number(self): preview_prefix_len = len("preview-") prerelease_version = self.prerelease[preview_prefix_len:] if prerelease_version != "": - return int(prerelease_version) \ No newline at end of file + return int(prerelease_version) + return None \ No newline at end of file diff --git a/scripts/changelog/util.py b/scripts/changelog/util.py index acb86d8d8049..1815d8b282ed 100644 --- a/scripts/changelog/util.py +++ b/scripts/changelog/util.py @@ -1,14 +1,23 @@ +#!/usr/bin/env python3 import json import os from datetime import date +import functools from changelog.model import ReleaseChanges, ChangelogEntry, Version -def version_cmp(a,b): +def version_cmp(a, b): aa = [a.major, a.minor, a.patch, a.prerelease_version_number()] bb = [b.major, b.minor, b.patch, b.prerelease_version_number()] - return cmp(bb,aa) + # In Python 3, we need to implement our own comparison function + # since the built-in cmp function was removed + if aa > bb: + return -1 + elif aa < bb: + return 1 + else: + return 0 def load_all_released_changes(d): if not os.path.isdir(d): diff --git a/scripts/changelog/writer.py b/scripts/changelog/writer.py index ad9cad65e502..ef714a81b021 100644 --- a/scripts/changelog/writer.py +++ b/scripts/changelog/writer.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 from changelog.git import stage_file from changelog.util import load_all_released_changes, load_unreleased_changes, version_cmp from functools import cmp_to_key @@ -70,9 +71,6 @@ def group_entries(self): def get_sorted_categories(self): return sorted(list(self.categories)) - def is_service_category(self,s): - return s.lower() not in NON_SERVICE_CATEGORIES - def write_header(self): version_string = self.current_changes.version if 
version_string is None: diff --git a/scripts/doc_crosslinks/generate_cross_link_data.py b/scripts/doc_crosslinks/generate_cross_link_data.py index 247cd155a070..699ec7319d90 100644 --- a/scripts/doc_crosslinks/generate_cross_link_data.py +++ b/scripts/doc_crosslinks/generate_cross_link_data.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 import os import argparse import io diff --git a/scripts/finalize-release-changes b/scripts/finalize-release-changes index f22b63872a14..65625c891930 100755 --- a/scripts/finalize-release-changes +++ b/scripts/finalize-release-changes @@ -1,6 +1,4 @@ -#!/usr/bin/env python - -from __future__ import print_function +#!/usr/bin/env python3 import argparse import os diff --git a/scripts/generate-changelog b/scripts/generate-changelog index 882d3657fdd4..722154ba8b45 100755 --- a/scripts/generate-changelog +++ b/scripts/generate-changelog @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from changelog.writer import write_changelog diff --git a/scripts/new-change b/scripts/new-change index cfe8801319e3..093a9edafaea 100755 --- a/scripts/new-change +++ b/scripts/new-change @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # This file is borrowed from the aws/aws-cli project with the following modifications: # - Add a 'deprecation' category, and validation for the category value # - Modify the 'linkify' method to use Markdown syntax instead of reStructuredText (rst) diff --git a/scripts/run-integ-test b/scripts/run-integ-test index 13f10827b7f0..556d2cc91f58 100755 --- a/scripts/run-integ-test +++ b/scripts/run-integ-test @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """Run Integ Tests based on the changed files """ @@ -24,9 +24,9 @@ def check_diffs(): diff, stderr = process.communicate() - if process.returncode !=0: + if process.returncode != 0: raise Exception("Unable to do git diff") - return diff.splitlines(False) + return diff.decode('utf-8').splitlines() def get_modules(file_path): """ @@ -44,7 +44,7 @@ def get_modules(file_path): return core_modules_to_test if top_directory in ["http-clients"]: return http_modules_to_test.get(path[1]) - elif top_directory== "services": + elif top_directory == "services": return path[1] def run_tests(modules): diff --git a/scripts/validate-brazil-config b/scripts/validate-brazil-config new file mode 100755 index 000000000000..7b58aacc9cf0 --- /dev/null +++ b/scripts/validate-brazil-config @@ -0,0 +1,106 @@ +#!/usr/bin/env python3 +from pathlib import Path +import json +import sys +import re + +# This script validates that: +# - All SDK modules are mapped or skipped for internal import +# - All non-SDK dependencies of imported modules are mapped to internal +# packages +# +# Usage: validate-brazil-config [module-paths-file] [dependencies-file] + +# Generating module-paths-file: +# mvn exec:exec -Dexec.executable=pwd -pl \!:aws-sdk-java-pom,\!:sdk-benchmarks,\!:module-path-tests -q 2>&1 > modules.txt +# +# Generates contents similar to: +# /workspace/aws-sdk-java-v2/build-tools +# /workspace/aws-sdk-java-v2/core +# /workspace/aws-sdk-java-v2/core/annotations +# /workspace/aws-sdk-java-v2/utils +# /workspace/aws-sdk-java-v2/test/test-utils +# /workspace/aws-sdk-java-v2/core/metrics-spi +# /workspace/aws-sdk-java-v2/http-client-spi +# /workspace/aws-sdk-java-v2/core/endpoints-spi +# /workspace/aws-sdk-java-v2/core/identity-spi +# /workspace/aws-sdk-java-v2/core/http-auth-spi +# ... 
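+# Illustrative check of the patterns used below (not part of the original
+# script): dependency_pattern captures "group:artifact" plus its scope from a
+# dependency:list line, e.g.:
+#
+#   import re
+#   p = re.compile(r"\[INFO\] ([^: ]+:[^: ]+):jar:[^: ]+:(compile|runtime)")
+#   m = p.match("[INFO] org.slf4j:slf4j-api:jar:1.7.36:compile")
+#   assert m.group(1) == "org.slf4j:slf4j-api" and m.group(2) == "compile"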
+ +# Generating dependencies-file: +# mvn dependency:list -DexcludeTransitive=true -DincludeScope=runtime 2>&1 > deps.txt +# +# Generates content similar to: +# +# [INFO] -----------------< software.amazon.awssdk:test-utils >------------------ +# [INFO] Building AWS Java SDK :: Test :: Test Utils 2.31.61-SNAPSHOT [6/493] +# [INFO] from test/test-utils/pom.xml +# [INFO] --------------------------------[ jar ]--------------------------------- +# [INFO] +# [INFO] --- dependency:3.1.1:list (default-cli) @ test-utils --- +# [INFO] +# [INFO] The following files have been resolved: +# [INFO] org.slf4j:slf4j-api:jar:1.7.36:compile -- module org.slf4j [auto] +# [INFO] org.junit.jupiter:junit-jupiter:jar:5.10.0:compile -- module org.junit.jupiter +# [INFO] com.fasterxml.jackson.core:jackson-core:jar:2.15.2:compile -- module com.fasterxml.jackson.core +# ... + +brazil_import_config_path=".brazil.json" + +with open(brazil_import_config_path) as f: + brazil_import_config = json.loads(f.read()) + +modules_path=sys.argv[1] + +core_modules = set() + +with open(modules_path) as f: + for l in f.readlines(): + l = l.strip() + module_path = Path(l) + name = module_path.name + if module_path.parent.name != 'services': + core_modules.add(name) + +# Ensure all 'core' modules are mapped. For the purposes of this validation, we +# don't care if we map to a package name or skip import. +config_modules = brazil_import_config['modules'] +for core_module in core_modules: + if core_module not in config_modules: + raise Exception(f"The module {core_module} is not mapped!") + + +# Ensure all dependencies are mapped. +current_module_pattern = re.compile(r"\[INFO\] --- .*:list \(default-cli\) @ (.*) ---") +dependency_pattern = re.compile(r"\[INFO\] ([^: ]+:[^: ]+):jar:[^: ]+:(compile|runtime)") + +deps_path=sys.argv[2] +config_dependencies = brazil_import_config['dependencies'] +with open(deps_path) as f: + for l in f.readlines(): + # Match a line that gives the name of the current module + match = current_module_pattern.match(l) + if match is not None: + # Unless explicitly skipped, all modules are imported + skipping_import = False + current_module = match.group(1) + + if current_module in config_modules: + module_import = config_modules[current_module] + + if 'skipImport' in module_import and module_import['skipImport']: + print(f"Module import skipped for {current_module}") + skipping_import = True + + continue + + # Match a line that gives a dependency of a given module + match = dependency_pattern.match(l) + if match is not None and \ + not skipping_import and \ + not match.group(1).startswith("software.amazon.awssdk:"): + # The current module is being imported, and this dependency is not an SDK + # module. 
Ensure that it's mapped + dependency_name = match.group(1) + if dependency_name not in config_dependencies: + raise Exception(f"The dependency {dependency_name} is not mapped!") \ No newline at end of file diff --git a/services-custom/dynamodb-enhanced/pom.xml b/services-custom/dynamodb-enhanced/pom.xml index fec76a6af84f..0948ff1f2f93 100644 --- a/services-custom/dynamodb-enhanced/pom.xml +++ b/services-custom/dynamodb-enhanced/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services-custom - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT dynamodb-enhanced AWS Java SDK :: DynamoDB :: Enhanced Client diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/TableIndices.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/TableIndices.java new file mode 100644 index 000000000000..fba5ebf91225 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/TableIndices.java @@ -0,0 +1,65 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal; + +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.IndexMetadata; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedGlobalSecondaryIndex; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedLocalSecondaryIndex; +import software.amazon.awssdk.services.dynamodb.model.ProjectionType; + +@SdkInternalApi +public class TableIndices { + private final List indices; + + public TableIndices(List indices) { + this.indices = indices; + } + + public List localSecondaryIndices() { + return Collections.unmodifiableList(indices.stream() + .filter(index -> !TableMetadata.primaryIndexName().equals(index.name())) + .filter(index -> !index.partitionKey().isPresent()) + .map(TableIndices::mapIndexMetadataToEnhancedLocalSecondaryIndex) + .collect(Collectors.toList())); + } + + public List globalSecondaryIndices() { + return Collections.unmodifiableList(indices.stream() + .filter(index -> !TableMetadata.primaryIndexName().equals(index.name())) + .filter(index -> index.partitionKey().isPresent()) + .map(TableIndices::mapIndexMetadataToEnhancedGlobalSecondaryIndex) + .collect(Collectors.toList())); + } + + private static EnhancedLocalSecondaryIndex mapIndexMetadataToEnhancedLocalSecondaryIndex(IndexMetadata indexMetadata) { + return EnhancedLocalSecondaryIndex.builder() + .indexName(indexMetadata.name()) + .projection(pb -> pb.projectionType(ProjectionType.ALL)) + .build(); + } + + private static EnhancedGlobalSecondaryIndex mapIndexMetadataToEnhancedGlobalSecondaryIndex(IndexMetadata indexMetadata) { + return EnhancedGlobalSecondaryIndex.builder() + .indexName(indexMetadata.name()) + .projection(pb -> 
pb.projectionType(ProjectionType.ALL)) + .build(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTable.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTable.java index 1538e977b4c3..cd281dec3d24 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTable.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTable.java @@ -17,6 +17,7 @@ import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.createKeyFromItem; +import java.util.ArrayList; import java.util.concurrent.CompletableFuture; import java.util.function.Consumer; import software.amazon.awssdk.annotations.SdkInternalApi; @@ -25,6 +26,7 @@ import software.amazon.awssdk.enhanced.dynamodb.Key; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.TableIndices; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.CreateTableOperation; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DeleteItemOperation; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DeleteTableOperation; @@ -114,7 +116,12 @@ public CompletableFuture createTable(Consumer createTable() { - return createTable(CreateTableEnhancedRequest.builder().build()); + TableIndices indices = new TableIndices(new ArrayList<>(tableSchema.tableMetadata().indices())); + + return createTable(CreateTableEnhancedRequest.builder() + .localSecondaryIndices(indices.localSecondaryIndices()) + .globalSecondaryIndices(indices.globalSecondaryIndices()) + .build()); } @Override diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java index 1bd2638892bd..31ce811b3483 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java @@ -15,22 +15,17 @@ package software.amazon.awssdk.enhanced.dynamodb.internal.client; -import static java.util.Collections.emptyList; import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.createKeyFromItem; -import java.util.Collection; -import java.util.List; -import java.util.Map; +import java.util.ArrayList; import java.util.function.Consumer; -import java.util.stream.Collectors; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; -import software.amazon.awssdk.enhanced.dynamodb.IndexMetadata; import software.amazon.awssdk.enhanced.dynamodb.Key; -import software.amazon.awssdk.enhanced.dynamodb.KeyAttributeMetadata; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.TableIndices; import 
software.amazon.awssdk.enhanced.dynamodb.internal.operations.CreateTableOperation; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DeleteItemOperation; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DeleteTableOperation; @@ -46,8 +41,6 @@ import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedRequest; import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedResponse; import software.amazon.awssdk.enhanced.dynamodb.model.DescribeTableEnhancedResponse; -import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedGlobalSecondaryIndex; -import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedLocalSecondaryIndex; import software.amazon.awssdk.enhanced.dynamodb.model.GetItemEnhancedRequest; import software.amazon.awssdk.enhanced.dynamodb.model.GetItemEnhancedResponse; import software.amazon.awssdk.enhanced.dynamodb.model.PageIterable; @@ -61,7 +54,6 @@ import software.amazon.awssdk.services.dynamodb.DynamoDbClient; import software.amazon.awssdk.services.dynamodb.model.DescribeTableRequest; import software.amazon.awssdk.services.dynamodb.model.DescribeTableResponse; -import software.amazon.awssdk.services.dynamodb.model.ProjectionType; @SdkInternalApi public class DefaultDynamoDbTable implements DynamoDbTable { @@ -126,52 +118,14 @@ public void createTable(Consumer requestCons @Override public void createTable() { - Map> indexGroups = splitSecondaryIndicesToLocalAndGlobalOnes(); + TableIndices indices = new TableIndices(new ArrayList<>(tableSchema.tableMetadata().indices())); + createTable(CreateTableEnhancedRequest.builder() - .localSecondaryIndices(extractLocalSecondaryIndices(indexGroups)) - .globalSecondaryIndices(extractGlobalSecondaryIndices(indexGroups)) + .localSecondaryIndices(indices.localSecondaryIndices()) + .globalSecondaryIndices(indices.globalSecondaryIndices()) .build()); } - private Map> splitSecondaryIndicesToLocalAndGlobalOnes() { - Collection indices = tableSchema.tableMetadata().indices(); - return indices.stream() - .filter(index -> !TableMetadata.primaryIndexName().equals(index.name())) - .collect(Collectors.groupingBy(metadata -> { - String partitionKeyName = metadata.partitionKey().map(KeyAttributeMetadata::name).orElse(null); - if (partitionKeyName == null) { - return IndexType.LSI; - } - return IndexType.GSI; - })); - } - - private List extractLocalSecondaryIndices(Map> indicesGroups) { - return indicesGroups.getOrDefault(IndexType.LSI, emptyList()).stream() - .map(this::mapIndexMetadataToEnhancedLocalSecondaryIndex) - .collect(Collectors.toList()); - } - - private EnhancedLocalSecondaryIndex mapIndexMetadataToEnhancedLocalSecondaryIndex(IndexMetadata indexMetadata) { - return EnhancedLocalSecondaryIndex.builder() - .indexName(indexMetadata.name()) - .projection(pb -> pb.projectionType(ProjectionType.ALL)) - .build(); - } - - private List extractGlobalSecondaryIndices(Map> indicesGroups) { - return indicesGroups.getOrDefault(IndexType.GSI, emptyList()).stream() - .map(this::mapIndexMetadataToEnhancedGlobalSecondaryIndex) - .collect(Collectors.toList()); - } - - private EnhancedGlobalSecondaryIndex mapIndexMetadataToEnhancedGlobalSecondaryIndex(IndexMetadata indexMetadata) { - return EnhancedGlobalSecondaryIndex.builder() - .indexName(indexMetadata.name()) - .projection(pb -> pb.projectionType(ProjectionType.ALL)) - .build(); - } - @Override public T deleteItem(DeleteItemEnhancedRequest request) { TableOperation> operation = DeleteItemOperation.create(request); diff --git 
a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/TableIndicesTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/TableIndicesTest.java new file mode 100644 index 000000000000..fe02468958d1 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/TableIndicesTest.java @@ -0,0 +1,105 @@ +package software.amazon.awssdk.enhanced.dynamodb; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.enhanced.dynamodb.internal.TableIndices; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.StaticIndexMetadata; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.StaticKeyAttributeMetadata; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedGlobalSecondaryIndex; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedLocalSecondaryIndex; + +public class TableIndicesTest { + + @Test + public void testLocalSecondaryIndices_onlyIncludesLSIs() { + List indices = Arrays.asList(StaticIndexMetadata.builder() + .name("lsi-1") + .build(), + StaticIndexMetadata.builder() + .name("lsi-2") + .build(), + StaticIndexMetadata.builder() + .name("gsi-1") + .partitionKey(StaticKeyAttributeMetadata.create( + "GlobalIndexPartitionKey", + AttributeValueType.N)) + .build()); + + TableIndices tableIndices = new TableIndices(indices); + + List lsiList = tableIndices.localSecondaryIndices(); + + assertEquals(2, lsiList.size()); + assertTrue(lsiList.stream().anyMatch(i -> "lsi-1".equals(i.indexName()))); + assertTrue(lsiList.stream().anyMatch(i -> "lsi-2".equals(i.indexName()))); + } + + @Test + public void testGlobalSecondaryIndices_onlyIncludesGSIs() { + List indices = Arrays.asList(StaticIndexMetadata.builder() + .name("lsi-1") + .build(), + StaticIndexMetadata.builder() + .name("gsi-1") + .partitionKey(StaticKeyAttributeMetadata.create( + "GlobalIndexPartitionKey1", + AttributeValueType.N)) + .build(), + StaticIndexMetadata.builder() + .name("gsi-2") + .partitionKey(StaticKeyAttributeMetadata.create( + "GlobalIndexPartitionKey2", + AttributeValueType.N)) + .build()); + + TableIndices tableIndices = new TableIndices(indices); + + List gsiList = tableIndices.globalSecondaryIndices(); + + assertEquals(2, gsiList.size()); + assertTrue(gsiList.stream().anyMatch(i -> "gsi-1".equals(i.indexName()))); + assertTrue(gsiList.stream().anyMatch(i -> "gsi-2".equals(i.indexName()))); + } + + @Test + public void testPrimaryIndexIsExcluded() { + List indices = Arrays.asList(StaticIndexMetadata.builder() + .name(TableMetadata.primaryIndexName()) + .partitionKey(StaticKeyAttributeMetadata.create("pk", + AttributeValueType.S)) + .build(), + StaticIndexMetadata.builder() + .name("lsi-1") + .build(), + StaticIndexMetadata.builder() + .name("gsi-1") + .partitionKey(StaticKeyAttributeMetadata.create( + "GlobalIndexPartitionKey", + AttributeValueType.N)) + .build()); + + TableIndices tableIndices = new TableIndices(indices); + + List gsiList = tableIndices.globalSecondaryIndices(); + List lsiList = tableIndices.localSecondaryIndices(); + + assertEquals(1, gsiList.size()); + assertEquals("gsi-1", gsiList.get(0).indexName()); + + assertEquals(1, lsiList.size()); + assertEquals("lsi-1", lsiList.get(0).indexName()); + } + + @Test + public void 
testEmptyIndexList() { + TableIndices tableIndices = new TableIndices(Collections.emptyList()); + + assertTrue(tableIndices.globalSecondaryIndices().isEmpty()); + assertTrue(tableIndices.localSecondaryIndices().isEmpty()); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/VersionedRecordExtensionTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/VersionedRecordExtensionTest.java index 4f61db7487e9..b7cbb4eb428a 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/VersionedRecordExtensionTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/VersionedRecordExtensionTest.java @@ -166,7 +166,7 @@ public void beforeWrite_returnsNoOpModification_ifVersionAttributeNotDefined() { } @Test(expected = IllegalArgumentException.class) - public void beforeWrite_throwsIllegalArgumentException_ifVersionAttributeIsWrongType() { + public void beforeWrite_throwsIllegalArgumentException_ifVersionAttributeIsWrongType() { FakeItem fakeItem = createUniqueFakeItem(); Map fakeItemWIthBadVersion = new HashMap<>(FakeItem.getTableSchema().itemToMap(fakeItem, true)); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTableTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTableTest.java index cbf1b7acba56..dd5745b8c048 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTableTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTableTest.java @@ -16,13 +16,22 @@ package software.amazon.awssdk.enhanced.dynamodb.internal.client; import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import java.util.Iterator; +import java.util.List; import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.stream.Collectors; import org.junit.Test; import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; @@ -31,6 +40,10 @@ import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithIndices; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; +import software.amazon.awssdk.services.dynamodb.model.CreateTableResponse; +import software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex; +import software.amazon.awssdk.services.dynamodb.model.LocalSecondaryIndex; @RunWith(MockitoJUnitRunner.class) public class DefaultDynamoDbAsyncTableTest { @@ -113,4 +126,55 @@ public void
keyFrom_primaryIndex_partitionAndNullSort() { assertThat(key.partitionKeyValue(), is(stringValue(item.getId()))); assertThat(key.sortKeyValue(), is(Optional.empty())); } + + @Test + public void createTable_doesNotTreatPrimaryIndexAsAnyOfSecondaryIndexes() { + DefaultDynamoDbAsyncTable dynamoDbMappedIndex = + new DefaultDynamoDbAsyncTable<>(mockDynamoDbAsyncClient, + mockDynamoDbEnhancedClientExtension, + FakeItem.getTableSchema(), + "test_table"); + + when(mockDynamoDbAsyncClient.createTable(any(CreateTableRequest.class))) + .thenReturn(CompletableFuture.completedFuture(CreateTableResponse.builder().build())); + + dynamoDbMappedIndex.createTable().join(); + + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(CreateTableRequest.class); + verify(mockDynamoDbAsyncClient).createTable(requestCaptor.capture()); + + CreateTableRequest request = requestCaptor.getValue(); + + assertThat(request.localSecondaryIndexes().size(), is(0)); + assertThat(request.globalSecondaryIndexes().size(), is(0)); + } + + @Test + public void createTable_groupsSecondaryIndexesExistingInTableSchema() { + DefaultDynamoDbAsyncTable dynamoDbMappedIndex = + new DefaultDynamoDbAsyncTable<>(mockDynamoDbAsyncClient, + mockDynamoDbEnhancedClientExtension, + FakeItemWithIndices.getTableSchema(), + "test_table"); + + when(mockDynamoDbAsyncClient.createTable(any(CreateTableRequest.class))) + .thenReturn(CompletableFuture.completedFuture(CreateTableResponse.builder().build())); + + dynamoDbMappedIndex.createTable().join(); + + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(CreateTableRequest.class); + verify(mockDynamoDbAsyncClient).createTable(requestCaptor.capture()); + + CreateTableRequest request = requestCaptor.getValue(); + + assertThat(request.localSecondaryIndexes().size(), is(1)); + Iterator lsiIterator = request.localSecondaryIndexes().iterator(); + assertThat(lsiIterator.next().indexName(), is("lsi_1")); + + assertThat(request.globalSecondaryIndexes().size(), is(2)); + List globalIndicesNames = request.globalSecondaryIndexes().stream() + .map(GlobalSecondaryIndex::indexName) + .collect(Collectors.toList()); + assertThat(globalIndicesNames, containsInAnyOrder("gsi_1", "gsi_2")); + } } diff --git a/services-custom/iam-policy-builder/pom.xml b/services-custom/iam-policy-builder/pom.xml index 1c28a8281f42..571fe37000c0 100644 --- a/services-custom/iam-policy-builder/pom.xml +++ b/services-custom/iam-policy-builder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ../../pom.xml iam-policy-builder diff --git a/services-custom/pom.xml b/services-custom/pom.xml index 3c15d347e335..f4ae38a42abd 100644 --- a/services-custom/pom.xml +++ b/services-custom/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT services-custom AWS Java SDK :: Custom Services diff --git a/services-custom/s3-event-notifications/pom.xml b/services-custom/s3-event-notifications/pom.xml index 01bcc0836e1c..9af33e6d40a8 100644 --- a/services-custom/s3-event-notifications/pom.xml +++ b/services-custom/s3-event-notifications/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ../../pom.xml s3-event-notifications diff --git a/services-custom/s3-event-notifications/src/main/java/software/amazon/awssdk/eventnotifications/s3/internal/DefaultS3EventNotificationReader.java 
b/services-custom/s3-event-notifications/src/main/java/software/amazon/awssdk/eventnotifications/s3/internal/DefaultS3EventNotificationReader.java index cdb1f1975e71..47e139869ec7 100644 --- a/services-custom/s3-event-notifications/src/main/java/software/amazon/awssdk/eventnotifications/s3/internal/DefaultS3EventNotificationReader.java +++ b/services-custom/s3-event-notifications/src/main/java/software/amazon/awssdk/eventnotifications/s3/internal/DefaultS3EventNotificationReader.java @@ -101,7 +101,7 @@ private S3EventNotificationRecord readEventNotificationRecord(JsonNode jsonNode) eventNotificationRecord.setEventSource(eventSource); String eventTime = expectStringOrNull(recordNode, "eventTime"); - eventNotificationRecord.setEventTime(eventName != null ? Instant.parse(eventTime) : null); + eventNotificationRecord.setEventTime(eventTime != null ? Instant.parse(eventTime) : null); RequestParameters requestParameters = readRequestParameters(recordNode.get("requestParameters")); eventNotificationRecord.setRequestParameters(requestParameters); diff --git a/services-custom/s3-event-notifications/src/test/java/software/amazon/awssdk/eventnotifications/s3/model/S3EventNotificationReaderTest.java b/services-custom/s3-event-notifications/src/test/java/software/amazon/awssdk/eventnotifications/s3/model/S3EventNotificationReaderTest.java index ef9d2c89154a..b55a2bb74142 100644 --- a/services-custom/s3-event-notifications/src/test/java/software/amazon/awssdk/eventnotifications/s3/model/S3EventNotificationReaderTest.java +++ b/services-custom/s3-event-notifications/src/test/java/software/amazon/awssdk/eventnotifications/s3/model/S3EventNotificationReaderTest.java @@ -442,6 +442,53 @@ void missingField_shouldBeNull() { assertThat(rec.getResponseElements()).isNull(); } + @Test + void eventTimeIsNullWhenEventNamePresent_shouldSucceed() { + String json = "{\n" + + " \"Records\" : [ {\n" + + " \"eventVersion\" : \"2.1\",\n" + + " \"eventSource\" : \"aws:s3\",\n" + + " \"awsRegion\" : \"us-west-2\",\n" + // missing eventTime + + " \"eventName\" : \"ObjectCreated:Put\",\n" + + " \"userIdentity\" : {\n" + + " \"principalId\" : \"AIDAJDPLRKLG7UEXAMUID\"\n" + + " },\n" + + " \"requestParameters\" : {\n" + + " \"sourceIPAddress\" : \"127.1.2.3\"\n" + + " },\n" + + " \"responseElements\":{\n" + + " \"x-amz-request-id\":\"C3D13FE58DE4C810\",\n" + + " \"x-amz-id-2\":\"FMyUVURIY8/IgAtTv8xRjskZQpcIZ9KG4V5Wp6S7S/JRWeUWerMUE5JgHvANOjpD\"\n" + + " },\n" + + " \"s3\" : {\n" + + " \"s3SchemaVersion\" : \"1.0\",\n" + + " \"configurationId\" : \"testConfigRule\",\n" + + " \"bucket\" : {\n" + + " \"name\" : \"mybucket-test\",\n" + + " \"ownerIdentity\" : {\n" + + " \"principalId\" : \"A3NL1KOZZKExample\"\n" + + " },\n" + + " \"arn\" : \"arn:aws:s3:::mybucket\"\n" + + " },\n" + + " \"object\" : {\n" + + " \"key\" : \"HappyFace-test.jpg\",\n" + + " \"size\" : 2048,\n" + + " \"eTag\" : \"d41d8cd98f00b204e9800998ecf8etag\",\n" + + " \"versionId\" : \"096fKKXTRTtl3on89fVO.nfljtsv6vid\",\n" + + " \"sequencer\" : \"0055AED6DCD9028SEQ\"\n" + + " }\n" + + " }\n" + + " } ]\n" + + "}"; + + S3EventNotification event = S3EventNotification.fromJson(json); + S3EventNotificationRecord rec = event.getRecords().get(0); + assertThat(rec).isNotNull(); + assertThat(rec.getEventName()).isEqualTo("ObjectCreated:Put"); + assertThat(rec.getEventTime()).isNull(); + } + @Test void extraFields_areIgnored() { String json = "{\"Records\":[], \"toto\":123}"; diff --git a/services-custom/s3-transfer-manager/pom.xml 
b/services-custom/s3-transfer-manager/pom.xml index a137a6361677..948f32d4295b 100644 --- a/services-custom/s3-transfer-manager/pom.xml +++ b/services-custom/s3-transfer-manager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ../../pom.xml s3-transfer-manager diff --git a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/config/DownloadFilter.java b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/config/DownloadFilter.java index 1398f7bd6ec4..483703c33022 100644 --- a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/config/DownloadFilter.java +++ b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/config/DownloadFilter.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.transfer.s3.config; +import java.util.Objects; import java.util.function.Predicate; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.services.s3.model.S3Object; @@ -39,8 +40,47 @@ public interface DownloadFilter extends Predicate { boolean test(S3Object s3Object); /** - * A {@link DownloadFilter} that downloads all non-folder objects. A folder is a 0-byte object created when a customer - * uses S3 console to create a folder, and it always ends with "/". + * Returns a composed filter that represents the logical AND of this filter and another. + * The composed filter returns true only if both this filter and the other filter return true. + * @param other a predicate that will be logically-ANDed with this + * predicate + * @return a composed filter that represents the logical AND of this filter and the other filter + * @throws NullPointerException if other is null + */ + @Override + default DownloadFilter and(Predicate other) { + Objects.requireNonNull(other, "Other predicate cannot be null"); + return s3Object -> test(s3Object) && other.test(s3Object); + } + + /** + * Returns a composed filter that represents the logical OR of this filter and another. + * The composed filter returns true if either this filter or the other filter returns true. + * @param other a predicate that will be logically-ORed with this + * predicate + * @return a composed filter that represents the logical OR of this filter and the other filter + * @throws NullPointerException if other is null + */ + @Override + default DownloadFilter or(Predicate other) { + Objects.requireNonNull(other, "Other predicate cannot be null"); + return s3Object -> test(s3Object) || other.test(s3Object); + } + + /** + * Returns a filter that represents the logical negation of this predicate. + * The returned filter returns true when this filter returns false, and vice versa. + * @return a filter that represents the logical negation of this filter + * predicate + */ + @Override + default DownloadFilter negate() { + return s3Object -> !test(s3Object); + } + + /** + * A {@link DownloadFilter} that downloads all non-folder objects. A folder is a 0-byte object created when a customer uses S3 + * console to create a folder, and it always ends with "/". * *
    * This is the default behavior if no filter is provided. diff --git a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/config/DownloadFilterTest.java b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/config/DownloadFilterTest.java index cc6da432435d..b3690244f93b 100644 --- a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/config/DownloadFilterTest.java +++ b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/config/DownloadFilterTest.java @@ -16,8 +16,11 @@ package software.amazon.awssdk.transfer.s3.config; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; +import java.util.function.Function; import java.util.stream.Stream; +import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; @@ -38,4 +41,168 @@ public static Stream s3Objects() { void allObjectsFilter_shouldWork(S3Object s3Object, boolean result) { assertThat(DownloadFilter.allObjects().test(s3Object)).isEqualTo(result); } + + private static Stream filterOperationTestCases() { + Function folder1OrFolder3Filter = s3Object -> { + DownloadFilter folder1 = obj -> obj.key().startsWith("folder1"); + DownloadFilter folder3 = obj -> obj.key().startsWith("folder3"); + return folder1.or(folder3); + }; + + Function txtAndLargeSizeFilter = s3Object -> { + DownloadFilter txtFilter = obj -> obj.key().endsWith(".txt"); + DownloadFilter sizeFilter = obj -> obj.size() > 1000L; + return txtFilter.and(sizeFilter); + }; + + Function notFolder1Filter = s3Object -> { + DownloadFilter folder1 = obj -> obj.key().startsWith("folder1"); + return folder1.negate(); + }; + + Function notLargeSizeFilter = s3Object -> { + DownloadFilter largeSize = obj -> obj.size() > 1000L; + return largeSize.negate(); + }; + + Function complexFilter = s3Object -> { + DownloadFilter folder1 = obj -> obj.key().startsWith("folder1"); + DownloadFilter folder3 = obj -> obj.key().startsWith("folder3"); + DownloadFilter sizeFilter = obj -> obj.size() > 1000L; + return folder1.or(folder3).and(sizeFilter); + }; + Function nullParameterFilter = s3Object -> { + DownloadFilter baseFilter = obj -> obj.key().startsWith("folder1"); + return s -> { + assertThrows(NullPointerException.class, + () -> baseFilter.or(null), + "or() should throw NullPointerException when other is null"); + assertThrows(NullPointerException.class, + () -> baseFilter.and(null), + "and() should throw NullPointerException when other is null"); + return true; // Return value doesn't matter as we're testing for exceptions + }; + }; + + + return Stream.of( + // OR operation tests + Arguments.of( + "OR: folder1/test.txt matches (folder1 OR folder3)", + S3Object.builder().key("folder1/test.txt").size(2000L).build(), + folder1OrFolder3Filter, + true + ), + Arguments.of( + "OR: folder3/test.txt matches (folder1 OR folder3)", + S3Object.builder().key("folder3/test.txt").size(2000L).build(), + folder1OrFolder3Filter, + true + ), + Arguments.of( + "OR: folder2/test.txt does not match (folder1 OR folder3)", + S3Object.builder().key("folder2/test.txt").size(2000L).build(), + folder1OrFolder3Filter, + false + ), + + // AND operation tests + Arguments.of( + "AND: large .txt file matches (.txt AND size > 1000)", + S3Object.builder().key("folder1/test.txt").size(2000L).build(), + 
txtAndLargeSizeFilter, + true + ), + Arguments.of( + "AND: small .txt file does not match (.txt AND size > 1000)", + S3Object.builder().key("folder1/test.txt").size(500L).build(), + txtAndLargeSizeFilter, + false + ), + Arguments.of( + "AND: large .pdf file does not match (.txt AND size > 1000)", + S3Object.builder().key("folder1/test.pdf").size(2000L).build(), + txtAndLargeSizeFilter, + false + ), + + // NEGATE operation tests + Arguments.of( + "NEGATE: folder1 file does not match NOT(folder1)", + S3Object.builder().key("folder1/test.txt").size(1000L).build(), + notFolder1Filter, + false + ), + Arguments.of( + "NEGATE: folder2 file matches NOT(folder1)", + S3Object.builder().key("folder2/test.txt").size(1000L).build(), + notFolder1Filter, + true + ), + Arguments.of( + "NEGATE: large file does not match NOT(size > 1000)", + S3Object.builder().key("test.txt").size(2000L).build(), + notLargeSizeFilter, + false + ), + Arguments.of( + "NEGATE: small file matches NOT(size > 1000)", + S3Object.builder().key("test.txt").size(500L).build(), + notLargeSizeFilter, + true + ), + + // Complex chained operations + Arguments.of( + "COMPLEX: large file in folder1 matches ((folder1 OR folder3) AND size > 1000)", + S3Object.builder().key("folder1/test.txt").size(2000L).build(), + complexFilter, + true + ), + Arguments.of( + "COMPLEX: small file in folder1 does not match ((folder1 OR folder3) AND size > 1000)", + S3Object.builder().key("folder1/test.txt").size(500L).build(), + complexFilter, + false + ), + Arguments.of( + "COMPLEX: large file in folder2 does not match ((folder1 OR folder3) AND size > 1000)", + S3Object.builder().key("folder2/test.txt").size(2000L).build(), + complexFilter, + false + ), + Arguments.of( + "COMPLEX: large file in folder3 matches ((folder1 OR folder3) AND size > 1000)", + S3Object.builder().key("folder3/test.txt").size(2000L).build(), + complexFilter, + true + ), + // NullPointerException + Arguments.of( + "NULL: or/and with null parameter should throw NullPointerException", + S3Object.builder().key("folder1/test.txt").size(1000L).build(), + nullParameterFilter, + true + ) + + ); + } + + @ParameterizedTest + @MethodSource("filterOperationTestCases") + @DisplayName("Test DownloadFilter operations (AND, OR, NEGATE)") + void testFilterOperations(String scenario, S3Object s3Object, + Function filterFactory, + boolean expectedResult) { + // Given + DownloadFilter filter = filterFactory.apply(s3Object); + + // When + boolean actualResult = filter.test(s3Object); + + // Then + assertThat(actualResult) + .as(scenario) + .isEqualTo(expectedResult); + } } diff --git a/services/accessanalyzer/pom.xml b/services/accessanalyzer/pom.xml index 9525fd16730c..e51be44777a5 100644 --- a/services/accessanalyzer/pom.xml +++ b/services/accessanalyzer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT accessanalyzer AWS Java SDK :: Services :: AccessAnalyzer diff --git a/services/account/pom.xml b/services/account/pom.xml index 6f892f9f651d..027f0cbca5b1 100644 --- a/services/account/pom.xml +++ b/services/account/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT account AWS Java SDK :: Services :: Account diff --git a/services/acm/pom.xml b/services/acm/pom.xml index 8656cd4a0fe8..c1354c28a106 100644 --- a/services/acm/pom.xml +++ b/services/acm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT acm AWS Java SDK :: Services :: AWS Certificate Manager diff --git 
a/services/acmpca/pom.xml b/services/acmpca/pom.xml index 23b24c01ec35..4b13e2ed38be 100644 --- a/services/acmpca/pom.xml +++ b/services/acmpca/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT acmpca AWS Java SDK :: Services :: ACM PCA diff --git a/services/amp/pom.xml b/services/amp/pom.xml index 6444baaaf46f..1dd28b8e16cf 100644 --- a/services/amp/pom.xml +++ b/services/amp/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT amp AWS Java SDK :: Services :: Amp diff --git a/services/amplify/pom.xml b/services/amplify/pom.xml index 908ffb5cd0dd..9df12ad78af4 100644 --- a/services/amplify/pom.xml +++ b/services/amplify/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT amplify AWS Java SDK :: Services :: Amplify diff --git a/services/amplify/src/main/resources/codegen-resources/service-2.json b/services/amplify/src/main/resources/codegen-resources/service-2.json index a2d51e374032..e300f0afbf98 100644 --- a/services/amplify/src/main/resources/codegen-resources/service-2.json +++ b/services/amplify/src/main/resources/codegen-resources/service-2.json @@ -764,6 +764,10 @@ "wafConfiguration":{ "shape":"WafConfiguration", "documentation":"

Describes the Firewall configuration for the Amplify app. Firewall support enables you to protect your hosted applications with a direct integration with WAF." + }, + "jobConfig":{ + "shape":"JobConfig", + "documentation":"The configuration details that apply to the jobs for an Amplify app." } }, "documentation":"Represents the different branches of a repository for building, deploying, and hosting an Amplify app."
@@ -1130,6 +1134,14 @@ "member":{"shape":"Branch"}, "max":255 }, + "BuildComputeType":{ + "type":"string", + "enum":[ + "STANDARD_8GB", + "LARGE_16GB", + "XLARGE_72GB" + ] + },

"BuildSpec":{ "type":"string", "documentation":"The build specification (build spec) file for an Amplify app build.",
@@ -1144,7 +1156,7 @@ "members":{ "type":{ "shape":"CacheConfigType", - "documentation":"The type of cache configuration to use for an Amplify app. The AMPLIFY_MANAGED cache configuration automatically applies an optimized cache configuration for your app based on its platform, routing rules, and rewrite rules. This is the default setting. The AMPLIFY_MANAGED_NO_COOKIES cache configuration type is the same as AMPLIFY_MANAGED, except that it excludes all cookies from the cache key." + "documentation":"The type of cache configuration to use for an Amplify app. The AMPLIFY_MANAGED cache configuration automatically applies an optimized cache configuration for your app based on its platform, routing rules, and rewrite rules. The AMPLIFY_MANAGED_NO_COOKIES cache configuration type is the same as AMPLIFY_MANAGED, except that it excludes all cookies from the cache key. This is the default setting." } }, "documentation":"Describes the cache configuration for an Amplify app. For more information about how Amplify applies an optimal cache configuration for your app based on the type of content that is being served, see Managing cache configuration in the Amplify User guide."
@@ -1316,6 +1328,10 @@ "shape":"AutoBranchCreationConfig", "documentation":"The automated branch creation configuration for an Amplify app." }, + "jobConfig":{ + "shape":"JobConfig", + "documentation":"Describes the configuration details that apply to the jobs for an Amplify app." + }, "cacheConfig":{ "shape":"CacheConfig", "documentation":"The cache configuration for the Amplify app."
@@ -2292,6 +2308,17 @@ "type":"string", "max":1000 }, + "JobConfig":{ + "type":"structure", + "required":["buildComputeType"], + "members":{ + "buildComputeType":{ + "shape":"BuildComputeType", + "documentation":"Specifies the size of the build instance. Amplify supports three instance sizes: STANDARD_8GB, LARGE_16GB, and XLARGE_72GB. If you don't specify a value, Amplify uses the STANDARD_8GB default. The following list describes the CPU, memory, and storage capacity for each build instance type: STANDARD_8GB (vCPUs: 4, Memory: 8 GiB, Disk space: 128 GB); LARGE_16GB (vCPUs: 8, Memory: 16 GiB, Disk space: 128 GB); XLARGE_72GB (vCPUs: 36, Memory: 72 GiB, Disk space: 256 GB)." + } + }, + "documentation":"Describes the configuration details that apply to the jobs for an Amplify app. Use JobConfig to apply configuration to jobs, such as customizing the build instance size when you create or update an Amplify app. For more information about customizable build instances, see Custom build instances in the Amplify User Guide." + }, "JobId":{ "type":"string", "max":255,
@@ -3207,8 +3234,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"The response for the tag resource request." }, "TagValue":{
@@ -3276,8 +3302,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"The response for the untag resource request." }, "UpdateAppRequest":{
@@ -3366,6 +3391,10 @@ "shape":"AccessToken", "documentation":"The personal access token for a GitHub repository for an Amplify app. The personal access token is used to authorize access to a GitHub repository using the Amplify GitHub App. The token is not stored. Use accessToken for GitHub repositories only. To authorize access to a repository provider such as Bitbucket or CodeCommit, use oauthToken. You must specify either accessToken or oauthToken when you update an app. Existing Amplify apps deployed from a GitHub repository using OAuth continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see Migrating an existing OAuth app to the Amplify GitHub App in the Amplify User Guide." }, + "jobConfig":{ + "shape":"JobConfig", + "documentation":"Describes the configuration details that apply to the jobs for an Amplify app." + }, "cacheConfig":{ "shape":"CacheConfig", "documentation":"The cache configuration for the Amplify app."
diff --git a/services/amplifybackend/pom.xml b/services/amplifybackend/pom.xml index 08de4b27c936..a5ad0584b969 100644 --- a/services/amplifybackend/pom.xml +++ b/services/amplifybackend/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT amplifybackend AWS Java SDK :: Services :: Amplify Backend
diff --git a/services/amplifyuibuilder/pom.xml b/services/amplifyuibuilder/pom.xml index 45eda53d9491..9b49e688342d 100644 --- a/services/amplifyuibuilder/pom.xml +++ b/services/amplifyuibuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT amplifyuibuilder AWS Java SDK :: Services :: Amplify UI Builder
diff --git a/services/apigateway/pom.xml b/services/apigateway/pom.xml index c2fc1aaf4ae2..16ba83c8573f 100644 --- a/services/apigateway/pom.xml +++ b/services/apigateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT apigateway AWS Java SDK :: Services :: Amazon API Gateway
diff --git a/services/apigateway/src/main/resources/codegen-resources/service-2.json b/services/apigateway/src/main/resources/codegen-resources/service-2.json index 93c7f5bc0e72..67b4e67e4637 100644 --- a/services/apigateway/src/main/resources/codegen-resources/service-2.json +++ b/services/apigateway/src/main/resources/codegen-resources/service-2.json @@ -2825,6 +2825,10 @@ "policy":{ "shape":"String", "documentation":"A stringified JSON policy document that applies to the execute-api service for this DomainName regardless of the caller and Method configuration. Supported only for private custom domain names." + }, + "routingMode":{ + "shape":"RoutingMode", + "documentation":"The routing mode for this domain name. The routing mode determines how API Gateway sends traffic from your custom domain name to your private APIs." } }, "documentation":"A request to create a new domain name."
@@ -3765,7 +3769,7 @@ }, "domainNameArn":{ "shape":"String", - "documentation":"The ARN of the domain name. Supported only for private custom domain names." + "documentation":"The ARN of the domain name." }, "certificateName":{ "shape":"String",
@@ -3838,6 +3842,10 @@ "policy":{ "shape":"String", "documentation":"A stringified JSON policy document that applies to the execute-api service for this DomainName regardless of the caller and Method configuration. Supported only for private custom domain names." + }, + "routingMode":{ + "shape":"RoutingMode", + "documentation":"The routing mode for this domain name. The routing mode determines how API Gateway sends traffic from your custom domain name to your private APIs." } }, "documentation":"Represents a custom domain name as a user-friendly host name of an API (RestApi)."
@@ -4076,8 +4084,7 @@ }, "GetAccountRequest":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"Requests API Gateway to get information about the current Account resource." }, "GetApiKeyRequest":{
@@ -5366,7 +5373,7 @@ }, "timeoutInMillis":{ "shape":"Integer", - "documentation":"Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds or 29 seconds." + "documentation":"Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds or 29 seconds. You can increase the default value to longer than 29 seconds for Regional or private APIs only." }, "cacheNamespace":{ "shape":"String",
@@ -5976,7 +5983,7 @@ }, "timeoutInMillis":{ "shape":"NullableInteger", - "documentation":"Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds or 29 seconds." + "documentation":"Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds or 29 seconds. You can increase the default value to longer than 29 seconds for Regional or private APIs only." }, "tlsConfig":{"shape":"TlsConfig"} },
@@ -6385,6 +6392,14 @@ }, "documentation":"Contains references to your APIs and links that guide you in how to interact with your collection. A collection offers a paginated view of your APIs." }, + "RoutingMode":{ + "type":"string", + "enum":[ + "BASE_PATH_MAPPING_ONLY", + "ROUTING_RULE_ONLY", + "ROUTING_RULE_THEN_BASE_PATH_MAPPING" + ] + },

"SdkConfigurationProperty":{ "type":"structure", "members":{
diff --git a/services/apigatewaymanagementapi/pom.xml b/services/apigatewaymanagementapi/pom.xml index b8ebe809772f..9940390ed5d7 100644 --- a/services/apigatewaymanagementapi/pom.xml +++ b/services/apigatewaymanagementapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT apigatewaymanagementapi AWS Java SDK :: Services :: ApiGatewayManagementApi
diff --git a/services/apigatewayv2/pom.xml b/services/apigatewayv2/pom.xml index 259960f30341..2e0123a0ef7a 100644 --- a/services/apigatewayv2/pom.xml +++ b/services/apigatewayv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT apigatewayv2 AWS Java SDK :: Services :: ApiGatewayV2
diff --git a/services/apigatewayv2/src/main/resources/codegen-resources/paginators-1.json b/services/apigatewayv2/src/main/resources/codegen-resources/paginators-1.json index f3b7195d8e13..52644aaef144 100644 --- a/services/apigatewayv2/src/main/resources/codegen-resources/paginators-1.json +++ b/services/apigatewayv2/src/main/resources/codegen-resources/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination" : { } + "pagination": { + "ListRoutingRules": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "RoutingRules" + } + } } \ No newline at end of file
diff --git a/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json b/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json index cce75eb08107..fa0e91c1b3f0 100644 --- a/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json +++ b/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json @@ -358,6 +358,40 @@ ], "documentation": "Creates a RouteResponse for a Route." }, + "CreateRoutingRule": { + "name": "CreateRoutingRule", + "documentation": "Creates a RoutingRule.", + "http": { + "method": "POST", + "requestUri": "/v2/domainnames/{domainName}/routingrules", + "responseCode": 201 + }, + "input": { + "shape": "CreateRoutingRuleRequest" + }, + "output": { + "shape": "CreateRoutingRuleResponse", + "documentation": "The request has succeeded and has resulted in the creation of a resource." + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "The resource specified in the request was not found." + }, + { + "shape": "TooManyRequestsException", + "documentation": "The client is sending more than the allowed number of requests per unit of time." + }, + { + "shape": "BadRequestException", + "documentation": "One of the parameters in the request is invalid." + }, + { + "shape": "ConflictException", + "documentation": "The resource already exists." + } + ] + }, "CreateStage": { "name": "CreateStage", "http": {
@@ -730,6 +764,33 @@ ], "documentation": "Deletes the RouteSettings for a stage." }, + "DeleteRoutingRule": { + "name": "DeleteRoutingRule", + "documentation": "Deletes a routing rule.", + "http": { + "method": "DELETE", + "requestUri": "/v2/domainnames/{domainName}/routingrules/{routingRuleId}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteRoutingRuleRequest" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "The resource specified in the request was not found." + }, + { + "shape": "TooManyRequestsException", + "documentation": "The client is sending more than the allowed number of requests per unit of time." + }, + { + "shape": "BadRequestException", + "documentation": "One of the parameters in the request is invalid." + } + ], + "idempotent": true + }, "DeleteStage": { "name": "DeleteStage", "http": {
@@ -1419,6 +1480,66 @@ ], "documentation": "Gets the Routes for an API." }, + "GetRoutingRule": { + "name": "GetRoutingRule", + "documentation": "Gets a routing rule.", + "http": { + "method": "GET", + "requestUri": "/v2/domainnames/{domainName}/routingrules/{routingRuleId}", + "responseCode": 200 + }, + "input": { + "shape": "GetRoutingRuleRequest" + }, + "output": { + "shape": "GetRoutingRuleResponse", + "documentation": "Success" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "The resource specified in the request was not found." + }, + { + "shape": "TooManyRequestsException", + "documentation": "The client is sending more than the allowed number of requests per unit of time." + }, + { + "shape": "BadRequestException", + "documentation": "One of the parameters in the request is invalid." + } + ] + }, + "ListRoutingRules": { + "name": "ListRoutingRules", + "documentation": "Lists routing rules.", + "http": { + "method": "GET", + "requestUri": "/v2/domainnames/{domainName}/routingrules", + "responseCode": 200 + }, + "input": { + "shape": "ListRoutingRulesRequest" + }, + "output": { + "shape": "ListRoutingRulesResponse", + "documentation": "Success" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "The resource specified in the request was not found." + }, + { + "shape": "TooManyRequestsException", + "documentation": "The client is sending more than the allowed number of requests per unit of time." + }, + { + "shape": "BadRequestException", + "documentation": "One of the parameters in the request is invalid." + } + ] + },

"GetStage": { "name": "GetStage", "http": {
@@ -1595,6 +1716,41 @@ ], "documentation": "Imports an API." }, + "PutRoutingRule": { + "name": "PutRoutingRule", + "documentation": "Updates a routing rule.", + "http": { + "method": "PUT", + "requestUri": "/v2/domainnames/{domainName}/routingrules/{routingRuleId}", + "responseCode": 200 + }, + "input": { + "shape": "PutRoutingRuleRequest" + }, + "output": { + "shape": "PutRoutingRuleResponse", + "documentation": "Success" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "The resource specified in the request was not found." + }, + { + "shape": "TooManyRequestsException", + "documentation": "The client is sending more than the allowed number of requests per unit of time." + }, + { + "shape": "BadRequestException", + "documentation": "One of the parameters in the request is invalid." + }, + { + "shape": "ConflictException", + "documentation": "The resource already exists." + } + ], + "idempotent": true + }, "ReimportApi": { "name": "ReimportApi", "http": {
@@ -3108,6 +3264,11 @@ "locationName": "mutualTlsAuthentication", "documentation": "The mutual TLS authentication configuration for a custom domain name." }, + "RoutingMode": { + "shape": "RoutingMode", + "locationName": "routingMode", + "documentation": "The routing mode." + }, "Tags": { "shape": "Tags", "locationName": "tags",
@@ -3137,6 +3298,11 @@ "locationName": "mutualTlsAuthentication", "documentation": "The mutual TLS authentication configuration for a custom domain name." }, + "RoutingMode": { + "shape": "RoutingMode", + "locationName": "routingMode", + "documentation": "The routing mode." + }, "Tags": { "shape": "Tags", "locationName": "tags",
@@ -3161,6 +3327,10 @@ "locationName": "domainName", "documentation": "The name of the DomainName resource." }, + "DomainNameArn": { + "shape": "Arn", + "locationName": "domainNameArn" + }, "DomainNameConfigurations": { "shape": "DomainNameConfigurations", "locationName": "domainNameConfigurations",
@@ -3171,6 +3341,11 @@ "locationName": "mutualTlsAuthentication", "documentation": "The mutual TLS authentication configuration for a custom domain name." }, + "RoutingMode": { + "shape": "RoutingMode", + "locationName": "routingMode", + "documentation": "The routing mode." + }, "Tags": { "shape": "Tags", "locationName": "tags",
@@ -3999,6 +4174,74 @@ } } }, + "CreateRoutingRuleRequest": { + "type": "structure", + "members": { + "Actions": { + "shape": "__listOfRoutingRuleAction", + "locationName": "actions", + "documentation": "Represents a routing rule action. The only supported action is invokeApi." + }, + "Conditions": { + "shape": "__listOfRoutingRuleCondition", + "locationName": "conditions", + "documentation": "Represents a condition. Conditions can contain up to two matchHeaders conditions and one matchBasePaths conditions. API Gateway evaluates header conditions and base path conditions together. You can only use AND between header and base path conditions." + }, + "DomainName": { + "shape": "__string", + "location": "uri", + "locationName": "domainName", + "documentation": "The domain name." + }, + "DomainNameId": { + "shape": "__string", + "location": "querystring", + "locationName": "domainNameId", + "documentation": "The domain name ID." + }, + "Priority": { + "shape": "RoutingRulePriority", + "locationName": "priority", + "documentation": "Represents the priority of the routing rule." + } + }, + "required": [ + "DomainName", + "Actions", + "Priority", + "Conditions" + ] + }, + "CreateRoutingRuleResponse": { + "type": "structure", + "members": { + "Actions": { + "shape": "__listOfRoutingRuleAction", + "locationName": "actions", + "documentation": "Represents a routing rule action. The only supported action is invokeApi." + }, + "Conditions": { + "shape": "__listOfRoutingRuleCondition", + "locationName": "conditions", + "documentation": "Represents a condition. Conditions can contain up to two matchHeaders conditions and one matchBasePaths conditions. API Gateway evaluates header conditions and base path conditions together. You can only use AND between header and base path conditions." + }, + "Priority": { + "shape": "RoutingRulePriority", + "locationName": "priority", + "documentation": "Represents the priority of the routing rule." + }, + "RoutingRuleArn": { + "shape": "Arn", + "locationName": "routingRuleArn", + "documentation": "The ARN of the domain name." + }, + "RoutingRuleId": { + "shape": "Id", + "locationName": "routingRuleId", + "documentation": "The routing rule ID." + } + } + },

    The domain name.

    " + }, + "DomainNameId": { + "shape": "__string", + "location": "querystring", + "locationName": "domainNameId", + "documentation": "

    The domain name ID.

    " + }, + "RoutingRuleId": { + "shape": "__string", + "location": "uri", + "locationName": "routingRuleId", + "documentation": "

    The routing rule ID.

    " + } + }, + "required": [ + "RoutingRuleId", + "DomainName" + ] + }, "DeleteStageRequest": { "type": "structure", "members": { @@ -4647,8 +4917,7 @@ }, "DeleteVpcLinkResponse": { "type": "structure", - "members": { - } + "members": {} }, "Deployment": { "type": "structure", @@ -4724,6 +4993,10 @@ "locationName": "domainName", "documentation": "

    The name of the DomainName resource.

    " }, + "DomainNameArn": { + "shape": "Arn", + "locationName": "domainNameArn" + }, "DomainNameConfigurations": { "shape": "DomainNameConfigurations", "locationName": "domainNameConfigurations", @@ -4734,6 +5007,11 @@ "locationName": "mutualTlsAuthentication", "documentation": "

    The mutual TLS authentication configuration for a custom domain name.

    " }, + "RoutingMode": { + "shape": "RoutingMode", + "locationName": "routingMode", + "documentation": "

    The routing mode.

    " + }, "Tags": { "shape": "Tags", "locationName": "tags", @@ -5403,6 +5681,10 @@ "locationName": "domainName", "documentation": "

    The name of the DomainName resource.

    " }, + "DomainNameArn": { + "shape": "Arn", + "locationName": "domainNameArn" + }, "DomainNameConfigurations": { "shape": "DomainNameConfigurations", "locationName": "domainNameConfigurations", @@ -5413,6 +5695,11 @@ "locationName": "mutualTlsAuthentication", "documentation": "

    The mutual TLS authentication configuration for a custom domain name.

    " }, + "RoutingMode": { + "shape": "RoutingMode", + "locationName": "routingMode", + "documentation": "

    The routing mode.

    " + }, "Tags": { "shape": "Tags", "locationName": "tags", @@ -6091,6 +6378,109 @@ } } }, + "GetRoutingRuleRequest": { + "type": "structure", + "members": { + "DomainName": { + "shape": "__string", + "location": "uri", + "locationName": "domainName", + "documentation": "

    The domain name.

    " + }, + "DomainNameId": { + "shape": "__string", + "location": "querystring", + "locationName": "domainNameId", + "documentation": "

    The domain name ID.

    " + }, + "RoutingRuleId": { + "shape": "__string", + "location": "uri", + "locationName": "routingRuleId", + "documentation": "

    The routing rule ID.

    " + } + }, + "required": [ + "RoutingRuleId", + "DomainName" + ] + }, + "GetRoutingRuleResponse": { + "type": "structure", + "members": { + "Actions": { + "shape": "__listOfRoutingRuleAction", + "locationName": "actions", + "documentation": "

    The resulting action based on matching a routing rules condition. Only InvokeApi is supported.

    " + }, + "Conditions": { + "shape": "__listOfRoutingRuleCondition", + "locationName": "conditions", + "documentation": "

    The conditions of the routing rule.

    " + }, + "Priority": { + "shape": "RoutingRulePriority", + "locationName": "priority", + "documentation": "

    The order in which API Gateway evaluates a rule. Priority is evaluated from the lowest value to the highest value.

    " + }, + "RoutingRuleArn": { + "shape": "Arn", + "locationName": "routingRuleArn", + "documentation": "

    The routing rule ARN.

    " + }, + "RoutingRuleId": { + "shape": "Id", + "locationName": "routingRuleId", + "documentation": "

    The routing rule ID.

    " + } + } + }, + "ListRoutingRulesRequest": { + "type": "structure", + "members": { + "DomainName": { + "shape": "__string", + "location": "uri", + "locationName": "domainName", + "documentation": "

    The domain name.

    " + }, + "DomainNameId": { + "shape": "__string", + "location": "querystring", + "locationName": "domainNameId", + "documentation": "

    The domain name ID.

    " + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

    The maximum number of elements to be returned for this resource.

    " + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

    The next page of elements from this collection. Not valid for the last element of the collection.

    " + } + }, + "required": [ + "DomainName" + ] + }, + "ListRoutingRulesResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "NextToken", + "locationName": "nextToken" + }, + "RoutingRules": { + "shape": "__listOfRoutingRule", + "locationName": "routingRules", + "documentation": "

    The routing rules.

    " + } + } + }, "GetStageRequest": { "type": "structure", "members": { @@ -6890,6 +7280,81 @@ "HTTP" ] }, + "PutRoutingRuleRequest": { + "type": "structure", + "members": { + "Actions": { + "shape": "__listOfRoutingRuleAction", + "locationName": "actions", + "documentation": "

    The routing rule action.

    " + }, + "Conditions": { + "shape": "__listOfRoutingRuleCondition", + "locationName": "conditions", + "documentation": "

    The routing rule condition.

    " + }, + "DomainName": { + "shape": "__string", + "location": "uri", + "locationName": "domainName", + "documentation": "

    The domain name.

    " + }, + "DomainNameId": { + "shape": "__string", + "location": "querystring", + "locationName": "domainNameId", + "documentation": "

    The domain name ID.

    " + }, + "Priority": { + "shape": "RoutingRulePriority", + "locationName": "priority", + "documentation": "

    The routing rule priority.

    " + }, + "RoutingRuleId": { + "shape": "__string", + "location": "uri", + "locationName": "routingRuleId", + "documentation": "

    The routing rule ID.

    " + } + }, + "required": [ + "RoutingRuleId", + "DomainName", + "Actions", + "Priority", + "Conditions" + ] + }, + "PutRoutingRuleResponse": { + "type": "structure", + "members": { + "Actions": { + "shape": "__listOfRoutingRuleAction", + "locationName": "actions", + "documentation": "

    The routing rule action.

    " + }, + "Conditions": { + "shape": "__listOfRoutingRuleCondition", + "locationName": "conditions", + "documentation": "

    The conditions of the routing rule.

    " + }, + "Priority": { + "shape": "RoutingRulePriority", + "locationName": "priority", + "documentation": "

    The routing rule priority.

    " + }, + "RoutingRuleArn": { + "shape": "Arn", + "locationName": "routingRuleArn", + "documentation": "

    The routing rule ARN.

    " + }, + "RoutingRuleId": { + "shape": "Id", + "locationName": "routingRuleId", + "documentation": "

    The routing rule ID.

    " + } + } + }, "ReimportApiInput": { "type": "structure", "members": { @@ -7238,6 +7703,190 @@ }, "documentation": "

    Represents a collection of routes.

    " }, + "RoutingMode": { + "type": "string", + "enum": [ + "API_MAPPING_ONLY", + "ROUTING_RULE_ONLY", + "ROUTING_RULE_THEN_API_MAPPING" + ] + }, + "RoutingRule": { + "type": "structure", + "documentation": "

    Represents a routing rule.

    ", + "members": { + "Actions": { + "shape": "__listOfRoutingRuleAction", + "locationName": "actions", + "documentation": "

    The routing rule action.

    " + }, + "Conditions": { + "shape": "__listOfRoutingRuleCondition", + "locationName": "conditions", + "documentation": "

    The routing rule condition.

    " + }, + "Priority": { + "shape": "RoutingRulePriority", + "locationName": "priority", + "documentation": "

    The routing rule priority.

    " + }, + "RoutingRuleArn": { + "shape": "Arn", + "locationName": "routingRuleArn", + "documentation": "

    The routing rule ARN.

    " + }, + "RoutingRuleId": { + "shape": "Id", + "locationName": "routingRuleId", + "documentation": "

    The routing rule ID.

    " + } + } + }, + "RoutingRuleAction": { + "type": "structure", + "members": { + "InvokeApi": { + "shape": "RoutingRuleActionInvokeApi", + "locationName": "invokeApi" + } + }, + "documentation": "

    The routing rule action.

    ", + "required": [ + "InvokeApi" + ] + }, + "RoutingRuleActionInvokeApi": { + "type": "structure", + "members": { + "ApiId": { + "shape": "Id", + "locationName": "apiId" + }, + "Stage": { + "shape": "StringWithLengthBetween1And128", + "locationName": "stage" + }, + "StripBasePath": { + "shape": "__boolean", + "locationName": "stripBasePath", + "documentation": "

    The strip base path setting.

    " + } + }, + "documentation": "

    Represents an InvokeApi action.

    ", + "required": [ + "Stage", + "ApiId" + ] + }, + "RoutingRuleCondition": { + "type": "structure", + "documentation": "

    Represents a routing rule condition.

    ", + "members": { + "MatchBasePaths": { + "shape": "RoutingRuleMatchBasePaths", + "locationName": "matchBasePaths", + "documentation": "

    The base path to be matched.

    " + }, + "MatchHeaders": { + "shape": "RoutingRuleMatchHeaders", + "locationName": "matchHeaders", + "documentation": "

    The headers to be matched.

    " + } + } + }, + "RoutingRuleInput": { + "type": "structure", + "members": { + "Actions": { + "shape": "__listOfRoutingRuleAction", + "locationName": "actions" + }, + "Conditions": { + "shape": "__listOfRoutingRuleCondition", + "locationName": "conditions" + }, + "Priority": { + "shape": "RoutingRulePriority", + "locationName": "priority" + } + }, + "required": [ + "Actions", + "Priority", + "Conditions" + ] + }, + "RoutingRuleMatchBasePaths": { + "type": "structure", + "members": { + "AnyOf": { + "shape": "__listOfSelectionKey", + "locationName": "anyOf", + "documentation": "The string of the case sensitive base path to be matched." + } + }, + "documentation": "

    Represents a MatchBasePaths condition.

    ", + "required": [ + "AnyOf" + ] + }, + "RoutingRuleMatchHeaderValue": { + "type": "structure", + "documentation": "

    Represents a MatchHeaderValue.

    ", + "members": { + "Header": { + "shape": "SelectionKey", + "locationName": "header" + }, + "ValueGlob": { + "shape": "SelectionExpression", + "locationName": "valueGlob" + } + }, + "required": [ + "ValueGlob", + "Header" + ] + }, + "RoutingRuleMatchHeaders": { + "type": "structure", + "documentation": "

    Represents a MatchHeaders condition.

    ", + "members": { + "AnyOf": { + "shape": "__listOfRoutingRuleMatchHeaderValue", + "locationName": "anyOf", + "documentation": "

    The header name and header value glob to be matched. The matchHeaders condition is satisfied if any of the header name and header value glob pairs match.

    " + } + }, + "required": [ + "AnyOf" + ] + }, + "RoutingRulePriority": { + "type": "integer", + "min": 1, + "max": 1000000, + "documentation": "

    The routing rule priority.

    " + }, + "MaxResults": { + "type": "integer", + "min": 1, + "max": 100 + }, + "RoutingRules": { + "type": "structure", + "documentation": "

    A collection of routing rules.

    ", + "members": { + "NextToken": { + "shape": "NextToken", + "locationName": "nextToken" + }, + "RoutingRules": { + "shape": "__listOfRoutingRule", + "locationName": "routingRules" + } + } + }, "SecurityGroupIdList": { "type": "list", "documentation": "

    A list of security group IDs for the VPC link.

    ", @@ -7442,8 +8091,7 @@ }, "TagResourceResponse": { "type": "structure", - "members": { - } + "members": {} }, "Tags": { "type": "map", @@ -8127,6 +8775,11 @@ "shape": "MutualTlsAuthenticationInput", "locationName": "mutualTlsAuthentication", "documentation": "

    The mutual TLS authentication configuration for a custom domain name.

    " + }, + "RoutingMode": { + "shape": "RoutingMode", + "locationName": "routingMode", + "documentation": "

    The routing mode.

    " } }, "documentation": "

    Represents the input parameters for an UpdateDomainName request.

    " @@ -8149,6 +8802,11 @@ "shape": "MutualTlsAuthenticationInput", "locationName": "mutualTlsAuthentication", "documentation": "

    The mutual TLS authentication configuration for a custom domain name.

    " + }, + "RoutingMode": { + "shape": "RoutingMode", + "locationName": "routingMode", + "documentation": "

    The routing mode.

    " } }, "documentation": "

    Updates a DomainName.

    ", @@ -8169,6 +8827,10 @@ "locationName": "domainName", "documentation": "

    The name of the DomainName resource.

    " }, + "DomainNameArn": { + "shape": "Arn", + "locationName": "domainNameArn" + }, "DomainNameConfigurations": { "shape": "DomainNameConfigurations", "locationName": "domainNameConfigurations", @@ -8179,6 +8841,11 @@ "locationName": "mutualTlsAuthentication", "documentation": "

    The mutual TLS authentication configuration for a custom domain name.

    " }, + "RoutingMode": { + "shape": "RoutingMode", + "locationName": "routingMode", + "documentation": "

    The routing mode.

    " + }, "Tags": { "shape": "Tags", "locationName": "tags", @@ -9448,6 +10115,36 @@ "shape": "RouteResponse" } }, + "__listOfRoutingRule": { + "type": "list", + "member": { + "shape": "RoutingRule" + } + }, + "__listOfRoutingRuleAction": { + "type": "list", + "member": { + "shape": "RoutingRuleAction" + } + }, + "__listOfRoutingRuleCondition": { + "type": "list", + "member": { + "shape": "RoutingRuleCondition" + } + }, + "__listOfRoutingRuleMatchHeaderValue": { + "type": "list", + "member": { + "shape": "RoutingRuleMatchHeaderValue" + } + }, + "__listOfSelectionKey": { + "type": "list", + "member": { + "shape": "SelectionKey" + } + }, "__listOfStage": { "type": "list", "member": { diff --git a/services/appconfig/pom.xml b/services/appconfig/pom.xml index e59ed6fe22ee..405abdf21915 100644 --- a/services/appconfig/pom.xml +++ b/services/appconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT appconfig AWS Java SDK :: Services :: AppConfig diff --git a/services/appconfigdata/pom.xml b/services/appconfigdata/pom.xml index a3b2ddbe03ef..7043923e3ecc 100644 --- a/services/appconfigdata/pom.xml +++ b/services/appconfigdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT appconfigdata AWS Java SDK :: Services :: App Config Data diff --git a/services/appfabric/pom.xml b/services/appfabric/pom.xml index 9188f1a52b97..cd21052cca6d 100644 --- a/services/appfabric/pom.xml +++ b/services/appfabric/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT appfabric AWS Java SDK :: Services :: App Fabric diff --git a/services/appflow/pom.xml b/services/appflow/pom.xml index 2cb2eea7b9e7..3ddfd05bcef6 100644 --- a/services/appflow/pom.xml +++ b/services/appflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT appflow AWS Java SDK :: Services :: Appflow diff --git a/services/appintegrations/pom.xml b/services/appintegrations/pom.xml index 31682c96f365..2a3cc152bb72 100644 --- a/services/appintegrations/pom.xml +++ b/services/appintegrations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT appintegrations AWS Java SDK :: Services :: App Integrations diff --git a/services/applicationautoscaling/pom.xml b/services/applicationautoscaling/pom.xml index 3a4b1b83426b..34d885420f0d 100644 --- a/services/applicationautoscaling/pom.xml +++ b/services/applicationautoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT applicationautoscaling AWS Java SDK :: Services :: AWS Application Auto Scaling diff --git a/services/applicationcostprofiler/pom.xml b/services/applicationcostprofiler/pom.xml index 21de49c70b23..2c1c9bb2b69a 100644 --- a/services/applicationcostprofiler/pom.xml +++ b/services/applicationcostprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT applicationcostprofiler AWS Java SDK :: Services :: Application Cost Profiler diff --git a/services/applicationdiscovery/pom.xml b/services/applicationdiscovery/pom.xml index 80f663ec314a..b9a8ce06fd06 100644 --- a/services/applicationdiscovery/pom.xml +++ b/services/applicationdiscovery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT applicationdiscovery AWS Java SDK :: Services :: AWS Application Discovery Service diff --git a/services/applicationinsights/pom.xml 
b/services/applicationinsights/pom.xml index 857da88863dc..7cea32918db8 100644 --- a/services/applicationinsights/pom.xml +++ b/services/applicationinsights/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT applicationinsights AWS Java SDK :: Services :: Application Insights diff --git a/services/applicationsignals/pom.xml b/services/applicationsignals/pom.xml index 258f6f06caee..687e31643d75 100644 --- a/services/applicationsignals/pom.xml +++ b/services/applicationsignals/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT applicationsignals AWS Java SDK :: Services :: Application Signals diff --git a/services/appmesh/pom.xml b/services/appmesh/pom.xml index 9caeae6c1cc9..14c42e3fb350 100644 --- a/services/appmesh/pom.xml +++ b/services/appmesh/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT appmesh AWS Java SDK :: Services :: App Mesh diff --git a/services/apprunner/pom.xml b/services/apprunner/pom.xml index 6f6478abb30c..4fa9dc2c58ff 100644 --- a/services/apprunner/pom.xml +++ b/services/apprunner/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT apprunner AWS Java SDK :: Services :: App Runner diff --git a/services/appstream/pom.xml b/services/appstream/pom.xml index 7ac284e40ad7..b34444bdb7cb 100644 --- a/services/appstream/pom.xml +++ b/services/appstream/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT appstream AWS Java SDK :: Services :: Amazon AppStream diff --git a/services/appsync/pom.xml b/services/appsync/pom.xml index d18ca90828f1..4d04575bf44f 100644 --- a/services/appsync/pom.xml +++ b/services/appsync/pom.xml @@ -21,7 +21,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT appsync diff --git a/services/appsync/src/main/resources/codegen-resources/service-2.json b/services/appsync/src/main/resources/codegen-resources/service-2.json index 2d2ad5fce51b..723f353a0db8 100644 --- a/services/appsync/src/main/resources/codegen-resources/service-2.json +++ b/services/appsync/src/main/resources/codegen-resources/service-2.json @@ -1964,11 +1964,17 @@ }, "transitEncryptionEnabled":{ "shape":"Boolean", - "documentation":"

    Transit encryption flag when connecting to cache. You cannot update this setting after creation.

    " + "documentation":"

    Transit encryption flag when connecting to cache. You cannot update this setting after creation.

    ", + "deprecated":true, + "deprecatedMessage":"transitEncryptionEnabled attribute is deprecated. Encryption in transit is always enabled.", + "deprecatedSince":"5/15/2025" }, "atRestEncryptionEnabled":{ "shape":"Boolean", - "documentation":"

    At-rest encryption flag for cache. You cannot update this setting after creation.

    " + "documentation":"

    At-rest encryption flag for cache. You cannot update this setting after creation.

    ", + "deprecated":true, + "deprecatedMessage":"atRestEncryptionEnabled attribute is deprecated. Encryption at rest is always enabled.", + "deprecatedSince":"5/15/2025" }, "apiCachingBehavior":{ "shape":"ApiCachingBehavior", @@ -2690,8 +2696,7 @@ }, "DeleteApiCacheResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Represents the output of a DeleteApiCache operation.

    " }, "DeleteApiKeyRequest":{ @@ -2717,8 +2722,7 @@ }, "DeleteApiKeyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteApiRequest":{ "type":"structure", @@ -2734,8 +2738,7 @@ }, "DeleteApiResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteChannelNamespaceRequest":{ "type":"structure", @@ -2760,8 +2763,7 @@ }, "DeleteChannelNamespaceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteDataSourceRequest":{ "type":"structure", @@ -2786,8 +2788,7 @@ }, "DeleteDataSourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteDomainNameRequest":{ "type":"structure", @@ -2803,8 +2804,7 @@ }, "DeleteDomainNameResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteFunctionRequest":{ "type":"structure", @@ -2829,8 +2829,7 @@ }, "DeleteFunctionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteGraphqlApiRequest":{ "type":"structure", @@ -2846,8 +2845,7 @@ }, "DeleteGraphqlApiResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteResolverRequest":{ "type":"structure", @@ -2879,8 +2877,7 @@ }, "DeleteResolverResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteTypeRequest":{ "type":"structure", @@ -2905,8 +2902,7 @@ }, "DeleteTypeResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeltaSyncConfig":{ "type":"structure", @@ -2946,8 +2942,7 @@ }, "DisassociateApiResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DisassociateMergedGraphqlApiRequest":{ "type":"structure", @@ -3359,8 +3354,7 @@ }, "FlushApiCacheResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Represents the output of a FlushApiCache operation.

    " }, "FunctionConfiguration":{ @@ -5294,8 +5288,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -5379,8 +5372,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateApiCacheRequest":{ "type":"structure", diff --git a/services/apptest/pom.xml b/services/apptest/pom.xml index e335a476e551..ed6703567636 100644 --- a/services/apptest/pom.xml +++ b/services/apptest/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT apptest AWS Java SDK :: Services :: App Test diff --git a/services/arczonalshift/pom.xml b/services/arczonalshift/pom.xml index f59822c29731..68fac0951178 100644 --- a/services/arczonalshift/pom.xml +++ b/services/arczonalshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT arczonalshift AWS Java SDK :: Services :: ARC Zonal Shift diff --git a/services/artifact/pom.xml b/services/artifact/pom.xml index d8d217f2e5bd..559c76af43ff 100644 --- a/services/artifact/pom.xml +++ b/services/artifact/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT artifact AWS Java SDK :: Services :: Artifact diff --git a/services/athena/pom.xml b/services/athena/pom.xml index 65c94b8e0e29..7be176e9859c 100644 --- a/services/athena/pom.xml +++ b/services/athena/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT athena AWS Java SDK :: Services :: Amazon Athena diff --git a/services/athena/src/main/resources/codegen-resources/service-2.json b/services/athena/src/main/resources/codegen-resources/service-2.json index 18c356d7f696..5de96ff11c9e 100644 --- a/services/athena/src/main/resources/codegen-resources/service-2.json +++ b/services/athena/src/main/resources/codegen-resources/service-2.json @@ -2516,6 +2516,10 @@ "MaxResults":{ "shape":"MaxQueryResults", "documentation":"

    The maximum number of results (rows) to return in this request.

    " + }, + "QueryResultType":{ + "shape":"QueryResultType", + "documentation":"

    When you set this to DATA_ROWS or empty, GetQueryResults returns the query results in rows. If set to DATA_MANIFEST, it returns the manifest file in rows. Only the query types CREATE TABLE AS SELECT, UNLOAD, and INSERT can generate a manifest file. If you use DATA_MANIFEST for other query types, the query will fail.

    " } } }, @@ -3304,6 +3308,50 @@ } }, "Long":{"type":"long"}, + "ManagedQueryResultsConfiguration":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

    If set to true, allows you to store query results in Athena owned storage. If set to false, workgroup members store query results in the location specified under ResultConfiguration$OutputLocation. The default is false. A workgroup cannot have the ResultConfiguration$OutputLocation parameter when you set this field to true.

    " + }, + "EncryptionConfiguration":{ + "shape":"ManagedQueryResultsEncryptionConfiguration", + "documentation":"

    If you encrypt query and calculation results in Athena owned storage, this field indicates the encryption option (for example, SSE_KMS or CSE_KMS) and key information.

    " + } + }, + "documentation":"

    The configuration for storing results in Athena owned storage, which includes whether this feature is enabled and the encryption configuration, if any, used for encrypting query results.

    " + }, + "ManagedQueryResultsConfigurationUpdates":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"BoxedBoolean", + "documentation":"

    If set to true, specifies that Athena manages query results in Athena owned storage.

    " + }, + "EncryptionConfiguration":{ + "shape":"ManagedQueryResultsEncryptionConfiguration", + "documentation":"

    If you encrypt query and calculation results in Athena owned storage, this field indicates the encryption option (for example, SSE_KMS or CSE_KMS) and key information.

    " + }, + "RemoveEncryptionConfiguration":{ + "shape":"BoxedBoolean", + "documentation":"

    If set to true, it removes the workgroup from Athena owned storage. The existing query results are cleaned up after 24 hours. You must provide a query results location under ResultConfiguration$OutputLocation.

    " + } + }, + "documentation":"

    Updates the configuration for managed query results.

    " + }, + "ManagedQueryResultsEncryptionConfiguration":{ + "type":"structure", + "required":["KmsKey"], + "members":{ + "KmsKey":{ + "shape":"KmsKey", + "documentation":"

    The ARN of a KMS key for encrypting managed query results.

    " + } + }, + "documentation":"

    If you encrypt query and calculation results in Athena owned storage, this field indicates the encryption option (for example, SSE_KMS or CSE_KMS) and key information.

    " + }, "MaxApplicationDPUSizesCount":{ "type":"integer", "max":100, @@ -3645,7 +3693,11 @@ }, "StatementType":{ "shape":"StatementType", - "documentation":"

    The type of query statement that was run. DDL indicates DDL query statements. DML indicates DML (Data Manipulation Language) query statements, such as CREATE TABLE AS SELECT. UTILITY indicates query statements other than DDL and DML, such as SHOW CREATE TABLE, EXPLAIN, DESCRIBE, or SHOW TABLES.

    " + "documentation":"

    The type of query statement that was run. DDL indicates DDL query statements. DML indicates DML (Data Manipulation Language) query statements, such as CREATE TABLE AS SELECT. UTILITY indicates query statements other than DDL and DML, such as SHOW CREATE TABLE or DESCRIBE TABLE.

    " + }, + "ManagedQueryResultsConfiguration":{ + "shape":"ManagedQueryResultsConfiguration", + "documentation":"

    The configuration for storing results in Athena owned storage, which includes whether this feature is enabled and the encryption configuration, if any, used for encrypting query results.

    " }, "ResultConfiguration":{ "shape":"ResultConfiguration", @@ -3798,6 +3850,13 @@ }, "documentation":"

    The completion date, current state, submission time, and state change reason (if applicable) for the query execution.

    " }, + "QueryResultType":{ + "type":"string", + "enum":[ + "DATA_MANIFEST", + "DATA_ROWS" + ] + }, "QueryResultsS3AccessGrantsConfiguration":{ "type":"structure", "required":[ @@ -4904,6 +4963,10 @@ "shape":"ResultConfiguration", "documentation":"

    The configuration for the workgroup, which includes the location in Amazon S3 where query and calculation results are stored and the encryption option, if any, used for query and calculation results. To run the query, you must specify the query results location in one of two ways: either in the workgroup using this setting, or for individual queries (client-side) using ResultConfiguration$OutputLocation. If neither is set, Athena issues an error that no output location is provided.

    " }, + "ManagedQueryResultsConfiguration":{ + "shape":"ManagedQueryResultsConfiguration", + "documentation":"

    The configuration for storing results in Athena owned storage, which includes whether this feature is enabled and the encryption configuration, if any, used for encrypting query results.

    " + }, "EnforceWorkGroupConfiguration":{ "shape":"BoxedBoolean", "documentation":"

    If set to \"true\", the settings for the workgroup override client-side settings. If set to \"false\", client-side settings are used. For more information, see Workgroup Settings Override Client-Side Settings.

    " @@ -4962,6 +5025,10 @@ "shape":"ResultConfigurationUpdates", "documentation":"

    The result configuration information about the queries in this workgroup that will be updated. Includes the updated results location and an updated option for encrypting query results.

    " }, + "ManagedQueryResultsConfigurationUpdates":{ + "shape":"ManagedQueryResultsConfigurationUpdates", + "documentation":"

    Updates configuration information for managed query results in the workgroup.

    " + }, "PublishCloudWatchMetricsEnabled":{ "shape":"BoxedBoolean", "documentation":"

    Indicates whether this workgroup enables publishing metrics to Amazon CloudWatch.

    " diff --git a/services/auditmanager/pom.xml b/services/auditmanager/pom.xml index 0f2fd7297d1f..8a543bc464a2 100644 --- a/services/auditmanager/pom.xml +++ b/services/auditmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT auditmanager AWS Java SDK :: Services :: Audit Manager diff --git a/services/autoscaling/pom.xml b/services/autoscaling/pom.xml index 867d363b8432..1f8789957cf3 100644 --- a/services/autoscaling/pom.xml +++ b/services/autoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT autoscaling AWS Java SDK :: Services :: Auto Scaling diff --git a/services/autoscaling/src/main/resources/codegen-resources/service-2.json b/services/autoscaling/src/main/resources/codegen-resources/service-2.json index 6929ddade54a..1b2753c70891 100644 --- a/services/autoscaling/src/main/resources/codegen-resources/service-2.json +++ b/services/autoscaling/src/main/resources/codegen-resources/service-2.json @@ -1304,8 +1304,7 @@ }, "AttachLoadBalancerTargetGroupsResultType":{ "type":"structure", - "members":{ - } + "members":{} }, "AttachLoadBalancerTargetGroupsType":{ "type":"structure", @@ -1326,8 +1325,7 @@ }, "AttachLoadBalancersResultType":{ "type":"structure", - "members":{ - } + "members":{} }, "AttachLoadBalancersType":{ "type":"structure", @@ -1348,8 +1346,7 @@ }, "AttachTrafficSourcesResultType":{ "type":"structure", - "members":{ - } + "members":{} }, "AttachTrafficSourcesType":{ "type":"structure", @@ -1817,7 +1814,7 @@ }, "DeviceName":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The device name assigned to the volume (for example, /dev/sdh or xvdh). For more information, see Device naming on Linux instances in the Amazon EC2 User Guide for Linux Instances.

    To define a block device mapping, set the device name and exactly one of the following properties: Ebs, NoDevice, or VirtualName.

    " + "documentation":"

    The device name assigned to the volume (for example, /dev/sdh or xvdh). For more information, see Device naming on Linux instances in the Amazon EC2 User Guide.

    To define a block device mapping, set the device name and exactly one of the following properties: Ebs, NoDevice, or VirtualName.

    " }, "Ebs":{ "shape":"Ebs", @@ -1947,8 +1944,7 @@ }, "CompleteLifecycleActionAnswer":{ "type":"structure", - "members":{ - } + "members":{} }, "CompleteLifecycleActionType":{ "type":"structure", @@ -1987,7 +1983,8 @@ "enum":[ "intel", "amd", - "amazon-web-services" + "amazon-web-services", + "apple" ] }, "CpuManufacturers":{ @@ -2071,7 +2068,7 @@ }, "PlacementGroup":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The name of the placement group into which to launch your instances. For more information, see Placement groups in the Amazon EC2 User Guide for Linux Instances.

    A cluster placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a cluster placement group.

    " + "documentation":"

    The name of the placement group into which to launch your instances. For more information, see Placement groups in the Amazon EC2 User Guide.

    A cluster placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a cluster placement group.

    " }, "VPCZoneIdentifier":{ "shape":"XmlStringMaxLen5000", @@ -2153,11 +2150,11 @@ }, "ImageId":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The ID of the Amazon Machine Image (AMI) that was assigned during registration. For more information, see Find a Linux AMI in the Amazon EC2 User Guide for Linux Instances.

    If you specify InstanceId, an ImageId is not required.

    " + "documentation":"

    The ID of the Amazon Machine Image (AMI) that was assigned during registration. For more information, see Find a Linux AMI in the Amazon EC2 User Guide.

    If you specify InstanceId, an ImageId is not required.

    " }, "KeyName":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The name of the key pair. For more information, see Amazon EC2 key pairs and Amazon EC2 instances in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    The name of the key pair. For more information, see Amazon EC2 key pairs and Amazon EC2 instances in the Amazon EC2 User Guide.

    " }, "SecurityGroups":{ "shape":"SecurityGroups", @@ -2181,19 +2178,19 @@ }, "InstanceType":{ "shape":"XmlStringMaxLen255", - "documentation":"

    Specifies the instance type of the EC2 instance. For information about available instance types, see Available instance types in the Amazon EC2 User Guide for Linux Instances.

    If you specify InstanceId, an InstanceType is not required.

    " + "documentation":"

    Specifies the instance type of the EC2 instance. For information about available instance types, see Available instance types in the Amazon EC2 User Guide.

    If you specify InstanceId, an InstanceType is not required.

    " }, "KernelId":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The ID of the kernel associated with the AMI.

    We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    The ID of the kernel associated with the AMI.

    We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon EC2 User Guide.

    " }, "RamdiskId":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The ID of the RAM disk to select.

    We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    The ID of the RAM disk to select.

    We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon EC2 User Guide.

    " }, "BlockDeviceMappings":{ "shape":"BlockDeviceMappings", - "documentation":"

    The block device mapping entries that define the block devices to attach to the instances at launch. By default, the block devices specified in the block device mapping for the AMI are used. For more information, see Block device mappings in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    The block device mapping entries that define the block devices to attach to the instances at launch. By default, the block devices specified in the block device mapping for the AMI are used. For more information, see Block device mappings in the Amazon EC2 User Guide.

    " }, "InstanceMonitoring":{ "shape":"InstanceMonitoring", @@ -2209,7 +2206,7 @@ }, "EbsOptimized":{ "shape":"EbsOptimized", - "documentation":"

    Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. For more information, see Amazon EBS-optimized instances in the Amazon EC2 User Guide for Linux Instances.

    The default value is false.

    " + "documentation":"

    Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. For more information, see Amazon EBS-optimized instances in the Amazon EC2 User Guide.

    The default value is false.

    " }, "AssociatePublicIpAddress":{ "shape":"AssociatePublicIpAddress", @@ -2286,8 +2283,7 @@ }, "DeleteLifecycleHookAnswer":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteLifecycleHookType":{ "type":"structure", @@ -2366,8 +2362,7 @@ }, "DeleteWarmPoolAnswer":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteWarmPoolType":{ "type":"structure", @@ -2833,8 +2828,7 @@ }, "DetachLoadBalancerTargetGroupsResultType":{ "type":"structure", - "members":{ - } + "members":{} }, "DetachLoadBalancerTargetGroupsType":{ "type":"structure", @@ -2855,8 +2849,7 @@ }, "DetachLoadBalancersResultType":{ "type":"structure", - "members":{ - } + "members":{} }, "DetachLoadBalancersType":{ "type":"structure", @@ -2877,8 +2870,7 @@ }, "DetachTrafficSourcesResultType":{ "type":"structure", - "members":{ - } + "members":{} }, "DetachTrafficSourcesType":{ "type":"structure", @@ -3455,7 +3447,7 @@ }, "CpuManufacturers":{ "shape":"CpuManufacturers", - "documentation":"

    Lists which specific CPU manufacturers to include.

    • For instance types with Intel CPUs, specify intel.

    • For instance types with AMD CPUs, specify amd.

    • For instance types with Amazon Web Services CPUs, specify amazon-web-services.

    Don't confuse the CPU hardware manufacturer with the CPU hardware architecture. Instances will be launched with a compatible CPU architecture based on the Amazon Machine Image (AMI) that you specify in your launch template.

    Default: Any manufacturer

    " + "documentation":"

    Lists which specific CPU manufacturers to include.

    • For instance types with Intel CPUs, specify intel.

    • For instance types with AMD CPUs, specify amd.

    • For instance types with Amazon Web Services CPUs, specify amazon-web-services.

    • For instance types with Apple CPUs, specify apple.

    Don't confuse the CPU hardware manufacturer with the CPU hardware architecture. Instances will be launched with a compatible CPU architecture based on the Amazon Machine Image (AMI) that you specify in your launch template.

    Default: Any manufacturer

    " }, "MemoryGiBPerVCpu":{ "shape":"MemoryGiBPerVCpuRequest", @@ -3467,7 +3459,7 @@ }, "InstanceGenerations":{ "shape":"InstanceGenerations", - "documentation":"

    Indicates whether current or previous generation instance types are included.

    • For current generation instance types, specify current. The current generation includes EC2 instance types currently recommended for use. This typically includes the latest two to three generations in each instance family. For more information, see Instance types in the Amazon EC2 User Guide for Linux Instances.

    • For previous generation instance types, specify previous.

    Default: Any current or previous generation

    " + "documentation":"

    Indicates whether current or previous generation instance types are included.

    • For current generation instance types, specify current. The current generation includes EC2 instance types currently recommended for use. This typically includes the latest two to three generations in each instance family. For more information, see Instance types in the Amazon EC2 User Guide.

    • For previous generation instance types, specify previous.

    Default: Any current or previous generation

    " }, "SpotMaxPricePercentageOverLowestPrice":{ "shape":"NullablePositiveInteger", @@ -3487,7 +3479,7 @@ }, "BurstablePerformance":{ "shape":"BurstablePerformance", - "documentation":"

    Indicates whether burstable performance instance types are included, excluded, or required. For more information, see Burstable performance instances in the Amazon EC2 User Guide for Linux Instances.

    Default: excluded

    " + "documentation":"

    Indicates whether burstable performance instance types are included, excluded, or required. For more information, see Burstable performance instances in the Amazon EC2 User Guide.

    Default: excluded

    " }, "RequireHibernateSupport":{ "shape":"NullableBoolean", @@ -3499,7 +3491,7 @@ }, "LocalStorage":{ "shape":"LocalStorage", - "documentation":"

    Indicates whether instance types with instance store volumes are included, excluded, or required. For more information, see Amazon EC2 instance store in the Amazon EC2 User Guide for Linux Instances.

    Default: included

    " + "documentation":"

    Indicates whether instance types with instance store volumes are included, excluded, or required. For more information, see Amazon EC2 instance store in the Amazon EC2 User Guide.

    Default: included

    " }, "LocalStorageTypes":{ "shape":"LocalStorageTypes", @@ -3511,7 +3503,7 @@ }, "BaselineEbsBandwidthMbps":{ "shape":"BaselineEbsBandwidthMbpsRequest", - "documentation":"

    The minimum and maximum baseline bandwidth performance for an instance type, in Mbps. For more information, see Amazon EBS–optimized instances in the Amazon EC2 User Guide for Linux Instances.

    Default: No minimum or maximum limits

    " + "documentation":"

    The minimum and maximum baseline bandwidth performance for an instance type, in Mbps. For more information, see Amazon EBS–optimized instances in the Amazon EC2 User Guide.

    Default: No minimum or maximum limits

    " }, "AcceleratorTypes":{ "shape":"AcceleratorTypes", @@ -3546,7 +3538,7 @@ "documentation":"

    The baseline performance factors for the instance requirements.

    " } }, - "documentation":"

    The attributes for the instance types for a mixed instances policy. Amazon EC2 Auto Scaling uses your specified requirements to identify instance types. Then, it uses your On-Demand and Spot allocation strategies to launch instances from these instance types.

    When you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.

    To limit the list of instance types from which Amazon EC2 Auto Scaling can identify matching instance types, you can use one of the following parameters, but not both in the same request:

    • AllowedInstanceTypes - The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes.

    • ExcludedInstanceTypes - The instance types to exclude from the list, even if they match your specified attributes.

    You must specify VCpuCount and MemoryMiB. All other attributes are optional. Any unspecified optional attribute is set to its default.

    For more information, see Create a mixed instances group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide. For help determining which instance types match your attributes before you apply them to your Auto Scaling group, see Preview instance types with specified attributes in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    The attributes for the instance types for a mixed instances policy. Amazon EC2 Auto Scaling uses your specified requirements to identify instance types. Then, it uses your On-Demand and Spot allocation strategies to launch instances from these instance types.

    When you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.

    To limit the list of instance types from which Amazon EC2 Auto Scaling can identify matching instance types, you can use one of the following parameters, but not both in the same request:

    • AllowedInstanceTypes - The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes.

    • ExcludedInstanceTypes - The instance types to exclude from the list, even if they match your specified attributes.

    You must specify VCpuCount and MemoryMiB. All other attributes are optional. Any unspecified optional attribute is set to its default.

    For more information, see Create a mixed instances group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide. For help determining which instance types match your attributes before you apply them to your Auto Scaling group, see Preview instance types with specified attributes in the Amazon EC2 User Guide.

    " }, "InstanceReusePolicy":{ "type":"structure", @@ -3664,11 +3656,11 @@ }, "ImageId":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances. For more information, see Find a Linux AMI in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances. For more information, see Find a Linux AMI in the Amazon EC2 User Guide.

    " }, "KeyName":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The name of the key pair.

    For more information, see Amazon EC2 key pairs and Amazon EC2 instances in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    The name of the key pair.

    For more information, see Amazon EC2 key pairs and Amazon EC2 instances in the Amazon EC2 User Guide.

    " }, "SecurityGroups":{ "shape":"SecurityGroups", @@ -3684,11 +3676,11 @@ }, "UserData":{ "shape":"XmlStringUserData", - "documentation":"

    The user data to make available to the launched EC2 instances. For more information, see Instance metadata and user data (Linux) and Instance metadata and user data (Windows). If you are using a command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text. User data is limited to 16 KB.

    " + "documentation":"

    The user data to make available to the launched EC2 instances. For more information, see Instance metadata and user data in the Amazon EC2 User Guide. If you are using a command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text. User data is limited to 16 KB.

    " }, "InstanceType":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The instance type for the instances. For information about available instance types, see Available instance types in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    The instance type for the instances. For information about available instance types, see Available instance types in the Amazon EC2 User Guide.

    " }, "KernelId":{ "shape":"XmlStringMaxLen255", @@ -3700,7 +3692,7 @@ }, "BlockDeviceMappings":{ "shape":"BlockDeviceMappings", - "documentation":"

    The block device mapping entries that define the block devices to attach to the instances at launch. By default, the block devices specified in the block device mapping for the AMI are used. For more information, see Block device mappings in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    The block device mapping entries that define the block devices to attach to the instances at launch. By default, the block devices specified in the block device mapping for the AMI are used. For more information, see Block device mappings in the Amazon EC2 User Guide.

    " }, "InstanceMonitoring":{ "shape":"InstanceMonitoring", @@ -3720,7 +3712,7 @@ }, "EbsOptimized":{ "shape":"EbsOptimized", - "documentation":"

    Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). For more information, see Amazon EBS-optimized instances in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). For more information, see Amazon EBS-optimized instances in the Amazon EC2 User Guide.

    " }, "AssociatePublicIpAddress":{ "shape":"AssociatePublicIpAddress", @@ -3811,7 +3803,7 @@ "members":{ "InstanceType":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The instance type, such as m3.xlarge. You must specify an instance type that is supported in your requested Region and Availability Zones. For more information, see Instance types in the Amazon EC2 User Guide for Linux Instances.

    You can specify up to 40 instance types per Auto Scaling group.

    " + "documentation":"

    The instance type, such as m3.xlarge. You must specify an instance type that is supported in your requested Region and Availability Zones. For more information, see Instance types in the Amazon EC2 User Guide.

    You can specify up to 40 instance types per Auto Scaling group.

    " }, "WeightedCapacity":{ "shape":"XmlStringMaxLen32", @@ -4308,7 +4300,7 @@ "documentation":"

    The maximum amount of network bandwidth, in gigabits per second (Gbps).

    " } }, - "documentation":"

    Specifies the minimum and maximum for the NetworkBandwidthGbps object when you specify InstanceRequirements for an Auto Scaling group.

    Setting the minimum bandwidth does not guarantee that your instance will achieve the minimum bandwidth. Amazon EC2 will identify instance types that support the specified minimum bandwidth, but the actual bandwidth of your instance might go below the specified minimum at times. For more information, see Available instance bandwidth in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    Specifies the minimum and maximum for the NetworkBandwidthGbps object when you specify InstanceRequirements for an Auto Scaling group.

    Setting the minimum bandwidth does not guarantee that your instance will achieve the minimum bandwidth. Amazon EC2 will identify instance types that support the specified minimum bandwidth, but the actual bandwidth of your instance might go below the specified minimum at times. For more information, see Available instance bandwidth in the Amazon EC2 User Guide.

    " }, "NetworkInterfaceCountRequest":{ "type":"structure", @@ -4678,8 +4670,7 @@ "ProtectedFromScaleIn":{"type":"boolean"}, "PutLifecycleHookAnswer":{ "type":"structure", - "members":{ - } + "members":{} }, "PutLifecycleHookType":{ "type":"structure", @@ -4860,8 +4851,7 @@ }, "PutWarmPoolAnswer":{ "type":"structure", - "members":{ - } + "members":{} }, "PutWarmPoolType":{ "type":"structure", @@ -4891,8 +4881,7 @@ }, "RecordLifecycleActionHeartbeatAnswer":{ "type":"structure", - "members":{ - } + "members":{} }, "RecordLifecycleActionHeartbeatType":{ "type":"structure", @@ -5368,8 +5357,7 @@ }, "SetInstanceProtectionAnswer":{ "type":"structure", - "members":{ - } + "members":{} }, "SetInstanceProtectionQuery":{ "type":"structure", @@ -5791,7 +5779,7 @@ }, "PlacementGroup":{ "shape":"UpdatePlacementGroupParam", - "documentation":"

    The name of an existing placement group into which to launch your instances. To remove the placement group setting, pass an empty string for placement-group. For more information about placement groups, see Placement groups in the Amazon EC2 User Guide for Linux Instances.

    A cluster placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a cluster placement group.

    " + "documentation":"

    The name of an existing placement group into which to launch your instances. To remove the placement group setting, pass an empty string for placement-group. For more information about placement groups, see Placement groups in the Amazon EC2 User Guide.

    A cluster placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a cluster placement group.

    " }, "VPCZoneIdentifier":{ "shape":"XmlStringMaxLen5000", diff --git a/services/autoscalingplans/pom.xml b/services/autoscalingplans/pom.xml index 225f8c6b096d..1fbee4130651 100644 --- a/services/autoscalingplans/pom.xml +++ b/services/autoscalingplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT autoscalingplans AWS Java SDK :: Services :: Auto Scaling Plans diff --git a/services/b2bi/pom.xml b/services/b2bi/pom.xml index 2d985f925478..e07e8f8b7f31 100644 --- a/services/b2bi/pom.xml +++ b/services/b2bi/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT b2bi AWS Java SDK :: Services :: B2 Bi diff --git a/services/backup/pom.xml b/services/backup/pom.xml index 76dcfdef2d2f..3e9446bf0bca 100644 --- a/services/backup/pom.xml +++ b/services/backup/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT backup AWS Java SDK :: Services :: Backup diff --git a/services/backup/src/main/resources/codegen-resources/service-2.json b/services/backup/src/main/resources/codegen-resources/service-2.json index 4727e6f741e5..b438f2549002 100644 --- a/services/backup/src/main/resources/codegen-resources/service-2.json +++ b/services/backup/src/main/resources/codegen-resources/service-2.json @@ -1218,7 +1218,7 @@ {"shape":"MissingParameterValueException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

    Returns the tags assigned to the resource, such as a target recovery point, backup plan, or backup vault.

    ", + "documentation":"

    Returns the tags assigned to the resource, such as a target recovery point, backup plan, or backup vault.

    This operation returns results depending on the resource type used in the value for resourceArn. For example, recovery points of Amazon DynamoDB with Advanced Settings have an ARN (Amazon Resource Name) that begins with arn:aws:backup. Recovery points (backups) of DynamoDB without Advanced Settings enabled have an ARN that begins with arn:aws:dynamodb.

    When this operation is called with a resourceArn value whose ARN does not begin with arn:aws:backup, it may return one of the exceptions listed below. To prevent this exception, include only values representing resource types that are fully managed by Backup. These have an ARN that begins with arn:aws:backup and are noted in the Feature availability by resource table.

    ", "idempotent":true }, "PutBackupVaultAccessPolicy":{ @@ -1375,7 +1375,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

    Attempts to cancel a job to create a one-time backup of a resource.

    This action is not supported for the following services: Amazon FSx for Windows File Server, Amazon FSx for Lustre, Amazon FSx for NetApp ONTAP, Amazon FSx for OpenZFS, Amazon DocumentDB (with MongoDB compatibility), Amazon RDS, Amazon Aurora, and Amazon Neptune.

    " + "documentation":"

    Attempts to cancel a job to create a one-time backup of a resource.

    This action is not supported for the following services:

    • Amazon Aurora

    • Amazon DocumentDB (with MongoDB compatibility)

    • Amazon FSx for Lustre

    • Amazon FSx for NetApp ONTAP

    • Amazon FSx for OpenZFS

    • Amazon FSx for Windows File Server

    • Amazon Neptune

    • SAP HANA databases on Amazon EC2 instances

    • Amazon RDS

    " }, "TagResource":{ "name":"TagResource", @@ -1391,7 +1391,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Assigns a set of key-value pairs to a recovery point, backup plan, or backup vault identified by an Amazon Resource Name (ARN).

    This API is supported for recovery points for resource types including Aurora, Amazon DocumentDB. Amazon EBS, Amazon FSx, Neptune, and Amazon RDS.

    ", + "documentation":"

    Assigns a set of key-value pairs to a resource.

    ", "idempotent":true }, "UntagResource":{ @@ -1675,7 +1675,7 @@ }, "BackupSizeInBytes":{ "shape":"Long", - "documentation":"

    The size, in bytes, of a backup.

    " + "documentation":"

    The size, in bytes, of a backup (recovery point).

    This value can render differently depending on the resource type, because Backup pulls in data from other Amazon Web Services services. For example, the value returned may be 0, which may differ from the anticipated value.

    The expected behavior for values by resource type is described as follows:

    • Amazon Aurora, Amazon DocumentDB, and Amazon Neptune do not populate this value from the operation GetBackupJobStatus.

    • For Amazon DynamoDB with advanced features, this value refers to the size of the recovery point (backup).

    • Amazon EC2 and Amazon EBS show volume size (provisioned storage) returned as part of this value. Amazon EBS does not return backup size information; snapshot size will have the same value as the original resource that was backed up.

    • For Amazon EFS, this value refers to the delta bytes transferred during a backup.

    • Amazon FSx does not populate this value from the operation GetBackupJobStatus for FSx file systems.

    • An Amazon RDS instance will show as 0.

    • For virtual machines running VMware, this value is passed to Backup through an asynchronous workflow, which means the displayed value can under-represent the actual backup size.

    " }, "IamRoleArn":{ "shape":"IAMRoleArn", @@ -1956,7 +1956,7 @@ }, "ScheduleExpression":{ "shape":"CronExpression", - "documentation":"

    A cron expression in UTC specifying when Backup initiates a backup job. For more information about Amazon Web Services cron expressions, see Schedule Expressions for Rules in the Amazon CloudWatch Events User Guide.. Two examples of Amazon Web Services cron expressions are 15 * ? * * * (take a backup every hour at 15 minutes past the hour) and 0 12 * * ? * (take a backup every day at 12 noon UTC). For a table of examples, click the preceding link and scroll down the page.

    " + "documentation":"

    A cron expression in UTC specifying when Backup initiates a backup job. When no CRON expression is provided, Backup will use the default expression cron(0 5 ? * * *).

    For more information about Amazon Web Services cron expressions, see Schedule Expressions for Rules in the Amazon CloudWatch Events User Guide.

    Two examples of Amazon Web Services cron expressions are 15 * ? * * * (take a backup every hour at 15 minutes past the hour) and 0 12 * * ? * (take a backup every day at 12 noon UTC).

    For a table of examples, click the preceding link and scroll down the page.

    " }, "StartWindowMinutes":{ "shape":"WindowMinutes", @@ -2014,7 +2014,7 @@ }, "ScheduleExpression":{ "shape":"CronExpression", - "documentation":"

    A CRON expression in UTC specifying when Backup initiates a backup job.

    " + "documentation":"

    A CRON expression in UTC specifying when Backup initiates a backup job. When no CRON expression is provided, Backup will use the default expression cron(0 5 ? * * *).

    " }, "StartWindowMinutes":{ "shape":"WindowMinutes", @@ -2154,7 +2154,10 @@ "BACKUP_PLAN_CREATED", "BACKUP_PLAN_MODIFIED", "S3_BACKUP_OBJECT_FAILED", - "S3_RESTORE_OBJECT_FAILED" + "S3_RESTORE_OBJECT_FAILED", + "RECOVERY_POINT_INDEX_COMPLETED", + "RECOVERY_POINT_INDEX_DELETED", + "RECOVERY_POINT_INDEXING_FAILED" ] }, "BackupVaultEvents":{ @@ -2267,8 +2270,7 @@ }, "CancelLegalHoldOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "ComplianceResourceIdList":{ "type":"list", @@ -3285,7 +3287,7 @@ }, "BackupSizeInBytes":{ "shape":"Long", - "documentation":"

    The size, in bytes, of a backup.

    " + "documentation":"

    The size, in bytes, of a backup (recovery point).

    This value can render differently depending on the resource type, as Backup pulls in information from other Amazon Web Services services. For example, the returned value may be 0, which may differ from the anticipated value.

    The expected behavior for values by resource type is described as follows:

    • Amazon Aurora, Amazon DocumentDB, and Amazon Neptune do not have this value populated by the GetBackupJobStatus operation.

    • For Amazon DynamoDB with advanced features, this value refers to the size of the recovery point (backup).

    • Amazon EC2 and Amazon EBS return the volume size (provisioned storage) as part of this value. Amazon EBS does not return backup size information; the snapshot size will have the same value as the original resource that was backed up.

    • For Amazon EFS, this value refers to the delta bytes transferred during a backup.

    • Amazon FSx does not populate this value from the GetBackupJobStatus operation for FSx file systems.

    • An Amazon RDS instance will show as 0.

    • For virtual machines running VMware, this value is passed to Backup through an asynchronous workflow, which means the displayed value can under-represent the actual backup size.

    " }, "IamRoleArn":{ "shape":"IAMRoleArn", @@ -3400,7 +3402,7 @@ }, "NumberOfRecoveryPoints":{ "shape":"long", - "documentation":"

    The number of recovery points that are stored in a backup vault.

    " + "documentation":"

    The number of recovery points that are stored in a backup vault.

    The recovery point count displayed in the console can be an approximation. Use the ListRecoveryPointsByBackupVault API to obtain the exact count.

    " }, "Locked":{ "shape":"Boolean", @@ -3492,8 +3494,7 @@ }, "DescribeGlobalSettingsInput":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeGlobalSettingsOutput":{ "type":"structure", @@ -3625,7 +3626,7 @@ }, "Status":{ "shape":"RecoveryPointStatus", - "documentation":"

    A status code specifying the state of the recovery point.

    PARTIAL status indicates Backup could not create the recovery point before the backup window closed. To increase your backup plan window using the API, see UpdateBackupPlan. You can also increase your backup plan window using the Console by choosing and editing your backup plan.

    EXPIRED status indicates that the recovery point has exceeded its retention period, but Backup lacks permission or is otherwise unable to delete it. To manually delete these recovery points, see Step 3: Delete the recovery points in the Clean up resources section of Getting started.

    STOPPED status occurs on a continuous backup where a user has taken some action that causes the continuous backup to be disabled. This can be caused by the removal of permissions, turning off versioning, turning off events being sent to EventBridge, or disabling the EventBridge rules that are put in place by Backup. For recovery points of Amazon S3, Amazon RDS, and Amazon Aurora resources, this status occurs when the retention period of a continuous backup rule is changed.

    To resolve STOPPED status, ensure that all requested permissions are in place and that versioning is enabled on the S3 bucket. Once these conditions are met, the next instance of a backup rule running will result in a new continuous recovery point being created. The recovery points with STOPPED status do not need to be deleted.

    For SAP HANA on Amazon EC2 STOPPED status occurs due to user action, application misconfiguration, or backup failure. To ensure that future continuous backups succeed, refer to the recovery point status and check SAP HANA for details.

    " + "documentation":"

    A status code specifying the state of the recovery point. For more information, see Recovery point status in the Backup Developer Guide.

    • CREATING status indicates that a Backup job has been initiated for a resource. The backup process has started and is actively processing a backup job for the associated recovery point.

    • AVAILABLE status indicates that the backup was successfully created for the recovery point. The backup process has completed without any issues, and the recovery point is now ready for use.

    • PARTIAL status indicates a composite recovery point has one or more nested recovery points that were not in the backup.

    • EXPIRED status indicates that the recovery point has exceeded its retention period, but Backup lacks permission or is otherwise unable to delete it. To manually delete these recovery points, see Step 3: Delete the recovery points in the Clean up resources section of Getting started.

    • STOPPED status occurs on a continuous backup where a user has taken some action that causes the continuous backup to be disabled. This can be caused by the removal of permissions, turning off versioning, turning off events being sent to EventBridge, or disabling the EventBridge rules that are put in place by Backup. For recovery points of Amazon S3, Amazon RDS, and Amazon Aurora resources, this status occurs when the retention period of a continuous backup rule is changed.

      To resolve STOPPED status, ensure that all requested permissions are in place and that versioning is enabled on the S3 bucket. Once these conditions are met, the next instance of a backup rule running will result in a new continuous recovery point being created. The recovery points with STOPPED status do not need to be deleted.

      For SAP HANA on Amazon EC2 STOPPED status occurs due to user action, application misconfiguration, or backup failure. To ensure that future continuous backups succeed, refer to the recovery point status and check SAP HANA for details.

    " }, "StatusMessage":{ "shape":"string", @@ -3699,8 +3700,7 @@ }, "DescribeRegionSettingsInput":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeRegionSettingsOutput":{ "type":"structure", @@ -6123,7 +6123,7 @@ }, "BackupVaultEvents":{ "shape":"BackupVaultEvents", - "documentation":"

    An array of events that indicate the status of jobs to back up resources to the backup vault.

    For common use cases and code samples, see Using Amazon SNS to track Backup events.

    The following events are supported:

    • BACKUP_JOB_STARTED | BACKUP_JOB_COMPLETED

    • COPY_JOB_STARTED | COPY_JOB_SUCCESSFUL | COPY_JOB_FAILED

    • RESTORE_JOB_STARTED | RESTORE_JOB_COMPLETED | RECOVERY_POINT_MODIFIED

    • S3_BACKUP_OBJECT_FAILED | S3_RESTORE_OBJECT_FAILED

    The list below includes both supported events and deprecated events that are no longer in use (for reference). Deprecated events do not return statuses or notifications. Refer to the list above for the supported events.

    " + "documentation":"

    An array of events that indicate the status of jobs to back up resources to the backup vault.

    For common use cases and code samples, see Using Amazon SNS to track Backup events.

    The following events are supported:

    • BACKUP_JOB_STARTED | BACKUP_JOB_COMPLETED | BACKUP_JOB_FAILED

    • COPY_JOB_STARTED | COPY_JOB_SUCCESSFUL | COPY_JOB_FAILED

    • RESTORE_JOB_STARTED | RESTORE_JOB_COMPLETED | RECOVERY_POINT_MODIFIED

    • S3_BACKUP_OBJECT_FAILED | S3_RESTORE_OBJECT_FAILED

    • RECOVERY_POINT_INDEX_COMPLETED | RECOVERY_POINT_INDEX_DELETED | RECOVERY_POINT_INDEXING_FAILED

    The list below includes both supported events and deprecated events that are no longer in use (for reference). Deprecated events do not return statuses or notifications. Refer to the list above for the supported events.

    " } } }, @@ -6387,7 +6387,10 @@ "COMPLETED", "PARTIAL", "DELETING", - "EXPIRED" + "EXPIRED", + "AVAILABLE", + "STOPPED", + "CREATING" ] }, "RecoveryPointsList":{ @@ -6791,7 +6794,7 @@ }, "ScheduleExpression":{ "shape":"String", - "documentation":"

    A CRON expression in specified timezone when a restore testing plan is executed.

    " + "documentation":"

    A CRON expression in the specified timezone that determines when a restore testing plan is executed. When no CRON expression is provided, Backup will use the default expression cron(0 5 ? * * *).

    " }, "ScheduleExpressionTimezone":{ "shape":"String", @@ -6844,7 +6847,7 @@ }, "ScheduleExpression":{ "shape":"String", - "documentation":"

    A CRON expression in specified timezone when a restore testing plan is executed.

    " + "documentation":"

    A CRON expression in the specified timezone that determines when a restore testing plan is executed. When no CRON expression is provided, Backup will use the default expression cron(0 5 ? * * *).

    " }, "ScheduleExpressionTimezone":{ "shape":"String", @@ -6888,7 +6891,7 @@ }, "ScheduleExpression":{ "shape":"String", - "documentation":"

    A CRON expression in specified timezone when a restore testing plan is executed.

    " + "documentation":"

    A CRON expression in the specified timezone that determines when a restore testing plan is executed. When no CRON expression is provided, Backup will use the default expression cron(0 5 ? * * *).

    " }, "ScheduleExpressionTimezone":{ "shape":"String", @@ -6910,7 +6913,7 @@ }, "ScheduleExpression":{ "shape":"String", - "documentation":"

    A CRON expression in specified timezone when a restore testing plan is executed.

    " + "documentation":"

    A CRON expression in the specified timezone that determines when a restore testing plan is executed. When no CRON expression is provided, Backup will use the default expression cron(0 5 ? * * *).

    " }, "ScheduleExpressionTimezone":{ "shape":"String", @@ -7005,7 +7008,7 @@ }, "ValidationWindowHours":{ "shape":"integer", - "documentation":"

    This is amount of hours (1 to 168) available to run a validation script on the data. The data will be deleted upon the completion of the validation script or the end of the specified retention period, whichever comes first.

    " + "documentation":"

    This is the number of hours (0 to 168) available to run a validation script on the data. The data will be deleted upon the completion of the validation script or the end of the specified retention period, whichever comes first.

    " } }, "documentation":"

    This contains metadata about a specific restore testing selection.

    ProtectedResourceType is required, such as Amazon EBS or Amazon EC2.

    This consists of RestoreTestingSelectionName, ProtectedResourceType, and one of the following:

    • ProtectedResourceArns

    • ProtectedResourceConditions

    Each protected resource type can have one single value.

    A restore testing selection can include a wildcard value (\"*\") for ProtectedResourceArns along with ProtectedResourceConditions. Alternatively, you can include up to 30 specific protected resource ARNs in ProtectedResourceArns.

    ProtectedResourceConditions examples include StringEquals and StringNotEquals.

    " @@ -7387,7 +7390,7 @@ "members":{ "ResourceArn":{ "shape":"ARN", - "documentation":"

    An ARN that uniquely identifies a resource. The format of the ARN depends on the type of the tagged resource.

    ARNs that do not include backup are incompatible with tagging. TagResource and UntagResource with invalid ARNs will result in an error. Acceptable ARN content can include arn:aws:backup:us-east. Invalid ARN content may look like arn:aws:ec2:us-east.

    ", + "documentation":"

    The ARN that uniquely identifies the resource.

    ", "location":"uri", "locationName":"resourceArn" }, diff --git a/services/backupgateway/pom.xml b/services/backupgateway/pom.xml index 679f1fdb2fc4..01f3c84d41f8 100644 --- a/services/backupgateway/pom.xml +++ b/services/backupgateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT backupgateway AWS Java SDK :: Services :: Backup Gateway diff --git a/services/backupsearch/pom.xml b/services/backupsearch/pom.xml index 39e896d2e9e8..59b0a90b208a 100644 --- a/services/backupsearch/pom.xml +++ b/services/backupsearch/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT backupsearch AWS Java SDK :: Services :: Backup Search diff --git a/services/batch/pom.xml b/services/batch/pom.xml index 5b9da47c4466..e8ddf726932d 100644 --- a/services/batch/pom.xml +++ b/services/batch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT batch AWS Java SDK :: Services :: AWS Batch diff --git a/services/bcmdataexports/pom.xml b/services/bcmdataexports/pom.xml index 5b6b9bbcd8dd..f57c7f6f8c62 100644 --- a/services/bcmdataexports/pom.xml +++ b/services/bcmdataexports/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT bcmdataexports AWS Java SDK :: Services :: BCM Data Exports diff --git a/services/bcmpricingcalculator/pom.xml b/services/bcmpricingcalculator/pom.xml index d99ccbcbec37..cb64d7757345 100644 --- a/services/bcmpricingcalculator/pom.xml +++ b/services/bcmpricingcalculator/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT bcmpricingcalculator AWS Java SDK :: Services :: BCM Pricing Calculator diff --git a/services/bcmpricingcalculator/src/main/resources/codegen-resources/service-2.json b/services/bcmpricingcalculator/src/main/resources/codegen-resources/service-2.json index e78df9022ce4..79d37d66ff91 100644 --- a/services/bcmpricingcalculator/src/main/resources/codegen-resources/service-2.json +++ b/services/bcmpricingcalculator/src/main/resources/codegen-resources/service-2.json @@ -3182,7 +3182,7 @@ "documentation":"

    A token to retrieve the next page of results.

    " }, "maxResults":{ - "shape":"MaxResults", + "shape":"WorkloadEstimateUsageMaxResults", "documentation":"

    The maximum number of results to return per page.

    " } } @@ -3286,7 +3286,8 @@ "MaxResults":{ "type":"integer", "box":true, - "max":25 + "max":25, + "min":1 }, "NegateReservedInstanceAction":{ "type":"structure", @@ -3331,13 +3332,14 @@ "type":"string", "enum":[ "BEFORE_DISCOUNTS", - "AFTER_DISCOUNTS" + "AFTER_DISCOUNTS", + "AFTER_DISCOUNTS_AND_COMMITMENTS" ] }, "RateTypes":{ "type":"list", "member":{"shape":"RateType"}, - "max":2, + "max":3, "min":1 }, "ReservedInstanceInstanceCount":{ @@ -3869,7 +3871,8 @@ "type":"string", "enum":[ "BEFORE_DISCOUNTS", - "AFTER_DISCOUNTS" + "AFTER_DISCOUNTS", + "AFTER_DISCOUNTS_AND_COMMITMENTS" ] }, "WorkloadEstimateStatus":{ @@ -4004,6 +4007,12 @@ "type":"list", "member":{"shape":"WorkloadEstimateUsageItem"} }, + "WorkloadEstimateUsageMaxResults":{ + "type":"integer", + "box":true, + "max":300, + "min":1 + }, "WorkloadEstimateUsageQuantity":{ "type":"structure", "members":{ diff --git a/services/bedrock/pom.xml b/services/bedrock/pom.xml index 6d5109e00932..7b18a11b04b0 100644 --- a/services/bedrock/pom.xml +++ b/services/bedrock/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT bedrock AWS Java SDK :: Services :: Bedrock diff --git a/services/bedrock/src/main/resources/codegen-resources/service-2.json b/services/bedrock/src/main/resources/codegen-resources/service-2.json index 682eddaa47fa..d616ae8c7e28 100644 --- a/services/bedrock/src/main/resources/codegen-resources/service-2.json +++ b/services/bedrock/src/main/resources/codegen-resources/service-2.json @@ -32,6 +32,27 @@ ], "documentation":"

    Deletes a batch of evaluation jobs. An evaluation job can only be deleted if it has one of the following statuses: FAILED, COMPLETED, or STOPPED. You can request that up to 25 model evaluation jobs be deleted in a single request.

    " }, + "CreateCustomModel":{ + "name":"CreateCustomModel", + "http":{ + "method":"POST", + "requestUri":"/custom-models/create-custom-model", + "responseCode":200 + }, + "input":{"shape":"CreateCustomModelRequest"}, + "output":{"shape":"CreateCustomModelResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"TooManyTagsException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Creates a new custom model in Amazon Bedrock from an existing SageMaker AI-trained Amazon Nova model stored in an Amazon-managed Amazon S3 bucket. After the model is active, you can use it for inference.

    To use the model for inference, you must purchase Provisioned Throughput for it. You can't use On-demand inference with these custom models. For more information about Provisioned Throughput, see Provisioned Throughput.

    The model appears in ListCustomModels with a customizationType of imported. To track the status of the new model, you use the GetCustomModel API operation. The model can be in the following states:

    • Creating - Initial state during validation and registration

    • Active - Model is ready for use in inference

    • Failed - Creation process encountered an error

    For more information about creating custom models, including specific model requirements, see Import a SageMaker AI-trained Amazon Nova model in the Amazon Bedrock User Guide.

    You use the CreateCustomModel API to import only SageMaker AI-trained Amazon Nova models. To import open-source models, you use the CreateModelImportJob operation.

    Related APIs

    " + }, "CreateEvaluationJob":{ "name":"CreateEvaluationJob", "http":{ @@ -452,7 +473,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Get the properties associated with a Amazon Bedrock custom model that you have created.For more information, see Custom models in the Amazon Bedrock User Guide.

    " + "documentation":"

    Get the properties associated with an Amazon Bedrock custom model that you have created. For more information, see Custom models in the Amazon Bedrock User Guide.

    " }, "GetEvaluationJob":{ "name":"GetEvaluationJob", @@ -1422,6 +1443,50 @@ "type":"string", "pattern":".*[a-z]{1,20}/.{1,20}.*" }, + "CreateCustomModelRequest":{ + "type":"structure", + "required":[ + "modelName", + "modelSourceConfig" + ], + "members":{ + "modelName":{ + "shape":"CustomModelName", + "documentation":"

    A unique name for the custom model.

    " + }, + "modelSourceConfig":{ + "shape":"ModelDataSource", + "documentation":"

    The data source for the model. The Amazon S3 URI in the model source must be for the Amazon-managed Amazon S3 bucket containing your model artifacts. SageMaker AI creates this bucket when you run your first SageMaker AI training job.

    " + }, + "modelKmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

    The Amazon Resource Name (ARN) of the customer managed KMS key to encrypt the custom model. If you don't provide a KMS key, Amazon Bedrock uses an Amazon Web Services-managed KMS key to encrypt the model.

    If you provide a customer managed KMS key, your Amazon Bedrock service role must have permissions to use it. For more information, see Encryption of imported models.

    " + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

    The Amazon Resource Name (ARN) of an IAM service role that Amazon Bedrock assumes to perform tasks on your behalf. This role must have permissions to access the Amazon S3 bucket containing your model artifacts and the KMS key (if specified). For more information, see Setting up an IAM service role for importing models in the Amazon Bedrock User Guide.

    " + }, + "modelTags":{ + "shape":"TagList", + "documentation":"

    A list of key-value pairs to associate with the custom model resource. You can use these tags to organize and identify your resources.

    For more information, see Tagging resources in the Amazon Bedrock User Guide.

    " + }, + "clientRequestToken":{ + "shape":"IdempotencyToken", + "documentation":"

    A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

    ", + "idempotencyToken":true + } + } + }, + "CreateCustomModelResponse":{ + "type":"structure", + "required":["modelArn"], + "members":{ + "modelArn":{ + "shape":"ModelArn", + "documentation":"

    The Amazon Resource Name (ARN) of the new custom model.

    " + } + } + }, "CreateEvaluationJobRequest":{ "type":"structure", "required":[ @@ -2096,7 +2161,7 @@ "type":"string", "max":1011, "min":20, - "pattern":"arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}/[a-z0-9]{12}" + "pattern":"arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:custom-model/(imported|[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}/[a-z0-9]{12})" }, "CustomModelName":{ "type":"string", @@ -2141,6 +2206,10 @@ "ownerAccountId":{ "shape":"AccountId", "documentation":"

    The unique identifier of the account that owns the model.

    " + }, + "modelStatus":{ + "shape":"ModelStatus", + "documentation":"

    The current status of the custom model. Possible values include:

    • Creating - The model is being created and validated.

    • Active - The model has been successfully created and is ready for use.

    • Failed - The model creation process failed.

    " } }, "documentation":"

    Summary information for a custom model.

    " @@ -2183,7 +2252,8 @@ "enum":[ "FINE_TUNING", "CONTINUED_PRE_TRAINING", - "DISTILLATION" + "DISTILLATION", + "IMPORTED" ] }, "DataProcessingDetails":{ @@ -3151,10 +3221,6 @@ "required":[ "modelArn", "modelName", - "jobArn", - "baseModelArn", - "trainingDataConfig", - "outputDataConfig", "creationTime" ], "members":{ @@ -3172,7 +3238,7 @@ }, "jobArn":{ "shape":"ModelCustomizationJobArn", - "documentation":"

    Job Amazon Resource Name (ARN) associated with this model.

    " + "documentation":"

    Job Amazon Resource Name (ARN) associated with this model. For models that you create with the CreateCustomModel API operation, this is NULL.

    " }, "baseModelArn":{ "shape":"ModelArn", @@ -3217,6 +3283,14 @@ "customizationConfig":{ "shape":"CustomizationConfig", "documentation":"

    The customization configuration for the custom model.

    " + }, + "modelStatus":{ + "shape":"ModelStatus", + "documentation":"

    The current status of the custom model. Possible values include:

    • Creating - The model is being created and validated.

    • Active - The model has been successfully created and is ready for use.

    • Failed - The model creation process failed. Check the failureMessage field for details.

    " + }, + "failureMessage":{ + "shape":"ErrorMessage", + "documentation":"

    A failure message for any issues that occurred when creating the custom model. This is included only for a failed CreateCustomModel operation.

    " } } }, @@ -3697,14 +3771,14 @@ "shape":"ModelCustomizationJobStatus", "documentation":"

    The status of the job. A successful job transitions from in-progress to completed when the output model is ready to use. If the job failed, the failure message contains information about why the job failed.

    " }, - "failureMessage":{ - "shape":"ErrorMessage", - "documentation":"

    Information about why the job failed.

    " - }, "statusDetails":{ "shape":"StatusDetails", "documentation":"

    For a Distillation job, the details about the statuses of the sub-tasks of the customization job.

    " }, + "failureMessage":{ + "shape":"ErrorMessage", + "documentation":"

    Information about why the job failed.

    " + }, "creationTime":{ "shape":"Timestamp", "documentation":"

    Time that the resource was created.

    " @@ -5706,6 +5780,12 @@ "documentation":"

    Return custom models depending on whether the current account owns them (true) or they were shared with the current account (false).

    ", "location":"querystring", "locationName":"isOwned" + }, + "modelStatus":{ + "shape":"ModelStatus", + "documentation":"

    The status of the model to filter results by. Possible values include:

    • Creating - Include only models that are currently being created and validated.

    • Active - Include only models that have been successfully created and are ready for use.

    • Failed - Include only models where the creation process failed.

    If you don't specify a status, the API returns models in all states.

    ", + "location":"querystring", + "locationName":"modelStatus" } } }, @@ -6565,7 +6645,7 @@ "type":"string", "max":1011, "min":20, - "pattern":"arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}(([:][a-z0-9-]{1,63}){0,2})?/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}))" + "pattern":"arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/((imported)|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}))(([:][a-z0-9-]{1,63}){0,2})?/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}))" }, "ModelCopyJobArn":{ "type":"string", @@ -6708,14 +6788,14 @@ "shape":"ModelCustomizationJobStatus", "documentation":"

    Status of the customization job.

    " }, - "lastModifiedTime":{ - "shape":"Timestamp", - "documentation":"

    Time that the customization job was last modified.

    " - }, "statusDetails":{ "shape":"StatusDetails", "documentation":"

    Details about the status of the data processing sub-task of the job.

    " }, + "lastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

    Time that the customization job was last modified.

    " + }, "creationTime":{ "shape":"Timestamp", "documentation":"

    Creation time of the custom model.

    " @@ -6748,10 +6828,10 @@ "members":{ "s3DataSource":{ "shape":"S3DataSource", - "documentation":"

    The Amazon S3 data source of the imported model.

    " + "documentation":"

    The Amazon S3 data source of the model to import.

    " } }, - "documentation":"

    Data source for the imported model.

    ", + "documentation":"

    The data source of the model to import.

    ", "union":true }, "ModelId":{ @@ -6764,7 +6844,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}(([:][a-z0-9-]{1,63}){0,2})?/[a-z0-9]{12})|(:foundation-model/([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2})))|(([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2}))|(([0-9a-zA-Z][_-]?)+)" + "pattern":"arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/((imported)|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}))(([:][a-z0-9-]{1,63}){0,2})?/[a-z0-9]{12})|(:foundation-model/([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2})))|(([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2}))|(([0-9a-zA-Z][_-]?)+)" }, "ModelImportJobArn":{ "type":"string", @@ -7042,6 +7122,14 @@ "min":0, "pattern":".*arn:aws:sagemaker:.*:hub-content/SageMakerPublicHub/Model/.*" }, + "ModelStatus":{ + "type":"string", + "enum":[ + "Active", + "Creating", + "Failed" + ] + }, "NonBlankString":{ "type":"string", "pattern":"[\\s\\S]*" @@ -7685,7 +7773,7 @@ "documentation":"

    The URI of the Amazon S3 data source.

    " } }, - "documentation":"

    The Amazon S3 data source of the imported job.

    " + "documentation":"

    The Amazon S3 data source of the model to import. For the CreateCustomModel API operation, you must specify the Amazon S3 URI for the Amazon-managed Amazon S3 bucket containing your model artifacts. SageMaker AI creates this bucket when you run your first SageMaker AI training job.

    " }, "S3InputFormat":{ "type":"string", @@ -7962,7 +8050,7 @@ "type":"string", "max":1011, "min":20, - "pattern":".*(^[a-zA-Z0-9][a-zA-Z0-9\\-]*$)|(^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:([0-9]{12}|)((:(fine-tuning-job|model-customization-job|custom-model)/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}(/[a-z0-9]{12})$)|(:guardrail/[a-z0-9]+$)|(:automated-reasoning-policy/[a-zA-Z0-9]+(:[a-zA-Z0-9]+)?$)|(:(inference-profile|application-inference-profile)/[a-zA-Z0-9-:.]+$)|(:(provisioned-model|model-invocation-job|model-evaluation-job|evaluation-job|model-import-job|imported-model|async-invoke|provisioned-model-v2|provisioned-model-reservation|prompt-router)/[a-z0-9]{12}$))).*" + "pattern":".*(^[a-zA-Z0-9][a-zA-Z0-9\\-]*$)|(^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:custom-model/([a-z0-9-]{1,63}[.][a-z0-9-]{1,63}(([:][a-z0-9-]{1,63}){0,2})?|imported)/[a-z0-9]{12}$)|(^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:([0-9]{12}|)((:(fine-tuning-job|model-customization-job)/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}(/[a-z0-9]{12})$)|(:guardrail/[a-z0-9]+$)|(:automated-reasoning-policy/[a-zA-Z0-9]+(:[a-zA-Z0-9]+)?$)|(:(inference-profile|application-inference-profile)/[a-zA-Z0-9-:.]+$)|(:(provisioned-model|model-invocation-job|model-evaluation-job|evaluation-job|model-import-job|imported-model|async-invoke|provisioned-model-v2|provisioned-model-reservation|prompt-router|custom-model-deployment)/[a-z0-9]{12}$))).*" }, "TeacherModelConfig":{ "type":"structure", diff --git a/services/bedrockagent/pom.xml b/services/bedrockagent/pom.xml index 4f2e5251f2ae..00f9a874daa3 100644 --- a/services/bedrockagent/pom.xml +++ b/services/bedrockagent/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT bedrockagent AWS Java SDK :: Services :: Bedrock Agent diff --git a/services/bedrockagent/src/main/resources/codegen-resources/service-2.json b/services/bedrockagent/src/main/resources/codegen-resources/service-2.json index 7c2a1ee3dccc..087457238c07 100644 --- a/services/bedrockagent/src/main/resources/codegen-resources/service-2.json +++ b/services/bedrockagent/src/main/resources/codegen-resources/service-2.json @@ -1702,7 +1702,7 @@ }, "parentActionGroupSignatureParams":{ "shape":"ActionGroupSignatureParams", - "documentation":"

    The configuration settings for a computer use action.

    Computer use is a new Anthropic Claude model capability (in beta) available with Claude 3.7 Sonnet and Claude 3.5 Sonnet v2 only. For more information, see Configure an Amazon Bedrock Agent to complete tasks with computer use tools.

    " + "documentation":"

    The configuration settings for a computer use action.

    Computer use is a new Anthropic Claude model capability (in beta) available with Claude 3.7 Sonnet and Claude 3.5 Sonnet v2 only. For more information, see Configure an Amazon Bedrock Agent to complete tasks with computer use tools.

    " }, "parentActionSignature":{ "shape":"ActionGroupSignature", @@ -1752,6 +1752,10 @@ "shape":"Id", "documentation":"

    The unique identifier of the agent.

    " }, + "aliasInvocationState":{ + "shape":"AliasInvocationState", + "documentation":"

    The invocation state for the agent alias. If the agent alias is running, the value is ACCEPT_INVOCATIONS. If the agent alias is paused, the value is REJECT_INVOCATIONS. Use the UpdateAgentAlias operation to change the invocation state.

    " + }, "clientToken":{ "shape":"ClientToken", "documentation":"

    A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

    " @@ -1874,6 +1878,10 @@ "shape":"AgentAliasStatus", "documentation":"

    The status of the alias.

    " }, + "aliasInvocationState":{ + "shape":"AliasInvocationState", + "documentation":"

    The invocation state for the agent alias. If the agent alias is running, the value is ACCEPT_INVOCATIONS. If the agent alias is paused, the value is REJECT_INVOCATIONS. Use the UpdateAgentAlias operation to change the invocation state.

    " + }, "createdAt":{ "shape":"DateTimestamp", "documentation":"

    The time at which the alias of the agent was created.

    " @@ -2324,6 +2332,14 @@ }, "documentation":"

    Contains details about a version of an agent.

    " }, + "AliasInvocationState":{ + "type":"string", + "documentation":"

    Enum representing the invocation state of an agent alias

    ", + "enum":[ + "ACCEPT_INVOCATIONS", + "REJECT_INVOCATIONS" + ] + }, "AnyToolChoice":{ "type":"structure", "members":{ @@ -2883,11 +2899,11 @@ }, "parentActionGroupSignature":{ "shape":"ActionGroupSignature", - "documentation":"

    Specify a built-in or computer use action for this action group. If you specify a value, you must leave the description, apiSchema, and actionGroupExecutor fields empty for this action group.

    • To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput.

    • To allow your agent to generate, run, and troubleshoot code when trying to complete a task, set this field to AMAZON.CodeInterpreter.

    • To allow your agent to use an Anthropic computer use tool, specify one of the following values.

      Computer use is a new Anthropic Claude model capability (in beta) available with Anthropic Claude 3.7 Sonnet and Claude 3.5 Sonnet v2 only. When operating computer use functionality, we recommend taking additional security precautions, such as executing computer actions in virtual environments with restricted data access and limited internet connectivity. For more information, see Configure an Amazon Bedrock Agent to complete tasks with computer use tools.

      • ANTHROPIC.Computer - Gives the agent permission to use the mouse and keyboard and take screenshots.

      • ANTHROPIC.TextEditor - Gives the agent permission to view, create and edit files.

      • ANTHROPIC.Bash - Gives the agent permission to run commands in a bash shell.

    " + "documentation":"

    Specify a built-in or computer use action for this action group. If you specify a value, you must leave the description, apiSchema, and actionGroupExecutor fields empty for this action group.

    • To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput.

    • To allow your agent to generate, run, and troubleshoot code when trying to complete a task, set this field to AMAZON.CodeInterpreter.

    • To allow your agent to use an Anthropic computer use tool, specify one of the following values.

      Computer use is a new Anthropic Claude model capability (in beta) available with Anthropic Claude 3.7 Sonnet and Claude 3.5 Sonnet v2 only. When operating computer use functionality, we recommend taking additional security precautions, such as executing computer actions in virtual environments with restricted data access and limited internet connectivity. For more information, see Configure an Amazon Bedrock Agent to complete tasks with computer use tools.

      • ANTHROPIC.Computer - Gives the agent permission to use the mouse and keyboard and take screenshots.

      • ANTHROPIC.TextEditor - Gives the agent permission to view, create and edit files.

      • ANTHROPIC.Bash - Gives the agent permission to run commands in a bash shell.

    " }, "parentActionGroupSignatureParams":{ "shape":"ActionGroupSignatureParams", - "documentation":"

    The configuration settings for a computer use action.

    Computer use is a new Anthropic Claude model capability (in beta) available with Anthropic Claude 3.7 Sonnet and Claude 3.5 Sonnet v2 only. For more information, see Configure an Amazon Bedrock Agent to complete tasks with computer use tools.

    " + "documentation":"

    The configuration settings for a computer use action.

    Computer use is a new Anthropic Claude model capability (in beta) available with Anthropic Claude 3.7 Sonnet and Claude 3.5 Sonnet v2 only. For more information, see Configure an Amazon Bedrock Agent to complete tasks with computer use tools.

    " } } }, @@ -7586,7 +7602,7 @@ "documentation":"

    The definition of the DoWhile loop nodes and connections between nodes in the flow.

    " } }, - "documentation":"

    Contains configurations for the nodes of a DoWhile loop in your flow.

    A DoWhile loop is made up of the following nodes:

    • Loop - The container node that holds the loop's flow definition. This node encompasses the entire loop structure.

    • LoopInput - The entry point node for the loop. This node receives inputs from nodes outside the loop and from previous loop iterations.

    • Body nodes - These can be

    • LoopController - The node that evaluates whether the loop should continue or exit based on a condition.

    These nodes work together to create a loop that runs at least once and continues until a specified condition is met or a maximum number of iterations is reached.

    " + "documentation":"

    Contains configurations for the nodes of a DoWhile loop in your flow.

    A DoWhile loop is made up of the following nodes:

    • Loop - The container node that holds the loop's flow definition. This node encompasses the entire loop structure.

    • LoopInput - The entry point node for the loop. This node receives inputs from nodes outside the loop and from previous loop iterations.

    • Body nodes - The processing nodes that execute within each loop iteration. These can be nodes for handling data in your flow, such as a prompt or Lambda function nodes. Some node types aren't supported inside a DoWhile loop body. For more information, see LoopIncompatibleNodeTypeFlowValidationDetails.

    • LoopController - The node that evaluates whether the loop should continue or exit based on a condition.

    These nodes work together to create a loop that runs at least once and continues until a specified condition is met or a maximum number of iterations is reached.

    " }, "LoopIncompatibleNodeTypeFlowValidationDetails":{ "type":"structure", @@ -8630,7 +8646,7 @@ }, "promptState":{ "shape":"PromptState", - "documentation":"

    Specifies whether to allow the agent to carry out the step specified in the promptType. If you set this value to DISABLED, the agent skips that step. The default state for each promptType is as follows.

    • PRE_PROCESSING - ENABLED

    • ORCHESTRATION - ENABLED

    • KNOWLEDGE_BASE_RESPONSE_GENERATION - ENABLED

    • POST_PROCESSING - DISABLED

    " + "documentation":"

    Specifies whether to allow the agent to carry out the step specified in the promptType. If you set this value to DISABLED, the agent skips that step. The default state for each promptType is as follows.

    • PRE_PROCESSING - DISABLED

    • ORCHESTRATION - ENABLED

    • KNOWLEDGE_BASE_RESPONSE_GENERATION - ENABLED

    • POST_PROCESSING - DISABLED

    " }, "promptType":{ "shape":"PromptType", @@ -10631,11 +10647,11 @@ }, "parentActionGroupSignature":{ "shape":"ActionGroupSignature", - "documentation":"

    Update the built-in or computer use action for this action group. If you specify a value, you must leave the description, apiSchema, and actionGroupExecutor fields empty for this action group.

    • To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput.

    • To allow your agent to generate, run, and troubleshoot code when trying to complete a task, set this field to AMAZON.CodeInterpreter.

    • To allow your agent to use an Anthropic computer use tool, specify one of the following values.

      Computer use is a new Anthropic Claude model capability (in beta) available with Anthropic Claude 3.7 Sonnet and Claude 3.5 Sonnet v2 only. When operating computer use functionality, we recommend taking additional security precautions, such as executing computer actions in virtual environments with restricted data access and limited internet connectivity. For more information, see Configure an Amazon Bedrock Agent to complete tasks with computer use tools.

      • ANTHROPIC.Computer - Gives the agent permission to use the mouse and keyboard and take screenshots.

      • ANTHROPIC.TextEditor - Gives the agent permission to view, create and edit files.

      • ANTHROPIC.Bash - Gives the agent permission to run commands in a bash shell.

    During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an Observation reprompting the user for more information.

    " + "documentation":"

    Update the built-in or computer use action for this action group. If you specify a value, you must leave the description, apiSchema, and actionGroupExecutor fields empty for this action group.

    • To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput.

    • To allow your agent to generate, run, and troubleshoot code when trying to complete a task, set this field to AMAZON.CodeInterpreter.

    • To allow your agent to use an Anthropic computer use tool, specify one of the following values.

      Computer use is a new Anthropic Claude model capability (in beta) available with Anthropic Claude 3.7 Sonnet and Claude 3.5 Sonnet v2 only. When operating computer use functionality, we recommend taking additional security precautions, such as executing computer actions in virtual environments with restricted data access and limited internet connectivity. For more information, see Configure an Amazon Bedrock Agent to complete tasks with computer use tools.

      • ANTHROPIC.Computer - Gives the agent permission to use the mouse and keyboard and take screenshots.

      • ANTHROPIC.TextEditor - Gives the agent permission to view, create and edit files.

      • ANTHROPIC.Bash - Gives the agent permission to run commands in a bash shell.

    During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an Observation reprompting the user for more information.

    " }, "parentActionGroupSignatureParams":{ "shape":"ActionGroupSignatureParams", - "documentation":"

    The configuration settings for a computer use action.

    Computer use is a new Anthropic Claude model capability (in beta) available with Claude 3.7 Sonnet and Claude 3.5 Sonnet v2 only. For more information, see Configure an Amazon Bedrock Agent to complete tasks with computer use tools.

    " + "documentation":"

    The configuration settings for a computer use action.

    Computer use is a new Anthropic Claude model capability (in beta) available with Claude 3.7 Sonnet and Claude 3.5 Sonnet v2 only. For more information, see Configure an Amazon Bedrock Agent to complete tasks with computer use tools.

    " } } }, @@ -10673,6 +10689,10 @@ "location":"uri", "locationName":"agentId" }, + "aliasInvocationState":{ + "shape":"AliasInvocationState", + "documentation":"

    The invocation state for the agent alias. To pause the agent alias, set the value to REJECT_INVOCATIONS. To start the agent alias running again, set the value to ACCEPT_INVOCATIONS. Use the GetAgentAlias, or ListAgentAliases, operation to get the invocation state of an agent alias.

    " + }, "description":{ "shape":"Description", "documentation":"

    Specifies a new description for the alias.

    " diff --git a/services/bedrockagentruntime/pom.xml b/services/bedrockagentruntime/pom.xml index 6156ac92c20c..025df87bd220 100644 --- a/services/bedrockagentruntime/pom.xml +++ b/services/bedrockagentruntime/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT bedrockagentruntime AWS Java SDK :: Services :: Bedrock Agent Runtime diff --git a/services/bedrockagentruntime/src/main/resources/codegen-resources/service-2.json b/services/bedrockagentruntime/src/main/resources/codegen-resources/service-2.json index 1c49c4305e3e..e56955988f23 100644 --- a/services/bedrockagentruntime/src/main/resources/codegen-resources/service-2.json +++ b/services/bedrockagentruntime/src/main/resources/codegen-resources/service-2.json @@ -178,7 +178,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

    Retrieves the flow definition snapshot used for an asynchronous execution. The snapshot represents the flow metadata and definition as it existed at the time the asynchronous execution was started. Note that even if the flow is edited after an execution starts, the snapshot connected to the execution remains unchanged.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    " + "documentation":"

    Retrieves the flow definition snapshot used for a flow execution. The snapshot represents the flow metadata and definition as it existed at the time the execution was started. Note that even if the flow is edited after an execution starts, the snapshot connected to the execution remains unchanged.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    " }, "GetFlowExecution":{ "name":"GetFlowExecution", @@ -196,7 +196,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

    Retrieves details about a specific asynchronous execution of a flow, including its status, start and end times, and any errors that occurred during execution.

    " + "documentation":"

    Retrieves details about a specific flow execution, including its status, start and end times, and any errors that occurred during execution.

    " }, "GetInvocationStep":{ "name":"GetInvocationStep", @@ -317,7 +317,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

    Lists events that occurred during an asynchronous execution of a flow. Events provide detailed information about the execution progress, including node inputs and outputs, flow inputs and outputs, condition results, and failure events.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    " + "documentation":"

    Lists events that occurred during a flow execution. Events provide detailed information about the execution progress, including node inputs and outputs, flow inputs and outputs, condition results, and failure events.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    " }, "ListFlowExecutions":{ "name":"ListFlowExecutions", @@ -335,7 +335,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

    Lists all asynchronous executions for a flow. Results can be paginated and include summary information about each execution, such as status, start and end times, and the execution's Amazon Resource Name (ARN).

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    " + "documentation":"

    Lists all executions of a flow. Results can be paginated and include summary information about each execution, such as status, start and end times, and the execution's Amazon Resource Name (ARN).

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    " }, "ListInvocationSteps":{ "name":"ListInvocationSteps", @@ -556,7 +556,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

    Starts an asynchronous execution of an Amazon Bedrock flow. Unlike synchronous flows that run until completion or time out after five minutes, you can run asynchronous flows for longer durations. Asynchronous flows also yield control so that your application can perform other tasks.

    This operation returns an Amazon Resource Name (ARN) that you can use to track and manage your flow's async execution.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    " + "documentation":"

    Starts an execution of an Amazon Bedrock flow. Unlike flows that run until completion or time out after five minutes, flow executions let you run flows asynchronously for longer durations. Flow executions also yield control so that your application can perform other tasks.

    This operation returns an Amazon Resource Name (ARN) that you can use to track and manage your flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    " }, "StopFlowExecution":{ "name":"StopFlowExecution", @@ -577,7 +577,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

    Stops an Amazon Bedrock flow's asynchronous execution. This operation prevents further processing of the flow and changes the execution status to Aborted.

    " + "documentation":"

    Stops an Amazon Bedrock flow's execution. This operation prevents further processing of the flow and changes the execution status to Aborted.

    " }, "TagResource":{ "name":"TagResource", @@ -1461,7 +1461,7 @@ "documentation":"

    The timestamp when the condition evaluation occurred.

    " } }, - "documentation":"

    Contains information about a condition evaluation result during an async execution. This event is generated when a condition node in the flow evaluates its conditions.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    ", + "documentation":"

    Contains information about a condition evaluation result during a flow execution. This event is generated when a condition node in the flow evaluates its conditions.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", "sensitive":true }, "ConfirmationState":{ @@ -2076,7 +2076,7 @@ "documentation":"

    The document content of the field, which can contain text or structured data.

    " } }, - "documentation":"

    Contains the content of an async execution input or output field.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    ", + "documentation":"

    Contains the content of a flow execution input or output field.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", "sensitive":true, "union":true }, @@ -2096,7 +2096,7 @@ "documentation":"

    The name of the node in the flow where the error occurred (if applicable).

    " } }, - "documentation":"

    Contains information about an error that occurred during an async execution.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    " + "documentation":"

    Contains information about an error that occurred during a flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    " }, "FlowExecutionErrorType":{ "type":"string", @@ -2111,7 +2111,7 @@ "members":{ "conditionResultEvent":{ "shape":"ConditionResultEvent", - "documentation":"

    Contains information about a condition evaluation result during the async execution. This event is generated when a condition node in the flow evaluates its conditions.

    " + "documentation":"

    Contains information about a condition evaluation result during the flow execution. This event is generated when a condition node in the flow evaluates its conditions.

    " }, "flowFailureEvent":{ "shape":"FlowFailureEvent", @@ -2138,7 +2138,7 @@ "documentation":"

    Contains information about the outputs produced by a specific node during execution.

    " } }, - "documentation":"

    Represents an event that occurred during an async execution. This is a union type that can contain one of several event types, such as node input and output events; flow input and output events; condition node result events, or failure events.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    ", + "documentation":"

    Represents an event that occurred during a flow execution. This is a union type that can contain one of several event types, such as node input and output events, flow input and output events, condition node result events, or failure events.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", "union":true }, "FlowExecutionEventType":{ @@ -2187,7 +2187,7 @@ "documentation":"

    The timestamp when the inputs are provided.

    " } }, - "documentation":"

    Contains information about the inputs provided to the flow at the start of async execution.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    ", + "documentation":"

    Contains information about the inputs provided to the flow at the start of a flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", "sensitive":true }, "FlowExecutionName":{ @@ -2217,7 +2217,7 @@ "documentation":"

    The timestamp when the outputs are produced.

    " } }, - "documentation":"

    Contains information about the outputs produced by the flow during an async execution.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    ", + "documentation":"

    Contains information about the outputs produced by the flow during a flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", "sensitive":true }, "FlowExecutionRoleArn":{ @@ -2255,15 +2255,15 @@ "members":{ "createdAt":{ "shape":"DateTimestamp", - "documentation":"

    The timestamp when the async execution was created.

    " + "documentation":"

    The timestamp when the flow execution was created.

    " }, "endedAt":{ "shape":"DateTimestamp", - "documentation":"

    The timestamp when the async execution ended. This field is only populated when the execution has completed, failed, timed out, or been aborted.

    " + "documentation":"

    The timestamp when the flow execution ended. This field is only populated when the execution has completed, failed, timed out, or been aborted.

    " }, "executionArn":{ "shape":"FlowExecutionIdentifier", - "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the async execution.

    " + "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the flow execution.

    " }, "flowAliasIdentifier":{ "shape":"FlowAliasIdentifier", @@ -2279,10 +2279,10 @@ }, "status":{ "shape":"FlowExecutionStatus", - "documentation":"

    The current status of the async execution.

    Async executions time out after 24 hours.

    " + "documentation":"

    The current status of the flow execution.

    Flow executions time out after 24 hours.

    " } }, - "documentation":"

    Contains summary information about a flow's async execution, including its status, timestamps, and identifiers.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    " + "documentation":"

    Contains summary information about a flow execution, including its status, timestamps, and identifiers.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    " }, "FlowFailureEvent":{ "type":"structure", @@ -2305,7 +2305,7 @@ "documentation":"

    The timestamp when the failure occurred.

    " } }, - "documentation":"

    Contains information about a failure that occurred at the flow level during an async execution.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    ", + "documentation":"

    Contains information about a failure that occurred at the flow level during a flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", "sensitive":true }, "FlowIdentifier":{ @@ -2368,7 +2368,7 @@ "documentation":"

    The name of the input field as defined in the flow's input schema.

    " } }, - "documentation":"

    Represents an input field provided to a flow during an async execution.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    ", + "documentation":"

    Represents an input field provided to a flow during a flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", "sensitive":true }, "FlowInputFields":{ @@ -2471,7 +2471,7 @@ "documentation":"

    The name of the output field as defined in the flow's output schema.

    " } }, - "documentation":"

    Represents an output field produced by a flow during an async execution.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    ", + "documentation":"

    Represents an output field produced by a flow during a flow execution.

    Flow executions are in preview release for Amazon Bedrock and are subject to change.

    ", "sensitive":true }, "FlowOutputFields":{ @@ -3059,13 +3059,13 @@ "members":{ "executionIdentifier":{ "shape":"FlowExecutionIdentifier", - "documentation":"

    The unique identifier of the async execution.

    ", + "documentation":"

    The unique identifier of the flow execution.

    ", "location":"uri", "locationName":"executionIdentifier" }, "flowAliasIdentifier":{ "shape":"FlowAliasIdentifier", - "documentation":"

    The unique identifier of the flow alias used for the async execution.

    ", + "documentation":"

    The unique identifier of the flow alias used for the flow execution.

    ", "location":"uri", "locationName":"flowAliasIdentifier" }, @@ -3093,15 +3093,15 @@ }, "definition":{ "shape":"String", - "documentation":"

    The flow definition used for the async execution, including the nodes, connections, and configuration at the time when the execution started.

    The definition returns as a string that follows the structure of a FlowDefinition object.

    " + "documentation":"

    The flow definition used for the flow execution, including the nodes, connections, and configuration at the time when the execution started.

    The definition is returned as a string that follows the structure of a FlowDefinition object.
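
    Because the definition comes back as a raw string rather than a structured object, callers deserialize it themselves. Below is a minimal Java sketch of that, written against the SDK v2 conventions this generator targets; the getExecutionFlowSnapshot method name, the bedrockagentruntime package, and every identifier in it are assumptions or placeholders rather than values confirmed by this patch.

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import software.amazon.awssdk.services.bedrockagentruntime.BedrockAgentRuntimeClient;
    import software.amazon.awssdk.services.bedrockagentruntime.model.GetExecutionFlowSnapshotResponse;

    public class FlowSnapshotSketch {
        public static void main(String[] args) throws Exception {
            try (BedrockAgentRuntimeClient client = BedrockAgentRuntimeClient.create()) {
                GetExecutionFlowSnapshotResponse snapshot = client.getExecutionFlowSnapshot(r -> r
                        .flowIdentifier("FLOW1234")        // placeholder flow ID
                        .flowAliasIdentifier("ALIAS1234")  // placeholder alias ID
                        .executionIdentifier("EXEC1234")); // placeholder execution identifier

                // The definition string follows the FlowDefinition structure, so it can be
                // inspected as plain JSON.
                JsonNode definition = new ObjectMapper().readTree(snapshot.definition());
                System.out.println("Nodes in snapshot: " + definition.path("nodes").size());
            }
        }
    }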

    " }, "executionRoleArn":{ "shape":"FlowExecutionRoleArn", - "documentation":"

    The Amazon Resource Name (ARN) of the IAM service role that's used by the async execution.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the IAM service role that's used by the flow execution.

    " }, "flowAliasIdentifier":{ "shape":"FlowAliasIdentifier", - "documentation":"

    The unique identifier of the flow alias used for the async execution.

    " + "documentation":"

    The unique identifier of the flow alias used for the flow execution.

    " }, "flowIdentifier":{ "shape":"FlowIdentifier", @@ -3109,7 +3109,7 @@ }, "flowVersion":{ "shape":"Version", - "documentation":"

    The version of the flow used for the async execution.

    " + "documentation":"

    The version of the flow used for the flow execution.

    " } } }, @@ -3123,7 +3123,7 @@ "members":{ "executionIdentifier":{ "shape":"FlowExecutionIdentifier", - "documentation":"

    The unique identifier of the async execution to retrieve.

    ", + "documentation":"

    The unique identifier of the flow execution to retrieve.

    ", "location":"uri", "locationName":"executionIdentifier" }, @@ -3154,15 +3154,15 @@ "members":{ "endedAt":{ "shape":"DateTimestamp", - "documentation":"

    The timestamp when the async execution ended. This field is only populated when the execution has completed, failed, timed out, or been aborted.

    " + "documentation":"

    The timestamp when the flow execution ended. This field is only populated when the execution has completed, failed, timed out, or been aborted.

    " }, "errors":{ "shape":"FlowExecutionErrors", - "documentation":"

    A list of errors that occurred during the async execution. Each error includes an error code, message, and the node where the error occurred, if applicable.

    " + "documentation":"

    A list of errors that occurred during the flow execution. Each error includes an error code, message, and the node where the error occurred, if applicable.

    " }, "executionArn":{ "shape":"FlowExecutionIdentifier", - "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the async execution.

    " + "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the flow execution.

    " }, "flowAliasIdentifier":{ "shape":"FlowAliasIdentifier", @@ -3178,11 +3178,11 @@ }, "startedAt":{ "shape":"DateTimestamp", - "documentation":"

    The timestamp when the async execution started.

    " + "documentation":"

    The timestamp when the flow execution started.

    " }, "status":{ "shape":"FlowExecutionStatus", - "documentation":"

    The current status of the async execution.

    Async executions time out after 24 hours.

    " + "documentation":"

    The current status of the flow execution.

    Flow executions time out after 24 hours.
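
    Because a flow execution runs asynchronously and times out after 24 hours, a caller typically polls GetFlowExecution until a terminal status is reached. A sketch of that loop follows; only ABORTED is confirmed as a status value by this model, so the other terminal status names here are assumptions inferred from the sentence above about completed, failed, and timed-out executions.

    import java.time.Duration;
    import software.amazon.awssdk.services.bedrockagentruntime.BedrockAgentRuntimeClient;
    import software.amazon.awssdk.services.bedrockagentruntime.model.GetFlowExecutionResponse;

    public final class FlowExecutionPoller {
        // Polls until the execution has completed, failed, timed out, or been aborted.
        public static GetFlowExecutionResponse waitForTerminalStatus(BedrockAgentRuntimeClient client,
                                                                     String flowId,
                                                                     String aliasId,
                                                                     String executionArn) throws InterruptedException {
            while (true) {
                GetFlowExecutionResponse execution = client.getFlowExecution(r -> r
                        .flowIdentifier(flowId)
                        .flowAliasIdentifier(aliasId)
                        .executionIdentifier(executionArn));

                String status = execution.statusAsString();
                // Terminal status names are assumptions based on the documentation above.
                if ("SUCCEEDED".equals(status) || "FAILED".equals(status)
                    || "TIMED_OUT".equals(status) || "ABORTED".equals(status)) {
                    return execution;
                }
                Thread.sleep(Duration.ofSeconds(10).toMillis());
            }
        }
    }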

    " } } }, @@ -4314,6 +4314,10 @@ "shape":"MemoryId", "documentation":"

    The unique identifier of the agent memory.

    " }, + "promptCreationConfigurations":{ + "shape":"PromptCreationConfigurations", + "documentation":"

    Specifies parameters that control how the service populates the agent prompt for an InvokeAgent request. You can control which aspects of previous invocations in the same agent session the service uses to populate the agent prompt. This gives you more granular control over the contextual history that is used to process the current request.

    " + }, "sessionId":{ "shape":"SessionId", "documentation":"

    The unique identifier of the session. Use the same value across requests to continue the same conversation.

    ", @@ -4504,6 +4508,10 @@ "shape":"OrchestrationType", "documentation":"

    Specifies the type of orchestration strategy for the agent. This is set to the DEFAULT orchestration type by default.

    " }, + "promptCreationConfigurations":{ + "shape":"PromptCreationConfigurations", + "documentation":"

    Specifies parameters that control how the service populates the agent prompt for an InvokeInlineAgent request. You can control which aspects of previous invocations in the same agent session the service uses to populate the agent prompt. This gives you more granular control over the contextual history that is used to process the current request.

    " + }, "promptOverrideConfiguration":{ "shape":"PromptOverrideConfiguration", "documentation":"

    Configurations for advanced prompts used to override the default prompts to enhance the accuracy of the inline agent.

    " @@ -4789,7 +4797,7 @@ }, "executionIdentifier":{ "shape":"FlowExecutionIdentifier", - "documentation":"

    The unique identifier of the async execution.

    ", + "documentation":"

    The unique identifier of the flow execution.

    ", "location":"uri", "locationName":"executionIdentifier" }, @@ -4825,7 +4833,7 @@ "members":{ "flowExecutionEvents":{ "shape":"FlowExecutionEvents", - "documentation":"

    A list of events that occurred during the async execution. Events can include node inputs and outputs, flow inputs and outputs, condition results, and failure events.

    " + "documentation":"

    A list of events that occurred during the flow execution. Events can include node inputs and outputs, flow inputs and outputs, condition results, and failure events.

    " }, "nextToken":{ "shape":"NextToken", @@ -4851,7 +4859,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of async executions to return in a single response. If more executions exist than the specified maxResults value, a token is included in the response so that the remaining results can be retrieved.

    ", + "documentation":"

    The maximum number of flow executions to return in a single response. If more executions exist than the specified maxResults value, a token is included in the response so that the remaining results can be retrieved.

    ", "location":"querystring", "locationName":"maxResults" }, @@ -4869,7 +4877,7 @@ "members":{ "flowExecutionSummaries":{ "shape":"FlowExecutionSummaries", - "documentation":"

    A list of async execution summaries. Each summary includes the execution ARN, flow identifier, flow alias identifier, flow version, status, and timestamps.

    " + "documentation":"

    A list of flow execution summaries. Each summary includes the execution ARN, flow identifier, flow alias identifier, flow version, status, and timestamps.
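
    The list operations use the standard maxResults/nextToken pagination contract described above. A short sketch, again assuming the generated listFlowExecutions method and model classes; the flow ID is a placeholder.

    import software.amazon.awssdk.services.bedrockagentruntime.BedrockAgentRuntimeClient;
    import software.amazon.awssdk.services.bedrockagentruntime.model.ListFlowExecutionsResponse;

    public class ListFlowExecutionsSketch {
        public static void main(String[] args) {
            try (BedrockAgentRuntimeClient client = BedrockAgentRuntimeClient.create()) {
                String nextToken = null;
                do {
                    String token = nextToken; // effectively-final copy for the lambda
                    ListFlowExecutionsResponse page = client.listFlowExecutions(r -> r
                            .flowIdentifier("FLOW1234") // placeholder flow ID
                            .maxResults(25)
                            .nextToken(token));

                    page.flowExecutionSummaries().forEach(summary ->
                            System.out.println(summary.executionArn() + " " + summary.statusAsString()));

                    nextToken = page.nextToken(); // null when no results remain
                } while (nextToken != null);
            }
        }
    }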

    " }, "nextToken":{ "shape":"NextToken", @@ -5289,7 +5297,7 @@ "documentation":"

    The document content of the field, which can contain text or structured data.

    " } }, - "documentation":"

    Contains the content of a flow node's input or output field for an async execution.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    ", + "documentation":"

    Contains the content of a flow node's input or output field for a flow execution.

    Flow executions are in preview release for Amazon Bedrock and are subject to change.

    ", "sensitive":true, "union":true }, @@ -5319,7 +5327,7 @@ "documentation":"

    The timestamp when the node failure occurred.

    " } }, - "documentation":"

    Contains information about a failure that occurred at a specific node during a flow's async execution.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    ", + "documentation":"

    Contains information about a failure that occurred at a specific node during a flow execution.

    Flow executions are in preview release for Amazon Bedrock and are subject to change.

    ", "sensitive":true }, "NodeInputEvent":{ @@ -5343,7 +5351,7 @@ "documentation":"

    The timestamp when the inputs were provided to the node.

    " } }, - "documentation":"

    Contains information about the inputs provided to a specific node during a flow's async execution.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    ", + "documentation":"

    Contains information about the inputs provided to a specific node during a flow execution.

    Flow executions are in preview release for Amazon Bedrock and are subject to change.

    ", "sensitive":true }, "NodeInputField":{ @@ -5362,7 +5370,7 @@ "documentation":"

    The name of the input field as defined in the node's input schema.

    " } }, - "documentation":"

    Represents an input field provided to a node during a flow's async execution.

    ", + "documentation":"

    Represents an input field provided to a node during a flow execution.

    ", "sensitive":true }, "NodeInputFields":{ @@ -5400,7 +5408,7 @@ "documentation":"

    The timestamp when the outputs were produced by the node.

    " } }, - "documentation":"

    Contains information about the outputs produced by a specific node during a flow's async execution.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    ", + "documentation":"

    Contains information about the outputs produced by a specific node during a flow execution.

    Flow executions are in preview release for Amazon Bedrock and are subject to change.

    ", "sensitive":true }, "NodeOutputField":{ @@ -5419,7 +5427,7 @@ "documentation":"

    The name of the output field as defined in the node's output schema.

    " } }, - "documentation":"

    Represents an output field produced by a node during a flow's async execution.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    ", + "documentation":"

    Represents an output field produced by a node during a flow execution.

    Flow executions are in preview release for Amazon Bedrock and are subject to change.

    ", "sensitive":true }, "NodeOutputFields":{ @@ -5982,6 +5990,25 @@ "max":10, "min":0 }, + "PromptCreationConfigurations":{ + "type":"structure", + "members":{ + "excludePreviousThinkingSteps":{ + "shape":"Boolean", + "documentation":"

    If true, the service removes any content between <thinking> tags from previous conversations in an agent session. The service removes content only from turns that have already been processed. This helps you remove content that might not be useful for current and subsequent invocations. This can reduce the input token count and potentially save costs. The default value is false.

    " + }, + "previousConversationTurnsToInclude":{ + "shape":"PromptCreationConfigurationsPreviousConversationTurnsToIncludeInteger", + "documentation":"

    The number of previous conversation turns from the ongoing agent session to include in the conversation history of the agent prompt during the current invocation. This gives you more granular control over the context that the model is made aware of, and helps remove older context that is no longer useful during the ongoing agent session.

    " + } + }, + "documentation":"

    Specifies parameters that control how the service populates the agent prompt for an InvokeAgent or InvokeInlineAgent request. You can control which aspects of previous invocations in the same agent session the service uses to populate the agent prompt. This gives you more granular control over the contextual history that is used to process the current request.
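
    As a concrete illustration of this shape, here is how the two members above might be set on an InvokeAgent request once a client is generated from this model. The builder methods mirror the member names per the usual codegen convention; the agent, alias, and session identifiers and the input text are placeholders drawn from the existing InvokeAgent shape.

    import software.amazon.awssdk.services.bedrockagentruntime.model.InvokeAgentRequest;
    import software.amazon.awssdk.services.bedrockagentruntime.model.PromptCreationConfigurations;

    public class PromptCreationConfigurationsSketch {
        public static InvokeAgentRequest buildRequest() {
            return InvokeAgentRequest.builder()
                    .agentId("AGENT1234")      // placeholder agent ID
                    .agentAliasId("ALIAS1234") // placeholder alias ID
                    .sessionId("session-42")   // reusing the value continues the conversation
                    .inputText("Summarize my last order.")
                    .promptCreationConfigurations(PromptCreationConfigurations.builder()
                            // Strip <thinking> content from already-processed turns to save input tokens.
                            .excludePreviousThinkingSteps(true)
                            // Carry only the three most recent turns into the agent prompt.
                            .previousConversationTurnsToInclude(3)
                            .build())
                    .build();
        }
    }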

    " + }, + "PromptCreationConfigurationsPreviousConversationTurnsToIncludeInteger":{ + "type":"integer", + "box":true, + "min":0 + }, "PromptOverrideConfiguration":{ "type":"structure", "required":["promptConfigurations"], @@ -7293,7 +7320,7 @@ "documentation":"

    The name of the condition that was satisfied.

    " } }, - "documentation":"

    Represents a condition that was satisfied during a condition node evaluation in a flow's async execution.

    Asynchronous flows is in preview release for Amazon Bedrock and is subject to change.

    ", + "documentation":"

    Represents a condition that was satisfied during a condition node evaluation in a flow execution.

    Flow executions are in preview release for Amazon Bedrock and are subject to change.

    ", "sensitive":true }, "SatisfiedConditions":{ @@ -7485,13 +7512,13 @@ "members":{ "flowAliasIdentifier":{ "shape":"FlowAliasIdentifier", - "documentation":"

    The unique identifier of the flow alias to use for the async execution.

    ", + "documentation":"

    The unique identifier of the flow alias to use for the flow execution.

    ", "location":"uri", "locationName":"flowAliasIdentifier" }, "flowExecutionName":{ "shape":"FlowExecutionName", - "documentation":"

    The unique name for the async execution. If you don't provide one, a system-generated name is used.

    " + "documentation":"

    The unique name for the flow execution. If you don't provide one, a system-generated name is used.

    " }, "flowIdentifier":{ "shape":"FlowIdentifier", @@ -7501,11 +7528,11 @@ }, "inputs":{ "shape":"FlowInputs", - "documentation":"

    The input data required for the async execution. This must match the input schema defined in the flow.

    " + "documentation":"

    The input data required for the flow execution. This must match the input schema defined in the flow.

    " }, "modelPerformanceConfiguration":{ "shape":"ModelPerformanceConfiguration", - "documentation":"

    The performance settings for the foundation model used in the async execution.

    " + "documentation":"

    The performance settings for the foundation model used in the flow execution.

    " } } }, @@ -7514,7 +7541,7 @@ "members":{ "executionArn":{ "shape":"FlowExecutionIdentifier", - "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the async execution.

    " + "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the flow execution.
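
    Putting the request shape above to use: a sketch of starting a flow execution and capturing the returned ARN, assuming a generated startFlowExecution method and a FlowInput element type for the inputs list. Input construction is elided because it must match the specific flow's input schema; the identifiers are placeholders.

    import java.util.List;
    import software.amazon.awssdk.services.bedrockagentruntime.BedrockAgentRuntimeClient;
    import software.amazon.awssdk.services.bedrockagentruntime.model.FlowInput;
    import software.amazon.awssdk.services.bedrockagentruntime.model.StartFlowExecutionResponse;

    public class StartFlowExecutionSketch {
        public static String start(BedrockAgentRuntimeClient client, List<FlowInput> inputs) {
            StartFlowExecutionResponse started = client.startFlowExecution(r -> r
                    .flowIdentifier("FLOW1234")          // placeholder flow ID
                    .flowAliasIdentifier("ALIAS1234")    // placeholder alias ID
                    .flowExecutionName("nightly-run-01") // optional; a name is generated if omitted
                    .inputs(inputs));                    // must match the flow's input schema

            return started.executionArn(); // feed this to GetFlowExecution or StopFlowExecution
        }
    }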

    " } } }, @@ -7528,7 +7555,7 @@ "members":{ "executionIdentifier":{ "shape":"FlowExecutionIdentifier", - "documentation":"

    The unique identifier of the async execution to stop.

    ", + "documentation":"

    The unique identifier of the flow execution to stop.

    ", "location":"uri", "locationName":"executionIdentifier" }, @@ -7552,11 +7579,11 @@ "members":{ "executionArn":{ "shape":"FlowExecutionIdentifier", - "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the async execution that was stopped.

    " + "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the flow execution that was stopped.

    " }, "status":{ "shape":"FlowExecutionStatus", - "documentation":"

    The updated status of the async execution after the stop request. This will typically be ABORTED if the execution was successfully stopped.

    " + "documentation":"

    The updated status of the flow execution after the stop request. This will typically be ABORTED if the execution was successfully stopped.
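
    The symmetric stop call, under the same assumptions; per the sentence above, a successful stop typically leaves the execution in ABORTED status.

    import software.amazon.awssdk.services.bedrockagentruntime.BedrockAgentRuntimeClient;
    import software.amazon.awssdk.services.bedrockagentruntime.model.StopFlowExecutionResponse;

    public class StopFlowExecutionSketch {
        public static void stop(BedrockAgentRuntimeClient client, String executionArn) {
            StopFlowExecutionResponse stopped = client.stopFlowExecution(r -> r
                    .flowIdentifier("FLOW1234")       // placeholder flow ID
                    .flowAliasIdentifier("ALIAS1234") // placeholder alias ID
                    .executionIdentifier(executionArn));

            System.out.println("Status after stop: " + stopped.statusAsString()); // typically ABORTED
        }
    }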

    " } } }, diff --git a/services/bedrockdataautomation/pom.xml b/services/bedrockdataautomation/pom.xml index 9c730078e1e7..27ff5db8f474 100644 --- a/services/bedrockdataautomation/pom.xml +++ b/services/bedrockdataautomation/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT bedrockdataautomation AWS Java SDK :: Services :: Bedrock Data Automation diff --git a/services/bedrockdataautomationruntime/pom.xml b/services/bedrockdataautomationruntime/pom.xml index a17592c7947d..1d53719bd609 100644 --- a/services/bedrockdataautomationruntime/pom.xml +++ b/services/bedrockdataautomationruntime/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT bedrockdataautomationruntime AWS Java SDK :: Services :: Bedrock Data Automation Runtime diff --git a/services/bedrockruntime/pom.xml b/services/bedrockruntime/pom.xml index 8c9a174bed3a..5223f060c4b6 100644 --- a/services/bedrockruntime/pom.xml +++ b/services/bedrockruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT bedrockruntime AWS Java SDK :: Services :: Bedrock Runtime diff --git a/services/billing/pom.xml b/services/billing/pom.xml index 2945ed318f29..dbda6b263b65 100644 --- a/services/billing/pom.xml +++ b/services/billing/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT billing AWS Java SDK :: Services :: Billing diff --git a/services/billingconductor/pom.xml b/services/billingconductor/pom.xml index 65af8348c72f..134e14a19968 100644 --- a/services/billingconductor/pom.xml +++ b/services/billingconductor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT billingconductor AWS Java SDK :: Services :: Billingconductor diff --git a/services/braket/pom.xml b/services/braket/pom.xml index aafe23a4c2da..dc71ac044b80 100644 --- a/services/braket/pom.xml +++ b/services/braket/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT braket AWS Java SDK :: Services :: Braket diff --git a/services/budgets/pom.xml b/services/budgets/pom.xml index 9d180c58442a..fc58090e32cb 100644 --- a/services/budgets/pom.xml +++ b/services/budgets/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT budgets AWS Java SDK :: Services :: AWS Budgets diff --git a/services/chatbot/pom.xml b/services/chatbot/pom.xml index cbb0ca61c013..63df8e86012e 100644 --- a/services/chatbot/pom.xml +++ b/services/chatbot/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT chatbot AWS Java SDK :: Services :: Chatbot diff --git a/services/chime/pom.xml b/services/chime/pom.xml index 4cf902af1591..6517d8d3aab9 100644 --- a/services/chime/pom.xml +++ b/services/chime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT chime AWS Java SDK :: Services :: Chime diff --git a/services/chimesdkidentity/pom.xml b/services/chimesdkidentity/pom.xml index fbcfe9aa5012..d100c4324d7e 100644 --- a/services/chimesdkidentity/pom.xml +++ b/services/chimesdkidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT chimesdkidentity AWS Java SDK :: Services :: Chime SDK Identity diff --git a/services/chimesdkmediapipelines/pom.xml b/services/chimesdkmediapipelines/pom.xml index 03177adab807..decb9cadd554 100644 --- a/services/chimesdkmediapipelines/pom.xml +++ 
b/services/chimesdkmediapipelines/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT chimesdkmediapipelines AWS Java SDK :: Services :: Chime SDK Media Pipelines diff --git a/services/chimesdkmeetings/pom.xml b/services/chimesdkmeetings/pom.xml index c78cb7505fc7..0a4186ddc929 100644 --- a/services/chimesdkmeetings/pom.xml +++ b/services/chimesdkmeetings/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT chimesdkmeetings AWS Java SDK :: Services :: Chime SDK Meetings diff --git a/services/chimesdkmessaging/pom.xml b/services/chimesdkmessaging/pom.xml index 4b716a0a43e9..1bb383911f8f 100644 --- a/services/chimesdkmessaging/pom.xml +++ b/services/chimesdkmessaging/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT chimesdkmessaging AWS Java SDK :: Services :: Chime SDK Messaging diff --git a/services/chimesdkvoice/pom.xml b/services/chimesdkvoice/pom.xml index 80e651410a1d..ea5bd5a90178 100644 --- a/services/chimesdkvoice/pom.xml +++ b/services/chimesdkvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT chimesdkvoice AWS Java SDK :: Services :: Chime SDK Voice diff --git a/services/cleanrooms/pom.xml b/services/cleanrooms/pom.xml index 6ae495b8c139..36d2f325dfbd 100644 --- a/services/cleanrooms/pom.xml +++ b/services/cleanrooms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT cleanrooms AWS Java SDK :: Services :: Clean Rooms diff --git a/services/cleanroomsml/pom.xml b/services/cleanroomsml/pom.xml index fc59f2224fc1..5ed3269eedb6 100644 --- a/services/cleanroomsml/pom.xml +++ b/services/cleanroomsml/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT cleanroomsml AWS Java SDK :: Services :: Clean Rooms ML diff --git a/services/cloud9/pom.xml b/services/cloud9/pom.xml index bc33687aa770..6162efa2c57d 100644 --- a/services/cloud9/pom.xml +++ b/services/cloud9/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 cloud9 diff --git a/services/cloudcontrol/pom.xml b/services/cloudcontrol/pom.xml index 6176bc821f8a..d21e8d80ac91 100644 --- a/services/cloudcontrol/pom.xml +++ b/services/cloudcontrol/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT cloudcontrol AWS Java SDK :: Services :: Cloud Control diff --git a/services/clouddirectory/pom.xml b/services/clouddirectory/pom.xml index 87cc55f9bd22..21ca1d671549 100644 --- a/services/clouddirectory/pom.xml +++ b/services/clouddirectory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT clouddirectory AWS Java SDK :: Services :: Amazon CloudDirectory diff --git a/services/cloudformation/pom.xml b/services/cloudformation/pom.xml index 2375a5539a13..b1ae23576ed4 100644 --- a/services/cloudformation/pom.xml +++ b/services/cloudformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT cloudformation AWS Java SDK :: Services :: AWS CloudFormation diff --git a/services/cloudformation/src/main/resources/codegen-resources/service-2.json b/services/cloudformation/src/main/resources/codegen-resources/service-2.json index 297ec54c0184..97cb5c3a4c59 100644 --- a/services/cloudformation/src/main/resources/codegen-resources/service-2.json +++ 
b/services/cloudformation/src/main/resources/codegen-resources/service-2.json @@ -1467,13 +1467,11 @@ }, "ActivateOrganizationsAccessInput":{ "type":"structure", - "members":{ - } + "members":{} }, "ActivateOrganizationsAccessOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "ActivateTypeInput":{ "type":"structure", @@ -1538,8 +1536,7 @@ }, "AlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The resource with the name requested already exists.

    ", "error":{ "code":"AlreadyExistsException", @@ -1816,8 +1813,7 @@ }, "ChangeSetNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified change set name or ID doesn't exist. To view valid change sets for a stack, use the ListChangeSets operation.

    ", "error":{ "code":"ChangeSetNotFound", @@ -1948,8 +1944,7 @@ }, "ConcurrentResourcesLimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    No more than 5 generated templates can be in an InProgress or Pending status at one time. This error is also returned if you attempt to update or delete a generated template that is in an InProgress or Pending status.

    ", "error":{ "code":"ConcurrentResourcesLimitExceeded", @@ -1995,8 +1990,7 @@ }, "ContinueUpdateRollbackOutput":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The output for a ContinueUpdateRollback operation.

    " }, "CreateChangeSetInput":{ @@ -2380,8 +2374,7 @@ }, "CreatedButModifiedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified resource exists, but has been changed.

    ", "error":{ "code":"CreatedButModifiedException", @@ -2393,13 +2386,11 @@ "CreationTime":{"type":"timestamp"}, "DeactivateOrganizationsAccessInput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeactivateOrganizationsAccessOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeactivateTypeInput":{ "type":"structure", @@ -2420,8 +2411,7 @@ }, "DeactivateTypeOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteChangeSetInput":{ "type":"structure", @@ -2440,8 +2430,7 @@ }, "DeleteChangeSetOutput":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The output for the DeleteChangeSet action.

    " }, "DeleteGeneratedTemplateInput":{ @@ -2549,8 +2538,7 @@ }, "DeleteStackSetOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeletionMode":{ "type":"string", @@ -2612,8 +2600,7 @@ }, "DeregisterTypeOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeAccountLimitsInput":{ "type":"structure", @@ -3656,8 +3643,7 @@ }, "ExecuteChangeSetOutput":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The output for the ExecuteChangeSet action.

    " }, "ExecuteStackRefactorInput":{ @@ -3744,8 +3730,7 @@ }, "GeneratedTemplateNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The generated template was not found.

    ", "error":{ "code":"GeneratedTemplateNotFound", @@ -3987,8 +3972,7 @@ }, "HookResultNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified target doesn't have any requested Hook invocations.

    ", "error":{ "code":"HookResultNotFound", @@ -4150,8 +4134,7 @@ "IncludePropertyValues":{"type":"boolean"}, "InsufficientCapabilitiesException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The template contains resources with capabilities that weren't specified in the Capabilities parameter.

    ", "error":{ "code":"InsufficientCapabilitiesException", @@ -4162,8 +4145,7 @@ }, "InvalidChangeSetStatusException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified change set can't be used to update the stack. For example, the change set status might be CREATE_IN_PROGRESS, or the stack status might be UPDATE_IN_PROGRESS.

    ", "error":{ "code":"InvalidChangeSetStatus", @@ -4174,8 +4156,7 @@ }, "InvalidOperationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified operation isn't valid.

    ", "error":{ "code":"InvalidOperationException", @@ -4186,8 +4167,7 @@ }, "InvalidStateTransitionException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Error reserved for use by the CloudFormation CLI. CloudFormation doesn't return this error to users.

    ", "error":{ "code":"InvalidStateTransition", @@ -4216,8 +4196,7 @@ "LastUpdatedTime":{"type":"timestamp"}, "LimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The quota for the resource has already been reached.

    For information about resource and stack limitations, see CloudFormation quotas in the CloudFormation User Guide.

    ", "error":{ "code":"LimitExceededException", @@ -5090,8 +5069,7 @@ }, "NameAlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified name is already in use.

    ", "error":{ "code":"NameAlreadyExistsException", @@ -5134,8 +5112,7 @@ }, "OperationIdAlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified operation ID already exists.

    ", "error":{ "code":"OperationIdAlreadyExistsException", @@ -5146,8 +5123,7 @@ }, "OperationInProgressException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Another operation is currently in progress for this stack set. Only one operation can be performed for a stack set at a given time.

    ", "error":{ "code":"OperationInProgressException", @@ -5158,8 +5134,7 @@ }, "OperationNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified ID refers to an operation that doesn't exist.

    ", "error":{ "code":"OperationNotFoundException", @@ -5208,8 +5183,7 @@ }, "OperationStatusCheckFailedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Error reserved for use by the CloudFormation CLI. CloudFormation doesn't return this error to users.

    ", "error":{ "code":"ConditionalCheckFailed", @@ -5531,8 +5505,7 @@ }, "RecordHandlerProgressOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "RefreshAllResources":{"type":"boolean"}, "Region":{ @@ -5943,8 +5916,7 @@ "ResourceScanId":{"type":"string"}, "ResourceScanInProgressException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    A resource scan is currently in progress. Only one can be run at a time for an account in a Region.

    ", "error":{ "code":"ResourceScanInProgress", @@ -5955,8 +5927,7 @@ }, "ResourceScanLimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The limit on resource scans has been exceeded. Reasons include:

    • Exceeded the daily quota for resource scans.

    • A resource scan recently failed. You must wait 10 minutes before starting a new resource scan.

    • The last resource scan failed after exceeding 100,000 resources. When this happens, you must wait 24 hours before starting a new resource scan.

    ", "error":{ "code":"ResourceScanLimitExceeded", @@ -5967,8 +5938,7 @@ }, "ResourceScanNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The resource scan was not found.

    ", "error":{ "code":"ResourceScanNotFound", @@ -6286,7 +6256,7 @@ "members":{ "Types":{ "shape":"ResourceTypeFilters", - "documentation":"

    An array of strings where each string represents an Amazon Web Services resource type you want to scan. Each string defines the resource type using the format AWS::ServiceName::ResourceType, for example, AWS::DynamoDB::Table. For the full list of supported resource types, see the Resource type support table in the CloudFormation User Guide.

    To scan all resource types within a service, you can use a wildcard, represented by an asterisk (*). You can place a asterisk at only the end of the string, for example, AWS::S3::*.

    " + "documentation":"

    An array of strings where each string represents an Amazon Web Services resource type you want to scan. Each string defines the resource type using the format AWS::ServiceName::ResourceType, for example, AWS::DynamoDB::Table. For the full list of supported resource types, see the Resource type support table in the CloudFormation User Guide.

    To scan all resource types within a service, you can use a wildcard, represented by an asterisk (*). You can place an asterisk at only the end of the string, for example, AWS::S3::*.

    " } }, "documentation":"

    A filter that is used to specify which resource types to scan.

    " @@ -6429,8 +6399,7 @@ }, "SetTypeDefaultVersionOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "SignalResourceInput":{ "type":"structure", @@ -6857,8 +6826,7 @@ }, "StackInstanceNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified stack instance doesn't exist.

    ", "error":{ "code":"StackInstanceNotFoundException", @@ -6986,8 +6954,7 @@ }, "StackNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified stack ARN doesn't exist, or the stack corresponding to the ARN in the input doesn't exist.

    ", "error":{ "code":"StackNotFoundException", @@ -7108,8 +7075,7 @@ "StackRefactorId":{"type":"string"}, "StackRefactorNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified stack refactor can't be found.

    ", "error":{ "code":"StackRefactorNotFoundException", @@ -7604,8 +7570,7 @@ }, "StackSetNotEmptyException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You can't yet delete this stack set, because it still contains one or more stack instances. Delete all stack instances from the stack set before deleting the stack set.

    ", "error":{ "code":"StackSetNotEmptyException", @@ -7616,8 +7581,7 @@ }, "StackSetNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified stack set doesn't exist.

    ", "error":{ "code":"StackSetNotFoundException", @@ -7994,8 +7958,7 @@ }, "StaleRequestException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Another operation has been performed on this stack set since the specified operation was performed.

    ", "error":{ "code":"StaleRequestException", @@ -8053,8 +8016,7 @@ }, "StopStackSetOperationOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "SupportedMajorVersion":{ "type":"integer", @@ -8291,8 +8253,7 @@ "Timestamp":{"type":"timestamp"}, "TokenAlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    A client request token already exists.

    ", "error":{ "code":"TokenAlreadyExistsException", @@ -8409,8 +8370,7 @@ }, "TypeConfigurationNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified extension configuration can't be found.

    ", "error":{ "code":"TypeConfigurationNotFoundException", @@ -8452,8 +8412,7 @@ }, "TypeNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified extension doesn't exist in the CloudFormation registry.

    ", "error":{ "code":"TypeNotFoundException", @@ -8997,7 +8956,8 @@ "enum":[ "MUTUALLY_EXCLUSIVE_PROPERTIES", "UNSUPPORTED_PROPERTIES", - "MUTUALLY_EXCLUSIVE_TYPES" + "MUTUALLY_EXCLUSIVE_TYPES", + "EXCLUDED_PROPERTIES" ] }, "Warnings":{ diff --git a/services/cloudfront/pom.xml b/services/cloudfront/pom.xml index 014b7548c9b8..2cf424873af5 100644 --- a/services/cloudfront/pom.xml +++ b/services/cloudfront/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT cloudfront AWS Java SDK :: Services :: Amazon CloudFront diff --git a/services/cloudfrontkeyvaluestore/pom.xml b/services/cloudfrontkeyvaluestore/pom.xml index 29c034fb9db8..8c9ecccbb5e1 100644 --- a/services/cloudfrontkeyvaluestore/pom.xml +++ b/services/cloudfrontkeyvaluestore/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT cloudfrontkeyvaluestore AWS Java SDK :: Services :: Cloud Front Key Value Store diff --git a/services/cloudhsm/pom.xml b/services/cloudhsm/pom.xml index fc042ebfe84e..c376b4755a8d 100644 --- a/services/cloudhsm/pom.xml +++ b/services/cloudhsm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT cloudhsm AWS Java SDK :: Services :: AWS CloudHSM diff --git a/services/cloudhsmv2/pom.xml b/services/cloudhsmv2/pom.xml index d70183c4bbaf..36c506ae3caa 100644 --- a/services/cloudhsmv2/pom.xml +++ b/services/cloudhsmv2/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 cloudhsmv2 diff --git a/services/cloudsearch/pom.xml b/services/cloudsearch/pom.xml index fe1921d0bb90..1befc50646a7 100644 --- a/services/cloudsearch/pom.xml +++ b/services/cloudsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT cloudsearch AWS Java SDK :: Services :: Amazon CloudSearch diff --git a/services/cloudsearchdomain/pom.xml b/services/cloudsearchdomain/pom.xml index f437c3974e0a..88f7f1a9099e 100644 --- a/services/cloudsearchdomain/pom.xml +++ b/services/cloudsearchdomain/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT cloudsearchdomain AWS Java SDK :: Services :: Amazon CloudSearch Domain diff --git a/services/cloudtrail/pom.xml b/services/cloudtrail/pom.xml index 5662773636e5..429eec6a8051 100644 --- a/services/cloudtrail/pom.xml +++ b/services/cloudtrail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT cloudtrail AWS Java SDK :: Services :: AWS CloudTrail diff --git a/services/cloudtrail/src/main/resources/codegen-resources/service-2.json b/services/cloudtrail/src/main/resources/codegen-resources/service-2.json index bf317e00ad41..800bdb449789 100644 --- a/services/cloudtrail/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudtrail/src/main/resources/codegen-resources/service-2.json @@ -459,6 +459,29 @@ "documentation":"

    Returns the specified dashboard.

    ", "idempotent":true }, + "GetEventConfiguration":{ + "name":"GetEventConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetEventConfigurationRequest"}, + "output":{"shape":"GetEventConfigurationResponse"}, + "errors":[ + {"shape":"CloudTrailARNInvalidException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"EventDataStoreARNInvalidException"}, + {"shape":"EventDataStoreNotFoundException"}, + {"shape":"InvalidEventDataStoreStatusException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidEventDataStoreCategoryException"}, + {"shape":"NoManagementAccountSLRExistsException"}, + {"shape":"InvalidParameterCombinationException"} + ], + "documentation":"

    Retrieves the current event configuration settings for the specified event data store, including details about maximum event size and context key selectors configured for the event data store.

    ", + "idempotent":true + }, "GetEventDataStore":{ "name":"GetEventDataStore", "http":{ @@ -814,6 +837,35 @@ "documentation":"

    Looks up management events or CloudTrail Insights events that are captured by CloudTrail. You can look up events that occurred in a Region within the last 90 days.

    LookupEvents returns recent Insights events for trails that enable Insights. To view Insights events for an event data store, you can run queries on your Insights event data store, and you can also view the Lake dashboard for Insights.

    Lookup supports the following attributes for management events:

    • Amazon Web Services access key

    • Event ID

    • Event name

    • Event source

    • Read only

    • Resource name

    • Resource type

    • User name

    Lookup supports the following attributes for Insights events:

    • Event ID

    • Event name

    • Event source

    All attributes are optional. The default number of results returned is 50, with a maximum of 50 possible. The response includes a token that you can use to get the next page of results.

    The rate of lookup requests is limited to two per second, per account, per Region. If this limit is exceeded, a throttling error occurs.

    ", "idempotent":true }, + "PutEventConfiguration":{ + "name":"PutEventConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutEventConfigurationRequest"}, + "output":{"shape":"PutEventConfigurationResponse"}, + "errors":[ + {"shape":"EventDataStoreARNInvalidException"}, + {"shape":"EventDataStoreNotFoundException"}, + {"shape":"InvalidEventDataStoreStatusException"}, + {"shape":"InvalidEventDataStoreCategoryException"}, + {"shape":"InactiveEventDataStoreException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidParameterCombinationException"}, + {"shape":"CloudTrailARNInvalidException"}, + {"shape":"ConflictException"}, + {"shape":"NotOrganizationMasterAccountException"}, + {"shape":"NoManagementAccountSLRExistsException"}, + {"shape":"InsufficientDependencyServiceAccessPermissionException"}, + {"shape":"InsufficientIAMAccessPermissionException"} + ], + "documentation":"

    Updates the event configuration settings for the specified event data store. You can update the maximum event size and context key selectors.

    ", + "idempotent":true + }, "PutEventSelectors":{ "name":"PutEventSelectors", "http":{ @@ -909,7 +961,8 @@ {"shape":"OrganizationNotInAllFeaturesModeException"}, {"shape":"OrganizationsNotInUseException"}, {"shape":"UnsupportedOperationException"}, - {"shape":"OperationNotPermittedException"} + {"shape":"OperationNotPermittedException"}, + {"shape":"InsufficientIAMAccessPermissionException"} ], "documentation":"

    Registers an organization’s member account as the CloudTrail delegated administrator.

    ", "idempotent":true @@ -1019,7 +1072,8 @@ {"shape":"UnsupportedOperationException"}, {"shape":"NotOrganizationMasterAccountException"}, {"shape":"NoManagementAccountSLRExistsException"}, - {"shape":"InsufficientDependencyServiceAccessPermissionException"} + {"shape":"InsufficientDependencyServiceAccessPermissionException"}, + {"shape":"ConflictException"} ], "documentation":"

    Starts the ingestion of live events on an event data store specified as either an ARN or the ID portion of the ARN. To start ingestion, the event data store Status must be STOPPED_INGESTION and the eventCategory must be Management, Data, NetworkActivity, or ConfigurationItem.

    " }, @@ -1118,7 +1172,8 @@ {"shape":"UnsupportedOperationException"}, {"shape":"NotOrganizationMasterAccountException"}, {"shape":"NoManagementAccountSLRExistsException"}, - {"shape":"InsufficientDependencyServiceAccessPermissionException"} + {"shape":"InsufficientDependencyServiceAccessPermissionException"}, + {"shape":"ConflictException"} ], "documentation":"

    Stops the ingestion of live events on an event data store specified as either an ARN or the ID portion of the ARN. To stop ingestion, the event data store Status must be ENABLED and the eventCategory must be Management, Data, NetworkActivity, or ConfigurationItem.

    " }, @@ -1289,15 +1344,13 @@ "shapes":{ "AccessDeniedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You do not have sufficient access to perform this action.

    ", "exception":true }, "AccountHasOngoingImportException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when you start a new import and a previous import is still in progress.

    ", "exception":true }, @@ -1309,22 +1362,19 @@ }, "AccountNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified account is not found or not part of an organization.

    ", "exception":true }, "AccountNotRegisteredException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified account is not registered as the CloudTrail delegated administrator.

    ", "exception":true }, "AccountRegisteredException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the account is already registered as the CloudTrail delegated administrator.

    ", "exception":true }, @@ -1348,8 +1398,7 @@ }, "AddTagsResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Returns the objects or data if successful. Otherwise, returns an error.

    " }, "AdvancedEventSelector":{ @@ -1463,8 +1512,7 @@ }, "CannotDelegateManagementAccountException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the management account of an organization is registered as the CloudTrail delegated administrator.

    ", "exception":true }, @@ -1484,15 +1532,13 @@ }, "ChannelARNInvalidException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified value of ChannelARN is not valid.

    ", "exception":true }, "ChannelAlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the provided channel already exists.

    ", "exception":true }, @@ -1504,15 +1550,13 @@ }, "ChannelExistsForEDSException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified event data store cannot yet be deleted because it is in use by a channel.

    ", "exception":true }, "ChannelMaxLimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the maximum number of channels limit is exceeded.

    ", "exception":true }, @@ -1524,8 +1568,7 @@ }, "ChannelNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when CloudTrail cannot find the specified channel.

    ", "exception":true }, @@ -1535,46 +1578,63 @@ }, "CloudTrailARNInvalidException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when an operation is called with an ARN that is not valid.

    The following is the format of a trail ARN: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

    The following is the format of an event data store ARN: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE

    The following is the format of a dashboard ARN: arn:aws:cloudtrail:us-east-1:123456789012:dashboard/exampleDash

    The following is the format of a channel ARN: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890

    ", "exception":true }, "CloudTrailAccessNotEnabledException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when trusted access has not been enabled between CloudTrail and Organizations. For more information, see How to enable or disable trusted access in the Organizations User Guide and Prepare For Creating a Trail For Your Organization in the CloudTrail User Guide.

    ", "exception":true }, "CloudTrailInvalidClientTokenIdException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when a call results in the InvalidClientTokenId error code. This can occur when you are creating or updating a trail to send notifications to an Amazon SNS topic that is in a suspended Amazon Web Services account.

    ", "exception":true }, "CloudWatchLogsDeliveryUnavailableException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Cannot set a CloudWatch Logs delivery for this Region.

    ", "exception":true }, "ConcurrentModificationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You are trying to update a resource when another request is in progress. Allow sufficient wait time for the previous request to complete, then retry your request.

    ", "exception":true }, "ConflictException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified resource is not ready for an operation. This can occur when you try to run an operation on a resource before CloudTrail has time to fully load the resource, or because another operation is modifying the resource. If this exception occurs, wait a few minutes, and then try the operation again.

    ", "exception":true }, + "ContextKeySelector":{ + "type":"structure", + "required":[ + "Type", + "Equals" + ], + "members":{ + "Type":{ + "shape":"Type", + "documentation":"

    Specifies the type of the event record field in ContextKeySelector. Valid values include RequestContext and TagContext.

    " + }, + "Equals":{ + "shape":"OperatorTargetList", + "documentation":"

    A list of keys defined by Type to be included in CloudTrail enriched events.

    " + } + }, + "documentation":"

    An object that contains information types to be included in CloudTrail enriched events.

    " + }, + "ContextKeySelectors":{ + "type":"list", + "member":{"shape":"ContextKeySelector"}, + "max":2 + }, "CreateChannelRequest":{ "type":"structure", "required":[ @@ -1954,8 +2014,7 @@ "Date":{"type":"timestamp"}, "DelegatedAdminAccountLimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the maximum number of CloudTrail delegated administrators is reached.

    ", "exception":true }, @@ -1971,8 +2030,7 @@ }, "DeleteChannelResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteDashboardRequest":{ "type":"structure", @@ -1986,8 +2044,7 @@ }, "DeleteDashboardResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteEventDataStoreRequest":{ "type":"structure", @@ -2001,8 +2058,7 @@ }, "DeleteEventDataStoreResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteResourcePolicyRequest":{ "type":"structure", @@ -2016,8 +2072,7 @@ }, "DeleteResourcePolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteTrailRequest":{ "type":"structure", @@ -2032,8 +2087,7 @@ }, "DeleteTrailResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    " }, "DeliveryS3Uri":{ @@ -2068,8 +2122,7 @@ }, "DeregisterOrganizationDelegatedAdminResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Returns the following response if successful. Otherwise, returns an error.

    " }, "DescribeQueryRequest":{ @@ -2374,15 +2427,13 @@ }, "EventDataStoreARNInvalidException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified event data store ARN is not valid or does not map to an event data store in your account.

    ", "exception":true }, "EventDataStoreAlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    An event data store with that name already exists.

    ", "exception":true }, @@ -2394,15 +2445,13 @@ }, "EventDataStoreFederationEnabledException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You cannot delete the event data store because Lake query federation is enabled. To delete the event data store, run the DisableFederation operation to disable Lake query federation on the event data store.

    ", "exception":true }, "EventDataStoreHasOngoingImportException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when you try to update or delete an event data store that currently has an import in progress.

    ", "exception":true }, @@ -2420,8 +2469,7 @@ }, "EventDataStoreMaxLimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Your account has used the maximum number of event data stores.

    ", "exception":true }, @@ -2433,8 +2481,7 @@ }, "EventDataStoreNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified event data store was not found.

    ", "exception":true }, @@ -2451,8 +2498,7 @@ }, "EventDataStoreTerminationProtectedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The event data store cannot be deleted because termination protection is enabled for it.

    ", "exception":true }, @@ -2555,8 +2601,7 @@ }, "GenerateResponseException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when a valid query could not be generated for the provided prompt.

    ", "exception":true }, @@ -2654,6 +2699,32 @@ } } }, + "GetEventConfigurationRequest":{ + "type":"structure", + "members":{ + "EventDataStore":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) or ID suffix of the ARN of the event data store for which you want to retrieve event configuration settings.

    " + } + } + }, + "GetEventConfigurationResponse":{ + "type":"structure", + "members":{ + "EventDataStoreArn":{ + "shape":"EventDataStoreArn", + "documentation":"

    The Amazon Resource Name (ARN) or ID suffix of the ARN of the event data store for which the event configuration settings are returned.

    " + }, + "MaxEventSize":{ + "shape":"MaxEventSize", + "documentation":"

    The maximum allowed size for events stored in the specified event data store.

    " + }, + "ContextKeySelectors":{ + "shape":"ContextKeySelectors", + "documentation":"

    The list of context key selectors that are configured for the event data store.
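
    Tying the new CloudTrail operations and shapes together: a sketch that reads the current event configuration and then updates the context key selectors. The CloudTrailClient method names follow codegen convention; the setter for the model member named Equals is an assumption (the generator has to rename it away from java.lang.Object's equals, guessed here as equalsValue), the PutEventConfiguration request members are assumed to mirror the Get response, and the ARN is a placeholder.

    import software.amazon.awssdk.services.cloudtrail.CloudTrailClient;
    import software.amazon.awssdk.services.cloudtrail.model.ContextKeySelector;
    import software.amazon.awssdk.services.cloudtrail.model.GetEventConfigurationResponse;

    public class EventConfigurationSketch {
        public static void main(String[] args) {
            try (CloudTrailClient cloudTrail = CloudTrailClient.create()) {
                String edsArn = "arn:aws:cloudtrail:us-east-1:123456789012:eventdatastore/EXAMPLE";

                // Read the current settings: max event size plus any context key selectors.
                GetEventConfigurationResponse current = cloudTrail.getEventConfiguration(r -> r
                        .eventDataStore(edsArn));
                System.out.println("Max event size: " + current.maxEventSize());

                // Enrich events with a request-context key; the model allows at most 2 selectors.
                cloudTrail.putEventConfiguration(r -> r
                        .eventDataStore(edsArn)
                        .maxEventSize(current.maxEventSize()) // keep the existing size setting
                        .contextKeySelectors(ContextKeySelector.builder()
                                .type("RequestContext")         // valid values: RequestContext, TagContext
                                .equalsValue("sourceIPAddress") // model member "Equals"; generated name assumed
                                .build()));
            }
        }
    }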

    " + } + } + }, "GetEventDataStoreRequest":{ "type":"structure", "required":["EventDataStore"], @@ -3068,8 +3139,7 @@ }, "ImportNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified import was not found.

    ", "exception":true }, @@ -3152,15 +3222,13 @@ }, "InactiveEventDataStoreException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The event data store is inactive.

    ", "exception":true }, "InactiveQueryException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified query cannot be canceled because it is in the FINISHED, FAILED, TIMED_OUT, or CANCELLED state.

    ", "exception":true }, @@ -3192,8 +3260,7 @@ }, "InsightNotEnabledException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    If you run GetInsightSelectors on a trail or event data store that does not have Insights events enabled, the operation throws the exception InsightNotEnabledException.

    ", "exception":true }, @@ -3246,234 +3313,207 @@ }, "InsufficientDependencyServiceAccessPermissionException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the IAM identity that is used to create the organization resource lacks one or more required permissions for creating an organization resource in a required service.

    ", "exception":true }, "InsufficientEncryptionPolicyException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    For the CreateTrail, PutInsightSelectors, UpdateTrail, StartQuery, and StartImport operations, this exception is thrown when the policy on the S3 bucket or KMS key does not have sufficient permissions for the operation.

    For all other operations, this exception is thrown when the policy for the KMS key does not have sufficient permissions for the operation.

    ", "exception":true }, + "InsufficientIAMAccessPermissionException":{ + "type":"structure", + "members":{}, + "documentation":"

    The task can't be completed because you are signed in with an account that lacks permissions to view or create a service-linked role. Sign in with an account that has the required permissions and then try again.

    ", + "exception":true + }, "InsufficientS3BucketPolicyException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the policy on the S3 bucket is not sufficient.

    ", "exception":true }, "InsufficientSnsTopicPolicyException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the policy on the Amazon SNS topic is not sufficient.

    ", "exception":true }, "Integer":{"type":"integer"}, "InvalidCloudWatchLogsLogGroupArnException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the provided CloudWatch Logs log group is not valid.

    ", "exception":true }, "InvalidCloudWatchLogsRoleArnException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the provided role is not valid.

    ", "exception":true }, "InvalidDateRangeException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified date range for the query is not valid. Be sure that the start time is chronologically before the end time. For more information about writing a query, see Create or edit a query in the CloudTrail User Guide.

    ", "exception":true }, "InvalidEventCategoryException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Occurs if an event category that is not valid is specified as a value of EventCategory.

    ", "exception":true }, "InvalidEventDataStoreCategoryException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when event categories of specified event data stores are not valid.

    ", "exception":true }, "InvalidEventDataStoreStatusException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The event data store is not in a status that supports the operation.

    ", "exception":true }, "InvalidEventSelectorsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the PutEventSelectors operation is called with a number of event selectors, advanced event selectors, or data resources that is not valid. The combination of event selectors or advanced event selectors and data resources is not valid. A trail can have up to 5 event selectors. If a trail uses advanced event selectors, a maximum of 500 total values for all conditions in all advanced event selectors is allowed. A trail is limited to 250 data resources. These data resources can be distributed across event selectors, but the overall total cannot exceed 250.

    You can:

    • Specify a valid number of event selectors (1 to 5) for a trail.

    • Specify a valid number of data resources (1 to 250) for an event selector. The limit on the number of resources per individual event selector is configurable up to 250, but only if the total number of data resources does not exceed 250 across all event selectors for a trail.

    • Specify up to 500 values for all conditions in all advanced event selectors for a trail.

    • Specify a valid value for a parameter. For example, specifying the ReadWriteType parameter with a value of read-only is not valid.
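
    To make these limits concrete, a hypothetical client-side pre-check might look like the following. The helper class is illustrative, not part of the SDK; only EventSelector and its dataResources member are existing SDK types, and the constants simply mirror the limits quoted above.

    import java.util.List;
    import software.amazon.awssdk.services.cloudtrail.model.EventSelector;

    public final class EventSelectorLimits {
        private static final int MAX_EVENT_SELECTORS = 5;    // per trail
        private static final int MAX_DATA_RESOURCES = 250;   // total across all selectors

        private EventSelectorLimits() {
        }

        public static void check(List<EventSelector> selectors) {
            if (selectors.size() > MAX_EVENT_SELECTORS) {
                throw new IllegalArgumentException("A trail can have up to 5 event selectors.");
            }
            int totalDataResources = selectors.stream()
                                              .mapToInt(s -> s.dataResources().size())
                                              .sum();
            if (totalDataResources > MAX_DATA_RESOURCES) {
                throw new IllegalArgumentException("A trail is limited to 250 data resources in total.");
            }
        }
    }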

    ", "exception":true }, "InvalidHomeRegionException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when an operation is called on a trail from a Region other than the Region in which the trail was created.

    ", "exception":true }, "InvalidImportSourceException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the provided source S3 bucket is not valid for import.

    ", "exception":true }, "InvalidInsightSelectorsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    For PutInsightSelectors, this exception is thrown when the formatting or syntax of the InsightSelectors JSON statement is not valid, or the specified InsightType in the InsightSelectors statement is not valid. Valid values for InsightType are ApiCallRateInsight and ApiErrorRateInsight. To enable Insights on an event data store, the destination event data store specified by the InsightsDestination parameter must log Insights events and the source event data store specified by the EventDataStore parameter must log management events.

    For UpdateEventDataStore, this exception is thrown if Insights are enabled on the event data store and the updated advanced event selectors are not compatible with the configured InsightSelectors. If the InsightSelectors includes an InsightType of ApiCallRateInsight, the source event data store must log write management events. If the InsightSelectors includes an InsightType of ApiErrorRateInsight, the source event data store must log management events.

    ", "exception":true }, "InvalidKmsKeyIdException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the KMS key ARN is not valid.

    ", "exception":true }, "InvalidLookupAttributesException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Occurs when a lookup attribute is specified that is not valid.

    ", "exception":true }, "InvalidMaxResultsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown if the limit specified is not valid.

    ", "exception":true }, "InvalidNextTokenException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown if the token is not valid, or if the token was previously used in a request with different parameters.

    ", "exception":true }, "InvalidParameterCombinationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the combination of parameters provided is not valid.

    ", "exception":true }, "InvalidParameterException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The request includes a parameter that is not valid.

    ", "exception":true }, "InvalidQueryStatementException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The query that was submitted has validation errors, or uses incorrect syntax or unsupported keywords. For more information about writing a query, see Create or edit a query in the CloudTrail User Guide.

    ", "exception":true }, "InvalidQueryStatusException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The query status is not valid for the operation.

    ", "exception":true }, "InvalidS3BucketNameException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the provided S3 bucket name is not valid.

    ", "exception":true }, "InvalidS3PrefixException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the provided S3 prefix is not valid.

    ", "exception":true }, "InvalidSnsTopicNameException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the provided SNS topic name is not valid.

    ", "exception":true }, "InvalidSourceException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified value of Source is not valid.

    ", "exception":true }, "InvalidTagParameterException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified tag key or values are not valid. It can also occur if there are duplicate tags or too many tags on the resource.

    ", "exception":true }, "InvalidTimeRangeException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Occurs if the timestamp values are not valid. Either the start time occurs after the end time, or the time range is outside the range of possible values.

    ", "exception":true }, "InvalidTokenException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Reserved for future use.

    ", "exception":true }, "InvalidTrailNameException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the provided trail name is not valid. Trail names must meet the following requirements:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-)

    • Start with a letter or number, and end with a letter or number

    • Be between 3 and 128 characters

    • Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are not valid.

    • Not be in IP address format (for example, 192.168.5.4)
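
    A sketch of how these naming rules might be checked before calling the service; the regular expressions below are one possible translation of the bullets above, not patterns published by CloudTrail.

    import java.util.regex.Pattern;

    public final class TrailNames {
        // Starts and ends with a letter or digit; '.', '_' and '-' are allowed
        // between alphanumerics, but never two separators in a row.
        private static final Pattern SHAPE =
            Pattern.compile("^[a-zA-Z0-9](?:[._-]?[a-zA-Z0-9])*$");
        // Rejects names in IPv4 format, for example 192.168.5.4.
        private static final Pattern IPV4 =
            Pattern.compile("^\\d{1,3}(?:\\.\\d{1,3}){3}$");

        private TrailNames() {
        }

        public static boolean isValid(String name) {
            return name.length() >= 3
                   && name.length() <= 128
                   && SHAPE.matcher(name).matches()
                   && !IPV4.matcher(name).matches();
        }
    }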

    ", "exception":true }, "KmsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when there is an issue with the specified KMS key and the trail or event data store can't be updated.

    ", "exception":true }, "KmsKeyDisabledException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is no longer in use.

    ", "deprecated":true, "exception":true }, "KmsKeyNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the KMS key does not exist, when the S3 bucket and the KMS key are not in the same Region, or when the KMS key associated with the Amazon SNS topic either does not exist or is not in the same Region.

    ", "exception":true }, @@ -3871,7 +3911,7 @@ "type":"string", "max":1024, "min":3, - "pattern":"^[a-zA-Z0-9._/\\-:]+$" + "pattern":"^[a-zA-Z0-9._/\\-:*]+$" }, "Long":{"type":"long"}, "LookupAttribute":{ @@ -3960,11 +4000,17 @@ }, "MaxConcurrentQueriesException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You are already running the maximum number of concurrent queries. The maximum number of concurrent queries is 10. Wait a minute for some queries to finish, and then run the query again.

    ", "exception":true }, + "MaxEventSize":{ + "type":"string", + "enum":[ + "Standard", + "Large" + ] + }, "MaxQueryResults":{ "type":"integer", "max":1000, @@ -3977,37 +4023,32 @@ }, "MaximumNumberOfTrailsExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the maximum number of trails is reached.

    ", "exception":true }, "NextToken":{"type":"string"}, "NoManagementAccountSLRExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the management account does not have a service-linked role.

    ", "exception":true }, "NotOrganizationManagementAccountException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the account making the request is not the organization's management account.

    ", "exception":true }, "NotOrganizationMasterAccountException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the Amazon Web Services account making the request to create or update an organization trail or event data store is not the management account for an organization in Organizations. For more information, see Prepare For Creating a Trail For Your Organization or Organization event data stores.

    ", "exception":true }, "OperationNotPermittedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the requested operation is not permitted.

    ", "exception":true }, @@ -4016,6 +4057,17 @@ "member":{"shape":"OperatorValue"}, "min":1 }, + "OperatorTargetList":{ + "type":"list", + "member":{"shape":"OperatorTargetListMember"}, + "max":50, + "min":1 + }, + "OperatorTargetListMember":{ + "type":"string", + "max":128, + "min":1 + }, "OperatorValue":{ "type":"string", "max":2048, @@ -4024,15 +4076,13 @@ }, "OrganizationNotInAllFeaturesModeException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when Organizations is not configured to support all features. All features must be enabled in Organizations to support creating an organization trail or event data store.

    ", "exception":true }, "OrganizationsNotInUseException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the request is made from an Amazon Web Services account that is not a member of an organization. To make this request, sign in using the credentials of an account that belongs to an organization.

    ", "exception":true }, @@ -4109,6 +4159,44 @@ "type":"list", "member":{"shape":"PublicKey"} }, + "PutEventConfigurationRequest":{ + "type":"structure", + "required":[ + "MaxEventSize", + "ContextKeySelectors" + ], + "members":{ + "EventDataStore":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) or ID suffix of the ARN of the event data store for which you want to update event configuration settings.

    " + }, + "MaxEventSize":{ + "shape":"MaxEventSize", + "documentation":"

    The maximum allowed size for events to be stored in the specified event data store. If you are using context key selectors, MaxEventSize must be set to Large.

    " + }, + "ContextKeySelectors":{ + "shape":"ContextKeySelectors", + "documentation":"

    A list of context key selectors that will be included to provide enriched event data.
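
    As with GetEventConfiguration above, a hypothetical sketch of the corresponding client call. The method name follows the SDK's usual generation conventions but is not confirmed against a released client, and the selectors parameter stands in for ContextKeySelector values whose members are not shown in this diff.

    import java.util.List;
    import software.amazon.awssdk.services.cloudtrail.CloudTrailClient;
    import software.amazon.awssdk.services.cloudtrail.model.ContextKeySelector;
    import software.amazon.awssdk.services.cloudtrail.model.PutEventConfigurationRequest;

    public class PutEventConfigurationSketch {
        public static void enableEnrichedEvents(CloudTrailClient cloudTrail,
                                                String eventDataStoreArn,
                                                List<ContextKeySelector> selectors) {
            cloudTrail.putEventConfiguration(PutEventConfigurationRequest.builder()
                .eventDataStore(eventDataStoreArn)
                // Context key selectors require the Large event size, per the documentation above.
                .maxEventSize("Large")
                .contextKeySelectors(selectors)
                .build());
        }
    }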

    " + } + } + }, + "PutEventConfigurationResponse":{ + "type":"structure", + "members":{ + "EventDataStoreArn":{ + "shape":"EventDataStoreArn", + "documentation":"

    The Amazon Resource Name (ARN) or ID suffix of the ARN of the event data store for which the event configuration settings were updated.

    " + }, + "MaxEventSize":{ + "shape":"MaxEventSize", + "documentation":"

    The maximum allowed size for events stored in the specified event data store.

    " + }, + "ContextKeySelectors":{ + "shape":"ContextKeySelectors", + "documentation":"

    The list of context key selectors that are configured for the event data store.

    " + } + } + }, "PutEventSelectorsRequest":{ "type":"structure", "required":["TrailName"], @@ -4251,8 +4339,7 @@ }, "QueryIdNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The query ID does not exist or does not map to a query.

    ", "exception":true }, @@ -4435,8 +4522,7 @@ }, "RegisterOrganizationDelegatedAdminResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Returns the following response if successful. Otherwise, returns an error.

    " }, "RemoveTagsRequest":{ @@ -4459,8 +4545,7 @@ }, "RemoveTagsResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    " }, "RequestWidget":{ @@ -4505,8 +4590,7 @@ }, "ResourceARNNotValidException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the provided resource does not exist, or the ARN format of the resource is not valid.

    The following is the format of an event data store ARN: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE

    The following is the format of a dashboard ARN: arn:aws:cloudtrail:us-east-1:123456789012:dashboard/exampleDash

    The following is the format of a channel ARN: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890

    ", "exception":true }, @@ -4527,8 +4611,7 @@ }, "ResourceNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified resource is not found.

    ", "exception":true }, @@ -4539,15 +4622,13 @@ }, "ResourcePolicyNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified resource policy is not found.

    ", "exception":true }, "ResourcePolicyNotValidException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the resource-based policy has syntax errors, or contains a principal that is not valid.

    ", "exception":true }, @@ -4571,8 +4652,7 @@ }, "ResourceTypeNotSupportedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified resource type is not supported by CloudTrail.

    ", "exception":true }, @@ -4646,8 +4726,7 @@ }, "S3BucketDoesNotExistException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified S3 bucket does not exist.

    ", "exception":true }, @@ -4760,8 +4839,7 @@ }, "ServiceQuotaExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the quota is exceeded. For information about CloudTrail quotas, see Service quotas in the Amazon Web Services General Reference.

    ", "exception":true }, @@ -4820,8 +4898,7 @@ }, "StartEventDataStoreIngestionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "StartImportRequest":{ "type":"structure", @@ -4898,8 +4975,7 @@ }, "StartLoggingResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    " }, "StartQueryRequest":{ @@ -4952,8 +5028,7 @@ }, "StopEventDataStoreIngestionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "StopImportRequest":{ "type":"structure", @@ -5019,8 +5094,7 @@ }, "StopLoggingResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    " }, "String":{"type":"string"}, @@ -5051,8 +5125,7 @@ }, "TagsLimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The number of tags per trail, event data store, dashboard, or channel has exceeded the permitted amount. Currently, the limit is 50.

    ", "exception":true }, @@ -5065,8 +5138,7 @@ "TerminationProtectionEnabled":{"type":"boolean"}, "ThrottlingException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the request rate exceeds the limit.

    ", "exception":true }, @@ -5151,8 +5223,7 @@ }, "TrailAlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified trail already exists.

    ", "exception":true }, @@ -5184,15 +5255,13 @@ }, "TrailNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the trail with the given name is not found.

    ", "exception":true }, "TrailNotProvidedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is no longer in use.

    ", "exception":true }, @@ -5200,6 +5269,13 @@ "type":"list", "member":{"shape":"TrailInfo"} }, + "Type":{ + "type":"string", + "enum":[ + "TagContext", + "RequestContext" + ] + }, "UUID":{ "type":"string", "max":36, @@ -5208,8 +5284,7 @@ }, "UnsupportedOperationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the requested operation is not supported.

    ", "exception":true }, diff --git a/services/cloudtraildata/pom.xml b/services/cloudtraildata/pom.xml index b7cf2f64250c..5cc32940f108 100644 --- a/services/cloudtraildata/pom.xml +++ b/services/cloudtraildata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT cloudtraildata AWS Java SDK :: Services :: Cloud Trail Data diff --git a/services/cloudwatch/pom.xml b/services/cloudwatch/pom.xml index b76607de6876..35b37492d63c 100644 --- a/services/cloudwatch/pom.xml +++ b/services/cloudwatch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT cloudwatch AWS Java SDK :: Services :: Amazon CloudWatch diff --git a/services/cloudwatchevents/pom.xml b/services/cloudwatchevents/pom.xml index f7f1587658de..6270b87d73f4 100644 --- a/services/cloudwatchevents/pom.xml +++ b/services/cloudwatchevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT cloudwatchevents AWS Java SDK :: Services :: Amazon CloudWatch Events diff --git a/services/cloudwatchlogs/pom.xml b/services/cloudwatchlogs/pom.xml index 92798e3474bc..d11eb0730cdf 100644 --- a/services/cloudwatchlogs/pom.xml +++ b/services/cloudwatchlogs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT cloudwatchlogs AWS Java SDK :: Services :: Amazon CloudWatch Logs diff --git a/services/codeartifact/pom.xml b/services/codeartifact/pom.xml index 103ba351a18e..22c6a687f857 100644 --- a/services/codeartifact/pom.xml +++ b/services/codeartifact/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT codeartifact AWS Java SDK :: Services :: Codeartifact diff --git a/services/codebuild/pom.xml b/services/codebuild/pom.xml index 5eaba744505b..c0089d32e14c 100644 --- a/services/codebuild/pom.xml +++ b/services/codebuild/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT codebuild AWS Java SDK :: Services :: AWS Code Build diff --git a/services/codecatalyst/pom.xml b/services/codecatalyst/pom.xml index 005301143529..a069c8e53439 100644 --- a/services/codecatalyst/pom.xml +++ b/services/codecatalyst/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT codecatalyst AWS Java SDK :: Services :: Code Catalyst diff --git a/services/codecommit/pom.xml b/services/codecommit/pom.xml index 09ef87196bfd..0d89506c46db 100644 --- a/services/codecommit/pom.xml +++ b/services/codecommit/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT codecommit AWS Java SDK :: Services :: AWS CodeCommit diff --git a/services/codeconnections/pom.xml b/services/codeconnections/pom.xml index 7282dcf82d19..2be612da6524 100644 --- a/services/codeconnections/pom.xml +++ b/services/codeconnections/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT codeconnections AWS Java SDK :: Services :: Code Connections diff --git a/services/codedeploy/pom.xml b/services/codedeploy/pom.xml index 93d973c62e46..224007ee3089 100644 --- a/services/codedeploy/pom.xml +++ b/services/codedeploy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT codedeploy AWS Java SDK :: Services :: AWS CodeDeploy diff --git a/services/codeguruprofiler/pom.xml b/services/codeguruprofiler/pom.xml index cf42eb4ffc70..3ddd667b2c54 100644 --- a/services/codeguruprofiler/pom.xml +++ 
b/services/codeguruprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT codeguruprofiler AWS Java SDK :: Services :: CodeGuruProfiler diff --git a/services/codegurureviewer/pom.xml b/services/codegurureviewer/pom.xml index da90d7aae552..49107fa27a0b 100644 --- a/services/codegurureviewer/pom.xml +++ b/services/codegurureviewer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT codegurureviewer AWS Java SDK :: Services :: CodeGuru Reviewer diff --git a/services/codegurusecurity/pom.xml b/services/codegurusecurity/pom.xml index 65cc3865e383..2286b097f041 100644 --- a/services/codegurusecurity/pom.xml +++ b/services/codegurusecurity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT codegurusecurity AWS Java SDK :: Services :: Code Guru Security diff --git a/services/codepipeline/pom.xml b/services/codepipeline/pom.xml index 1b90e7d3e775..08af935959b0 100644 --- a/services/codepipeline/pom.xml +++ b/services/codepipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT codepipeline AWS Java SDK :: Services :: AWS CodePipeline diff --git a/services/codestarconnections/pom.xml b/services/codestarconnections/pom.xml index 162c49c916b9..48a8bbfd8def 100644 --- a/services/codestarconnections/pom.xml +++ b/services/codestarconnections/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT codestarconnections AWS Java SDK :: Services :: CodeStar connections diff --git a/services/codestarnotifications/pom.xml b/services/codestarnotifications/pom.xml index bde3d2cd5065..905950fe3bba 100644 --- a/services/codestarnotifications/pom.xml +++ b/services/codestarnotifications/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT codestarnotifications AWS Java SDK :: Services :: Codestar Notifications diff --git a/services/cognitoidentity/pom.xml b/services/cognitoidentity/pom.xml index 99074ad3887d..b11e0c7ce174 100644 --- a/services/cognitoidentity/pom.xml +++ b/services/cognitoidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT cognitoidentity AWS Java SDK :: Services :: Amazon Cognito Identity diff --git a/services/cognitoidentityprovider/pom.xml b/services/cognitoidentityprovider/pom.xml index d09e0fedfc76..3646175ac442 100644 --- a/services/cognitoidentityprovider/pom.xml +++ b/services/cognitoidentityprovider/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT cognitoidentityprovider AWS Java SDK :: Services :: Amazon Cognito Identity Provider Service diff --git a/services/cognitosync/pom.xml b/services/cognitosync/pom.xml index d5bec418dbb9..d38131926011 100644 --- a/services/cognitosync/pom.xml +++ b/services/cognitosync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT cognitosync AWS Java SDK :: Services :: Amazon Cognito Sync diff --git a/services/comprehend/pom.xml b/services/comprehend/pom.xml index ab3f4daee8c4..dcb74e117f00 100644 --- a/services/comprehend/pom.xml +++ b/services/comprehend/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 comprehend diff --git a/services/comprehendmedical/pom.xml b/services/comprehendmedical/pom.xml index 50020ec09dc5..38f80923d41c 100644 --- a/services/comprehendmedical/pom.xml +++ 
b/services/comprehendmedical/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT comprehendmedical AWS Java SDK :: Services :: ComprehendMedical diff --git a/services/computeoptimizer/pom.xml b/services/computeoptimizer/pom.xml index f0e301707343..4ed655752cbe 100644 --- a/services/computeoptimizer/pom.xml +++ b/services/computeoptimizer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT computeoptimizer AWS Java SDK :: Services :: Compute Optimizer diff --git a/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json b/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json index 39ec013fc277..debe50c3138a 100644 --- a/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json +++ b/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json @@ -213,7 +213,7 @@ {"shape":"ThrottlingException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Export optimization recommendations for your Amazon Relational Database Service (Amazon RDS).

    Recommendations are exported in a comma-separated values (CSV) file, and its metadata in a JavaScript Object Notation (JSON) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting Recommendations in the Compute Optimizer User Guide.

    You can have only one Amazon RDS export job in progress per Amazon Web Services Region.

    " + "documentation":"

    Export optimization recommendations for your Amazon Aurora and Amazon Relational Database Service (Amazon RDS) databases.

    Recommendations are exported in a comma-separated values (CSV) file, and its metadata in a JavaScript Object Notation (JSON) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting Recommendations in the Compute Optimizer User Guide.

    You can have only one Amazon Aurora or RDS export job in progress per Amazon Web Services Region.
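
    A sketch of starting such an export with the v2 Compute Optimizer client. The bucket and prefix are placeholders, the bucket must already exist with a policy that lets Compute Optimizer write to it, and the class names assume the SDK's usual acronym normalization (for example ExportRdsDatabaseRecommendationsRequest); check the generated client for the exact names.

    import software.amazon.awssdk.services.computeoptimizer.ComputeOptimizerClient;
    import software.amazon.awssdk.services.computeoptimizer.model.ExportRdsDatabaseRecommendationsRequest;
    import software.amazon.awssdk.services.computeoptimizer.model.ExportRdsDatabaseRecommendationsResponse;
    import software.amazon.awssdk.services.computeoptimizer.model.S3DestinationConfig;

    public class ExportRdsRecommendationsSketch {
        public static void main(String[] args) {
            try (ComputeOptimizerClient client = ComputeOptimizerClient.create()) {
                ExportRdsDatabaseRecommendationsResponse response =
                    client.exportRDSDatabaseRecommendations(ExportRdsDatabaseRecommendationsRequest.builder()
                        .s3DestinationConfig(S3DestinationConfig.builder()
                            .bucket("amzn-s3-demo-bucket")       // placeholder bucket
                            .keyPrefix("compute-optimizer/rds")  // placeholder prefix
                            .build())
                        .build());
                // Only one Aurora/RDS export job may be in progress per Region.
                System.out.println("Export job started: " + response.jobId());
            }
        }
    }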

    " }, "GetAutoScalingGroupRecommendations":{ "name":"GetAutoScalingGroupRecommendations", @@ -469,7 +469,7 @@ {"shape":"MissingAuthenticationToken"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Returns the projected metrics of Amazon RDS recommendations.

    " + "documentation":"

    Returns the projected metrics of Aurora and RDS database recommendations.

    " }, "GetRDSDatabaseRecommendations":{ "name":"GetRDSDatabaseRecommendations", @@ -489,7 +489,7 @@ {"shape":"MissingAuthenticationToken"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Returns Amazon RDS recommendations.

    Compute Optimizer generates recommendations for Amazon RDS that meet a specific set of requirements. For more information, see the Supported resources and requirements in the Compute Optimizer User Guide.

    " + "documentation":"

    Returns Amazon Aurora and RDS database recommendations.

    Compute Optimizer generates recommendations for Amazon Aurora and RDS databases that meet a specific set of requirements. For more information, see the Supported resources and requirements in the Compute Optimizer User Guide.

    " }, "GetRecommendationPreferences":{ "name":"GetRecommendationPreferences", @@ -528,7 +528,7 @@ {"shape":"MissingAuthenticationToken"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Returns the optimization findings for an account.

    It returns the number of:

    • Amazon EC2 instances in an account that are Underprovisioned, Overprovisioned, or Optimized.

    • Auto Scaling groups in an account that are NotOptimized, or Optimized.

    • Amazon EBS volumes in an account that are NotOptimized, or Optimized.

    • Lambda functions in an account that are NotOptimized, or Optimized.

    • Amazon ECS services in an account that are Underprovisioned, Overprovisioned, or Optimized.

    " + "documentation":"

    Returns the optimization findings for an account.

    It returns the number of:

    • Amazon EC2 instances in an account that are Underprovisioned, Overprovisioned, or Optimized.

    • EC2 Auto Scaling groups in an account that are NotOptimized, or Optimized.

    • Amazon EBS volumes in an account that are NotOptimized, or Optimized.

    • Lambda functions in an account that are NotOptimized, or Optimized.

    • Amazon ECS services in an account that are Underprovisioned, Overprovisioned, or Optimized.

    • Commercial software licenses in an account that are InsufficientMetrics, NotOptimized or Optimized.

    • Amazon Aurora and Amazon RDS databases in an account that are Underprovisioned, Overprovisioned, Optimized, or NotOptimized.
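
    To read those counts programmatically, a short sketch using the getRecommendationSummaries paginator; the accessor names mirror the model shapes and the SDK's normal generation pattern, so verify them against the generated client.

    import software.amazon.awssdk.services.computeoptimizer.ComputeOptimizerClient;
    import software.amazon.awssdk.services.computeoptimizer.model.GetRecommendationSummariesRequest;

    public class RecommendationSummariesSketch {
        public static void main(String[] args) {
            try (ComputeOptimizerClient client = ComputeOptimizerClient.create()) {
                client.getRecommendationSummariesPaginator(GetRecommendationSummariesRequest.builder().build())
                      .stream()
                      .flatMap(page -> page.recommendationSummaries().stream())
                      .forEach(summary -> {
                          System.out.println(summary.recommendationResourceTypeAsString());
                          summary.summaries().forEach(s ->
                              System.out.println("  " + s.nameAsString() + " = " + s.value()));
                      });
            }
        }
    }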

    " }, "PutRecommendationPreferences":{ "name":"PutRecommendationPreferences", @@ -952,23 +952,23 @@ "members":{ "storageType":{ "shape":"StorageType", - "documentation":"

    The type of RDS storage.

    " + "documentation":"

    The type of DB storage.

    " }, "allocatedStorage":{ "shape":"AllocatedStorage", - "documentation":"

    The size of the RDS storage in gigabytes (GB).

    " + "documentation":"

    The size of the DB storage in gigabytes (GB).

    " }, "iops":{ "shape":"NullableIOPS", - "documentation":"

    The provisioned IOPs of the RDS storage.

    " + "documentation":"

    The provisioned IOPS of the DB storage.

    " }, "maxAllocatedStorage":{ "shape":"NullableMaxAllocatedStorage", - "documentation":"

    The maximum limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the RDS instance.

    " + "documentation":"

    The maximum limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

    " }, "storageThroughput":{ "shape":"NullableStorageThroughput", - "documentation":"

    The storage throughput of the RDS storage.

    " + "documentation":"

    The storage throughput of the DB storage.

    " } }, "documentation":"

    The configuration of the recommended RDS storage.

    " @@ -996,8 +996,7 @@ }, "DeleteRecommendationPreferencesResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeRecommendationExportJobsRequest":{ "type":"structure", @@ -1877,11 +1876,11 @@ "members":{ "accountIds":{ "shape":"AccountIds", - "documentation":"

    The Amazon Web Services account IDs for the export Amazon RDS recommendations.

    If your account is the management account or the delegated administrator of an organization, use this parameter to specify the member account you want to export recommendations to.

    This parameter can't be specified together with the include member accounts parameter. The parameters are mutually exclusive.

    If this parameter or the include member accounts parameter is omitted, the recommendations for member accounts aren't included in the export.

    You can specify multiple account IDs per request.

    " + "documentation":"

    The Amazon Web Services account IDs for exporting Amazon Aurora and RDS database recommendations.

    If your account is the management account or the delegated administrator of an organization, use this parameter to specify the member account you want to export recommendations to.

    This parameter can't be specified together with the include member accounts parameter. The parameters are mutually exclusive.

    If this parameter or the include member accounts parameter is omitted, the recommendations for member accounts aren't included in the export.

    You can specify multiple account IDs per request.

    " }, "filters":{ "shape":"RDSDBRecommendationFilters", - "documentation":"

    An array of objects to specify a filter that exports a more specific set of Amazon RDS recommendations.

    " + "documentation":"

    An array of objects to specify a filter that exports a more specific set of Amazon Aurora and RDS recommendations.

    " }, "fieldsToExport":{ "shape":"ExportableRDSDBFields", @@ -2216,15 +2215,20 @@ "EngineVersion", "Idle", "MultiAZDBInstance", + "ClusterWriter", "CurrentDBInstanceClass", "CurrentStorageConfigurationStorageType", "CurrentStorageConfigurationAllocatedStorage", "CurrentStorageConfigurationMaxAllocatedStorage", "CurrentStorageConfigurationIOPS", "CurrentStorageConfigurationStorageThroughput", + "CurrentStorageEstimatedMonthlyVolumeIOPsCostVariation", "CurrentInstanceOnDemandHourlyPrice", "CurrentStorageOnDemandMonthlyPrice", "LookbackPeriodInDays", + "CurrentStorageEstimatedClusterInstanceOnDemandMonthlyCost", + "CurrentStorageEstimatedClusterStorageOnDemandMonthlyCost", + "CurrentStorageEstimatedClusterStorageIOOnDemandMonthlyCost", "CurrentInstancePerformanceRisk", "UtilizationMetricsCpuMaximum", "UtilizationMetricsMemoryMaximum", @@ -2244,6 +2248,9 @@ "UtilizationMetricsAuroraMemoryNumKillQueryTotalMaximum", "UtilizationMetricsReadIOPSEphemeralStorageMaximum", "UtilizationMetricsWriteIOPSEphemeralStorageMaximum", + "UtilizationMetricsVolumeBytesUsedAverage", + "UtilizationMetricsVolumeReadIOPsAverage", + "UtilizationMetricsVolumeWriteIOPsAverage", "InstanceFinding", "InstanceFindingReasonCodes", "StorageFinding", @@ -2258,6 +2265,7 @@ "StorageRecommendationOptionsIOPS", "StorageRecommendationOptionsStorageThroughput", "StorageRecommendationOptionsRank", + "StorageRecommendationOptionsEstimatedMonthlyVolumeIOPsCostVariation", "InstanceRecommendationOptionsInstanceOnDemandHourlyPrice", "InstanceRecommendationOptionsSavingsOpportunityPercentage", "InstanceRecommendationOptionsEstimatedMonthlySavingsCurrency", @@ -2266,6 +2274,9 @@ "InstanceRecommendationOptionsEstimatedMonthlySavingsCurrencyAfterDiscounts", "InstanceRecommendationOptionsEstimatedMonthlySavingsValueAfterDiscounts", "StorageRecommendationOptionsOnDemandMonthlyPrice", + "StorageRecommendationOptionsEstimatedClusterInstanceOnDemandMonthlyCost", + "StorageRecommendationOptionsEstimatedClusterStorageOnDemandMonthlyCost", + "StorageRecommendationOptionsEstimatedClusterStorageIOOnDemandMonthlyCost", "StorageRecommendationOptionsSavingsOpportunityPercentage", "StorageRecommendationOptionsEstimatedMonthlySavingsCurrency", "StorageRecommendationOptionsEstimatedMonthlySavingsValue", @@ -2737,8 +2748,7 @@ }, "GetEnrollmentStatusRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetEnrollmentStatusResponse":{ "type":"structure", @@ -2933,7 +2943,7 @@ "members":{ "resourceArn":{ "shape":"ResourceArn", - "documentation":"

    The ARN that identifies the Amazon RDS.

    The following is the format of the ARN:

    arn:aws:rds:{region}:{accountId}:db:{resourceName}

    " + "documentation":"

    The ARN that identifies the Amazon Aurora or RDS database.

    The following is the format of the ARN:

    arn:aws:rds:{region}:{accountId}:db:{resourceName}

    " }, "stat":{ "shape":"MetricStatistic", @@ -2968,23 +2978,23 @@ "members":{ "resourceArns":{ "shape":"ResourceArns", - "documentation":"

    The ARN that identifies the Amazon RDS.

    The following is the format of the ARN:

    arn:aws:rds:{region}:{accountId}:db:{resourceName}

    The following is the format of a DB Cluster ARN:

    arn:aws:rds:{region}:{accountId}:cluster:{resourceName}

    " + "documentation":"

    The ARN that identifies the Amazon Aurora or RDS database.

    The following is the format of the ARN:

    arn:aws:rds:{region}:{accountId}:db:{resourceName}

    The following is the format of a DB Cluster ARN:

    arn:aws:rds:{region}:{accountId}:cluster:{resourceName}

    " }, "nextToken":{ "shape":"NextToken", - "documentation":"

    The token to advance to the next page of Amazon RDS recommendations.

    " + "documentation":"

    The token to advance to the next page of Amazon Aurora and RDS database recommendations.

    " }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of Amazon RDS recommendations to return with a single request.

    To retrieve the remaining results, make another request with the returned nextToken value.

    " + "documentation":"

    The maximum number of Amazon Aurora and RDS database recommendations to return with a single request.

    To retrieve the remaining results, make another request with the returned nextToken value.

    " }, "filters":{ "shape":"RDSDBRecommendationFilters", - "documentation":"

    An array of objects to specify a filter that returns a more specific list of Amazon RDS recommendations.

    " + "documentation":"

    An array of objects to specify a filter that returns a more specific list of Amazon Aurora and RDS database recommendations.

    " }, "accountIds":{ "shape":"AccountIds", - "documentation":"

    Return the Amazon RDS recommendations to the specified Amazon Web Services account IDs.

    If your account is the management account or the delegated administrator of an organization, use this parameter to return the Amazon RDS recommendations to specific member accounts.

    You can only specify one account ID per request.

    " + "documentation":"

    Return the Amazon Aurora and RDS database recommendations to the specified Amazon Web Services account IDs.

    If your account is the management account or the delegated administrator of an organization, use this parameter to return the Amazon Aurora and RDS database recommendations to specific member accounts.

    You can only specify one account ID per request.
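
    A sketch of the nextToken loop that the maxResults and nextToken members above imply. Class names assume the SDK's usual acronym normalization (GetRdsDatabaseRecommendationsRequest), and the rdsDBRecommendations accessor mirrors the member name in this model; both should be verified against the generated client.

    import software.amazon.awssdk.services.computeoptimizer.ComputeOptimizerClient;
    import software.amazon.awssdk.services.computeoptimizer.model.GetRdsDatabaseRecommendationsRequest;
    import software.amazon.awssdk.services.computeoptimizer.model.GetRdsDatabaseRecommendationsResponse;

    public class RdsRecommendationsPagingSketch {
        public static void main(String[] args) {
            try (ComputeOptimizerClient client = ComputeOptimizerClient.create()) {
                String nextToken = null;
                do {
                    GetRdsDatabaseRecommendationsResponse page =
                        client.getRDSDatabaseRecommendations(GetRdsDatabaseRecommendationsRequest.builder()
                            .maxResults(100)
                            .nextToken(nextToken)
                            .build());
                    page.rdsDBRecommendations().forEach(r ->
                        System.out.println(r.resourceArn() + " -> " + r.instanceFindingAsString()));
                    nextToken = page.nextToken();
                } while (nextToken != null);
            }
        }
    }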

    " }, "recommendationPreferences":{"shape":"RecommendationPreferences"} } @@ -2994,11 +3004,11 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

    The token to advance to the next page of Amazon RDS recommendations.

    " + "documentation":"

    The token to advance to the next page of Amazon Aurora and RDS database recommendations.

    " }, "rdsDBRecommendations":{ "shape":"RDSDBRecommendations", - "documentation":"

    An array of objects that describe the Amazon RDS recommendations.

    " + "documentation":"

    An array of objects that describe the Amazon Aurora and RDS database recommendations.

    " }, "errors":{ "shape":"GetRecommendationErrors", @@ -4431,8 +4441,7 @@ }, "PutRecommendationPreferencesResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "RDSCurrentInstancePerformanceRisk":{ "type":"string", @@ -4448,27 +4457,27 @@ "members":{ "dbInstanceClass":{ "shape":"DBInstanceClass", - "documentation":"

    Describes the DB instance class recommendation option for your Amazon RDS instance.

    " + "documentation":"

    Describes the DB instance class recommendation option for your Amazon Aurora or RDS database.

    " }, "projectedUtilizationMetrics":{ "shape":"RDSDBProjectedUtilizationMetrics", - "documentation":"

    An array of objects that describe the projected utilization metrics of the RDS instance recommendation option.

    " + "documentation":"

    An array of objects that describe the projected utilization metrics of the DB instance recommendation option.

    " }, "performanceRisk":{ "shape":"PerformanceRisk", - "documentation":"

    The performance risk of the RDS instance recommendation option.

    " + "documentation":"

    The performance risk of the DB instance recommendation option.

    " }, "rank":{ "shape":"Rank", - "documentation":"

    The rank identifier of the RDS instance recommendation option.

    " + "documentation":"

    The rank identifier of the DB instance recommendation option.

    " }, "savingsOpportunity":{"shape":"SavingsOpportunity"}, "savingsOpportunityAfterDiscounts":{ "shape":"RDSInstanceSavingsOpportunityAfterDiscounts", - "documentation":"

    Describes the savings opportunity for Amazon RDS recommendations or for the recommendation option.

    Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

    " + "documentation":"

    Describes the savings opportunity for Amazon Aurora and RDS database recommendations or for the recommendation option.

    Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

    " } }, - "documentation":"

    Describes the recommendation options for an Amazon RDS instance.

    " + "documentation":"

    Describes the recommendation options for a DB instance.

    " }, "RDSDBInstanceRecommendationOptions":{ "type":"list", @@ -4494,7 +4503,10 @@ "AuroraMemoryNumKillConnTotal", "AuroraMemoryNumKillQueryTotal", "ReadIOPSEphemeralStorage", - "WriteIOPSEphemeralStorage" + "WriteIOPSEphemeralStorage", + "VolumeReadIOPs", + "VolumeBytesUsed", + "VolumeWriteIOPs" ] }, "RDSDBMetricStatistic":{ @@ -4514,15 +4526,15 @@ "members":{ "resourceArn":{ "shape":"ResourceArn", - "documentation":"

    The ARN of the current Amazon RDS.

    The following is the format of the ARN:

    arn:aws:rds:{region}:{accountId}:db:{resourceName}

    " + "documentation":"

    The ARN of the current Amazon Aurora or RDS database.

    The following is the format of the ARN:

    arn:aws:rds:{region}:{accountId}:db:{resourceName}

    " }, "accountId":{ "shape":"AccountId", - "documentation":"

    The Amazon Web Services account ID of the Amazon RDS.

    " + "documentation":"

    The Amazon Web Services account ID of the Amazon Aurora or RDS database.

    " }, "engine":{ "shape":"Engine", - "documentation":"

    The engine of the RDS instance.

    " + "documentation":"

    The engine of the DB instance.

    " }, "engineVersion":{ "shape":"EngineVersion", @@ -4534,11 +4546,11 @@ }, "currentDBInstanceClass":{ "shape":"CurrentDBInstanceClass", - "documentation":"

    The DB instance class of the current RDS instance.

    " + "documentation":"

    The DB instance class of the current Aurora or RDS DB instance.

    " }, "currentStorageConfiguration":{ "shape":"DBStorageConfiguration", - "documentation":"

    The configuration of the current RDS storage.

    " + "documentation":"

    The configuration of the current DB storage.

    " }, "dbClusterIdentifier":{ "shape":"DBClusterIdentifier", @@ -4546,72 +4558,76 @@ }, "idle":{ "shape":"Idle", - "documentation":"

    This indicates if the RDS instance is idle or not.

    " + "documentation":"

    This indicates whether the DB instance is idle.

    " }, "instanceFinding":{ "shape":"RDSInstanceFinding", - "documentation":"

    The finding classification of an Amazon RDS instance.

    Findings for Amazon RDS instance include:

    • Underprovisioned — When Compute Optimizer detects that there’s not enough resource specifications, an Amazon RDS is considered under-provisioned.

    • Overprovisioned — When Compute Optimizer detects that there’s excessive resource specifications, an Amazon RDS is considered over-provisioned.

    • Optimized — When the specifications of your Amazon RDS instance meet the performance requirements of your workload, the service is considered optimized.

    " + "documentation":"

    The finding classification of an Amazon Aurora or RDS DB instance.

    For more information about finding classifications, see Finding classifications for Aurora and RDS databases in the Compute Optimizer User Guide.

    " }, "storageFinding":{ "shape":"RDSStorageFinding", - "documentation":"

    The finding classification of Amazon RDS storage.

    Findings for Amazon RDS instance include:

    • Underprovisioned — When Compute Optimizer detects that there’s not enough storage, an Amazon RDS is considered under-provisioned.

    • Overprovisioned — When Compute Optimizer detects that there’s excessive storage, an Amazon RDS is considered over-provisioned.

    • Optimized — When the storage of your Amazon RDS meet the performance requirements of your workload, the service is considered optimized.

    " + "documentation":"

    The finding classification of Amazon RDS DB instance storage.

    For more information about finding classifications, see Finding classifications for Aurora and RDS databases in the Compute Optimizer User Guide.

    " }, "instanceFindingReasonCodes":{ "shape":"RDSInstanceFindingReasonCodes", - "documentation":"

    The reason for the finding classification of an Amazon RDS instance.

    " + "documentation":"

    The reason for the finding classification of a DB instance.

    " }, "currentInstancePerformanceRisk":{ "shape":"RDSCurrentInstancePerformanceRisk", "documentation":"

    The performance risk for the current DB instance.

    " }, + "currentStorageEstimatedMonthlyVolumeIOPsCostVariation":{ + "shape":"RDSEstimatedMonthlyVolumeIOPsCostVariation", + "documentation":"

    The level of variation in monthly I/O costs for the current DB storage configuration.

    " + }, "storageFindingReasonCodes":{ "shape":"RDSStorageFindingReasonCodes", - "documentation":"

    The reason for the finding classification of Amazon RDS storage.

    " + "documentation":"

    The reason for the finding classification of RDS DB instance storage.

    " }, "instanceRecommendationOptions":{ "shape":"RDSDBInstanceRecommendationOptions", - "documentation":"

    An array of objects that describe the recommendation options for the Amazon RDS instance.

    " + "documentation":"

    An array of objects that describe the recommendation options for the RDS DB instance.

    " }, "storageRecommendationOptions":{ "shape":"RDSDBStorageRecommendationOptions", - "documentation":"

    An array of objects that describe the recommendation options for Amazon RDS storage.

    " + "documentation":"

    An array of objects that describe the recommendation options for DB instance storage.

    " }, "utilizationMetrics":{ "shape":"RDSDBUtilizationMetrics", - "documentation":"

    An array of objects that describe the utilization metrics of the Amazon RDS.

    " + "documentation":"

    An array of objects that describe the utilization metrics of the DB instance.

    " }, "effectiveRecommendationPreferences":{ "shape":"RDSEffectiveRecommendationPreferences", - "documentation":"

    Describes the effective recommendation preferences for Amazon RDS.

    " + "documentation":"

    Describes the effective recommendation preferences for DB instances.

    " }, "lookbackPeriodInDays":{ "shape":"LookBackPeriodInDays", - "documentation":"

    The number of days the Amazon RDS utilization metrics were analyzed.

    " + "documentation":"

    The number of days the DB instance utilization metrics were analyzed.

    " }, "lastRefreshTimestamp":{ "shape":"LastRefreshTimestamp", - "documentation":"

    The timestamp of when the Amazon RDS recommendation was last generated.

    " + "documentation":"

    The timestamp of when the DB instance recommendation was last generated.

    " }, "tags":{ "shape":"Tags", - "documentation":"

    A list of tags assigned to your Amazon RDS recommendations.

    " + "documentation":"

    A list of tags assigned to your DB instance recommendations.

    " } }, - "documentation":"

    Describes an Amazon RDS recommendation.

    " + "documentation":"

    Describes an Amazon Aurora or RDS database recommendation.

    " }, "RDSDBRecommendationFilter":{ "type":"structure", "members":{ "name":{ "shape":"RDSDBRecommendationFilterName", - "documentation":"

    The name of the filter.

    Specify Finding to return recommendations with a specific finding classification.

    You can filter your Amazon RDS recommendations by tag:key and tag-key tags.

    A tag:key is a key and value combination of a tag assigned to your Amazon RDS recommendations. Use the tag key in the filter name and the tag value as the filter value. For example, to find all Amazon RDS service recommendations that have a tag with the key of Owner and the value of TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    A tag-key is the key of a tag assigned to your Amazon RDS recommendations. Use this filter to find all of your Amazon RDS recommendations that have a tag with a specific key. This doesn’t consider the tag value. For example, you can find your Amazon RDS service recommendations with a tag key value of Owner or without any tag keys assigned.

    " + "documentation":"

    The name of the filter.

    Specify Finding to return recommendations with a specific finding classification.

    You can filter your DB instance recommendations by tag:key and tag-key tags.

    A tag:key is a key and value combination of a tag assigned to your DB instance recommendations. Use the tag key in the filter name and the tag value as the filter value. For example, to find all DB instance recommendations that have a tag with the key of Owner and the value of TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    A tag-key is the key of a tag assigned to your DB instance recommendations. Use this filter to find all of your DB instance recommendations that have a tag with a specific key. This doesn’t consider the tag value. For example, you can find your DB instance recommendations with a tag key value of Owner or without any tag keys assigned.

    " }, "values":{ "shape":"FilterValues", "documentation":"

    The value of the filter.

    " } }, - "documentation":"

    Describes a filter that returns a more specific list of Amazon RDS recommendations. Use this filter with the GetECSServiceRecommendations action.

    " + "documentation":"

    Describes a filter that returns a more specific list of DB instance recommendations. Use this filter with the GetRDSDatabaseRecommendations action.
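
    For example, the two tag filter styles described above might be expressed like this with the SDK's consumer-style builders; the Owner/TeamA names are placeholders.

    import software.amazon.awssdk.services.computeoptimizer.model.GetRdsDatabaseRecommendationsRequest;

    public class RdsTagFilterSketch {
        // tag:key form - matches recommendations tagged Owner=TeamA.
        static final GetRdsDatabaseRecommendationsRequest BY_OWNER_VALUE =
            GetRdsDatabaseRecommendationsRequest.builder()
                .filters(f -> f.name("tag:Owner").values("TeamA"))
                .build();

        // tag-key form - matches recommendations that have any Owner tag.
        static final GetRdsDatabaseRecommendationsRequest HAS_OWNER_TAG =
            GetRdsDatabaseRecommendationsRequest.builder()
                .filters(f -> f.name("tag-key").values("Owner"))
                .build();
    }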

    " }, "RDSDBRecommendationFilterName":{ "type":"string", @@ -4640,15 +4656,19 @@ }, "rank":{ "shape":"Rank", - "documentation":"

    The rank identifier of the RDS storage recommendation option.

    " + "documentation":"

    The rank identifier of the DB storage recommendation option.

    " }, "savingsOpportunity":{"shape":"SavingsOpportunity"}, "savingsOpportunityAfterDiscounts":{ "shape":"RDSStorageSavingsOpportunityAfterDiscounts", - "documentation":"

    Describes the savings opportunity for Amazon RDS storage recommendations or for the recommendation option.

    Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

    " + "documentation":"

    Describes the savings opportunity for DB storage recommendations or for the recommendation option.

    Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

    " + }, + "estimatedMonthlyVolumeIOPsCostVariation":{ + "shape":"RDSEstimatedMonthlyVolumeIOPsCostVariation", + "documentation":"

    The projected level of variation in monthly I/O costs for the DB storage recommendation option.

    " } }, - "documentation":"

    Describes the recommendation options for Amazon RDS storage.

    " + "documentation":"

    Describes the recommendation options for DB storage.

    " }, "RDSDBStorageRecommendationOptions":{ "type":"list", @@ -4670,7 +4690,7 @@ "documentation":"

    The value of the utilization metric.

    " } }, - "documentation":"

    Describes the utilization metric of an Amazon RDS.

    To determine the performance difference between your current Amazon RDS and the recommended option, compare the utilization metric data of your service against its projected utilization metric data.

    " + "documentation":"

    Describes the utilization metric of an Amazon Aurora or RDS database.

    To determine the performance difference between your current DB instance and the recommended option, compare the utilization metric data of your service against its projected utilization metric data.

    " }, "RDSDBUtilizationMetrics":{ "type":"list", @@ -4692,7 +4712,7 @@ "documentation":"

    The values for the projected metric.

    " } }, - "documentation":"

    Describes the projected metrics of an Amazon RDS recommendation option.

    To determine the performance difference between your current Amazon RDS and the recommended option, compare the metric data of your service against its projected metric data.

    " + "documentation":"

    Describes the projected metrics of an Amazon Aurora or RDS database recommendation option.

    To determine the performance difference between your current Amazon Aurora or RDS database and the recommended option, compare the metric data of your service against its projected metric data.

    " }, "RDSDatabaseProjectedMetrics":{ "type":"list", @@ -4703,18 +4723,18 @@ "members":{ "recommendedDBInstanceClass":{ "shape":"RecommendedDBInstanceClass", - "documentation":"

    The recommended DB instance class for the Amazon RDS.

    " + "documentation":"

    The recommended DB instance class for the Amazon Aurora or RDS database.

    " }, "rank":{ "shape":"Rank", - "documentation":"

    The rank identifier of the RDS instance recommendation option.

    " + "documentation":"

    The rank identifier of the Amazon Aurora or RDS DB instance recommendation option.

    " }, "projectedMetrics":{ "shape":"RDSDatabaseProjectedMetrics", "documentation":"

    An array of objects that describe the projected metric.

    " } }, - "documentation":"

    Describes the projected metrics of an Amazon RDS recommendation option.

    To determine the performance difference between your current Amazon RDS and the recommended option, compare the metric data of your service against its projected metric data.

    " + "documentation":"

    Describes the projected metrics of an Amazon Aurora or RDS database recommendation option.

    To determine the performance difference between your current Amazon Aurora or RDS database and the recommended option, compare the metric data of your service against its projected metric data.

    " }, "RDSDatabaseRecommendedOptionProjectedMetrics":{ "type":"list", @@ -4725,7 +4745,7 @@ "members":{ "cpuVendorArchitectures":{ "shape":"CpuVendorArchitectures", - "documentation":"

    Describes the CPU vendor and architecture for Amazon RDS recommendations.

    " + "documentation":"

    Describes the CPU vendor and architecture for DB instance recommendations.

    " }, "enhancedInfrastructureMetrics":{ "shape":"EnhancedInfrastructureMetrics", @@ -4733,14 +4753,23 @@ }, "lookBackPeriod":{ "shape":"LookBackPeriodPreference", - "documentation":"

    The number of days the utilization metrics of the Amazon RDS are analyzed.

    " + "documentation":"

    The number of days the utilization metrics of the DB instance are analyzed.

    " }, "savingsEstimationMode":{ "shape":"RDSSavingsEstimationMode", - "documentation":"

    Describes the savings estimation mode preference applied for calculating savings opportunity for Amazon RDS.

    " + "documentation":"

    Describes the savings estimation mode preference applied for calculating savings opportunity for DB instances.

    " } }, - "documentation":"

    Describes the effective recommendation preferences for Amazon RDS.

    " + "documentation":"

    Describes the effective recommendation preferences for Amazon Aurora and RDS databases.

    " + }, + "RDSEstimatedMonthlyVolumeIOPsCostVariation":{ + "type":"string", + "enum":[ + "None", + "Low", + "Medium", + "High" + ] }, "RDSInstanceEstimatedMonthlySavings":{ "type":"structure", @@ -4751,10 +4780,10 @@ }, "value":{ "shape":"Value", - "documentation":"

    The value of the estimated monthly savings for Amazon RDS instances.

    " + "documentation":"

    The value of the estimated monthly savings for DB instances.

    " } }, - "documentation":"

    Describes the estimated monthly savings possible for Amazon RDS instances by adopting Compute Optimizer recommendations. This is based on Amazon RDS pricing after applying Savings Plans discounts.

    " + "documentation":"

    Describes the estimated monthly savings possible for DB instances by adopting Compute Optimizer recommendations. This is based on DB instance pricing after applying Savings Plans discounts.

    " }, "RDSInstanceFinding":{ "type":"string", @@ -4792,24 +4821,24 @@ "members":{ "savingsOpportunityPercentage":{ "shape":"SavingsOpportunityPercentage", - "documentation":"

    The estimated monthly savings possible as a percentage of monthly cost by adopting Compute Optimizer’s Amazon RDS instance recommendations. This includes any applicable Savings Plans discounts.

    " + "documentation":"

    The estimated monthly savings possible as a percentage of monthly cost by adopting Compute Optimizer’s DB instance recommendations. This includes any applicable Savings Plans discounts.

    " }, "estimatedMonthlySavings":{ "shape":"RDSInstanceEstimatedMonthlySavings", - "documentation":"

    The estimated monthly savings possible by adopting Compute Optimizer’s Amazon RDS instance recommendations. This includes any applicable Savings Plans discounts.

    " + "documentation":"

    The estimated monthly savings possible by adopting Compute Optimizer’s DB instance recommendations. This includes any applicable Savings Plans discounts.

    " } }, - "documentation":"

    Describes the savings opportunity for Amazon RDS instance recommendations after applying Savings Plans discounts.

    Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

    " + "documentation":"

    Describes the savings opportunity for DB instance recommendations after applying Savings Plans discounts.

    Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

    " }, "RDSSavingsEstimationMode":{ "type":"structure", "members":{ "source":{ "shape":"RDSSavingsEstimationModeSource", - "documentation":"

    Describes the source for calculating the savings opportunity for Amazon RDS.

    " + "documentation":"

    Describes the source for calculating the savings opportunity for DB instances.

    " } }, - "documentation":"

    Describes the savings estimation mode used for calculating savings opportunity for Amazon RDS.

    " + "documentation":"

    Describes the savings estimation mode used for calculating savings opportunity for DB instances.

    " }, "RDSSavingsEstimationModeSource":{ "type":"string", @@ -4828,17 +4857,18 @@ }, "value":{ "shape":"Value", - "documentation":"

    The value of the estimated monthly savings for Amazon RDS storage.

    " + "documentation":"

    The value of the estimated monthly savings for DB instance storage.

    " } }, - "documentation":"

    Describes the estimated monthly savings possible for Amazon RDS storage by adopting Compute Optimizer recommendations. This is based on Amazon RDS pricing after applying Savings Plans discounts.

    " + "documentation":"

    Describes the estimated monthly savings possible for DB instance storage by adopting Compute Optimizer recommendations. This is based on DB instance pricing after applying Savings Plans discounts.

    " }, "RDSStorageFinding":{ "type":"string", "enum":[ "Optimized", "Underprovisioned", - "Overprovisioned" + "Overprovisioned", + "NotOptimized" ] }, "RDSStorageFindingReasonCode":{ @@ -4848,7 +4878,9 @@ "EBSVolumeThroughputUnderprovisioned", "EBSVolumeIOPSOverprovisioned", "EBSVolumeThroughputOverprovisioned", - "NewGenerationStorageTypeAvailable" + "NewGenerationStorageTypeAvailable", + "DBClusterStorageOptionAvailable", + "DBClusterStorageSavingsAvailable" ] }, "RDSStorageFindingReasonCodes":{ @@ -4860,11 +4892,11 @@ "members":{ "savingsOpportunityPercentage":{ "shape":"SavingsOpportunityPercentage", - "documentation":"

    The estimated monthly savings possible as a percentage of monthly cost by adopting Compute Optimizer’s Amazon RDS storage recommendations. This includes any applicable Savings Plans discounts.

    " + "documentation":"

    The estimated monthly savings possible as a percentage of monthly cost by adopting Compute Optimizer’s DB instance storage recommendations. This includes any applicable Savings Plans discounts.

    " }, "estimatedMonthlySavings":{ "shape":"RDSStorageEstimatedMonthlySavings", - "documentation":"

    The estimated monthly savings possible by adopting Compute Optimizer’s Amazon RDS storage recommendations. This includes any applicable Savings Plans discounts.

    " + "documentation":"

    The estimated monthly savings possible by adopting Compute Optimizer’s DB instance storage recommendations. This includes any applicable Savings Plans discounts.

    " } }, "documentation":"

    Describes the savings opportunity for Amazon RDS storage recommendations after applying Savings Plans discounts.

    Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

    " @@ -5026,7 +5058,8 @@ "EcsService", "License", "RdsDBInstance", - "RdsDBInstanceStorage" + "RdsDBInstanceStorage", + "AuroraDBClusterStorage" ] }, "RecommendationSources":{ @@ -5123,6 +5156,7 @@ "EcsService", "License", "RdsDBInstance", + "AuroraDBClusterStorage", "Idle" ] }, diff --git a/services/config/pom.xml b/services/config/pom.xml index 9f2415dd12dc..19dc528b346e 100644 --- a/services/config/pom.xml +++ b/services/config/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT config AWS Java SDK :: Services :: AWS Config diff --git a/services/connect/pom.xml b/services/connect/pom.xml index 3009d2d7690b..9af4f2027b35 100644 --- a/services/connect/pom.xml +++ b/services/connect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT connect AWS Java SDK :: Services :: Connect diff --git a/services/connect/src/main/resources/codegen-resources/service-2.json b/services/connect/src/main/resources/codegen-resources/service-2.json index 7dc7ff34ca6c..ec4202282e8b 100644 --- a/services/connect/src/main/resources/codegen-resources/service-2.json +++ b/services/connect/src/main/resources/codegen-resources/service-2.json @@ -11928,7 +11928,7 @@ "EmailAddressRecipientList":{ "type":"list", "member":{"shape":"EmailAddressInfo"}, - "max":10, + "max":50, "min":1 }, "EmailAddressSearchConditionList":{ diff --git a/services/connectcampaigns/pom.xml b/services/connectcampaigns/pom.xml index 219fa0fc3f64..5ed6ed44fcc1 100644 --- a/services/connectcampaigns/pom.xml +++ b/services/connectcampaigns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT connectcampaigns AWS Java SDK :: Services :: Connect Campaigns diff --git a/services/connectcampaignsv2/pom.xml b/services/connectcampaignsv2/pom.xml index 146fd43c6f6a..7cb9d17925c3 100644 --- a/services/connectcampaignsv2/pom.xml +++ b/services/connectcampaignsv2/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT connectcampaignsv2 AWS Java SDK :: Services :: Connect Campaigns V2 diff --git a/services/connectcampaignsv2/src/main/resources/codegen-resources/service-2.json b/services/connectcampaignsv2/src/main/resources/codegen-resources/service-2.json index d1a02e3c219a..4ef9cd3f1bb8 100644 --- a/services/connectcampaignsv2/src/main/resources/codegen-resources/service-2.json +++ b/services/connectcampaignsv2/src/main/resources/codegen-resources/service-2.json @@ -231,6 +231,23 @@ ], "documentation":"

    Get the specific Connect instance config.

    " }, + "GetInstanceCommunicationLimits":{ + "name":"GetInstanceCommunicationLimits", + "http":{ + "method":"GET", + "requestUri":"/v2/connect-instance/{connectInstanceId}/communication-limits", + "responseCode":200 + }, + "input":{"shape":"GetInstanceCommunicationLimitsRequest"}, + "output":{"shape":"GetInstanceCommunicationLimitsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Get the instance communication limits.
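Editor's sketch (not part of this patch): given the `GetInstanceCommunicationLimitsRequest`/`Response` shapes added below, the standard v2 codegen would produce roughly the following client call. The client class name and `create()` factory follow the usual generated-client conventions for the existing connectcampaignsv2 module; the instance ID is a placeholder.

```java
import software.amazon.awssdk.services.connectcampaignsv2.ConnectCampaignsV2Client;
import software.amazon.awssdk.services.connectcampaignsv2.model.GetInstanceCommunicationLimitsRequest;
import software.amazon.awssdk.services.connectcampaignsv2.model.GetInstanceCommunicationLimitsResponse;

public class GetInstanceLimitsSketch {
    public static void main(String[] args) {
        try (ConnectCampaignsV2Client client = ConnectCampaignsV2Client.create()) {
            GetInstanceCommunicationLimitsResponse response = client.getInstanceCommunicationLimits(
                GetInstanceCommunicationLimitsRequest.builder()
                    .connectInstanceId("your-connect-instance-id") // placeholder
                    .build());
            // allChannelSubtypes holds the CommunicationLimits applied across all channel subtypes.
            System.out.println(response.communicationLimitsConfig().allChannelSubtypes());
        }
    }
}
```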

    " + }, "GetInstanceOnboardingJobStatus":{ "name":"GetInstanceOnboardingJobStatus", "http":{ @@ -338,6 +355,24 @@ "documentation":"

    Put or update the integration for the specified Amazon Connect instance.

    ", "idempotent":true }, + "PutInstanceCommunicationLimits":{ + "name":"PutInstanceCommunicationLimits", + "http":{ + "method":"PUT", + "requestUri":"/v2/connect-instance/{connectInstanceId}/communication-limits", + "responseCode":200 + }, + "input":{"shape":"PutInstanceCommunicationLimitsRequest"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Put the instance communication limits. This API is idempotent.
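Editor's sketch (not part of this patch): the corresponding write path, again assuming the generated names the v2 codegen would derive from the `PutInstanceCommunicationLimitsRequest` shape below. Because the operation is marked idempotent in the model, retrying the same request is safe.

```java
import software.amazon.awssdk.services.connectcampaignsv2.ConnectCampaignsV2Client;
import software.amazon.awssdk.services.connectcampaignsv2.model.InstanceCommunicationLimitsConfig;
import software.amazon.awssdk.services.connectcampaignsv2.model.PutInstanceCommunicationLimitsRequest;

public class PutInstanceLimitsSketch {
    public static void main(String[] args) {
        try (ConnectCampaignsV2Client client = ConnectCampaignsV2Client.create()) {
            client.putInstanceCommunicationLimits(PutInstanceCommunicationLimitsRequest.builder()
                .connectInstanceId("your-connect-instance-id") // placeholder
                // allChannelSubtypes omitted for brevity; a real call would supply
                // a populated CommunicationLimits value here.
                .communicationLimitsConfig(InstanceCommunicationLimitsConfig.builder().build())
                .build());
        }
    }
}
```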

    ", + "idempotent":true + }, "PutOutboundRequestBatch":{ "name":"PutOutboundRequestBatch", "http":{ @@ -900,7 +935,8 @@ "CommunicationLimitsConfig":{ "type":"structure", "members":{ - "allChannelSubtypes":{"shape":"CommunicationLimits"} + "allChannelSubtypes":{"shape":"CommunicationLimits"}, + "instanceLimitsHandling":{"shape":"InstanceLimitsHandling"} }, "documentation":"

    Communication limits config

    " }, @@ -1415,6 +1451,25 @@ }, "documentation":"

    The response for GetConnectInstanceConfig API.

    " }, + "GetInstanceCommunicationLimitsRequest":{ + "type":"structure", + "required":["connectInstanceId"], + "members":{ + "connectInstanceId":{ + "shape":"InstanceId", + "location":"uri", + "locationName":"connectInstanceId" + } + }, + "documentation":"

    The request for GetInstanceCommunicationLimits API.

    " + }, + "GetInstanceCommunicationLimitsResponse":{ + "type":"structure", + "members":{ + "communicationLimitsConfig":{"shape":"InstanceCommunicationLimitsConfig"} + }, + "documentation":"

    The response for GetInstanceCommunicationLimits API.

    " + }, "GetInstanceOnboardingJobStatusRequest":{ "type":"structure", "required":["connectInstanceId"], @@ -1434,6 +1489,13 @@ }, "documentation":"

    The response for GetInstanceOnboardingJobStatus API.

    " }, + "InstanceCommunicationLimitsConfig":{ + "type":"structure", + "members":{ + "allChannelSubtypes":{"shape":"CommunicationLimits"} + }, + "documentation":"

    Instance communication limits config

    " + }, "InstanceConfig":{ "type":"structure", "required":[ @@ -1472,6 +1534,14 @@ "documentation":"

    Operators for Connect instance identifier filter

    ", "enum":["Eq"] }, + "InstanceLimitsHandling":{ + "type":"string", + "documentation":"

    Instance limits handling
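Editor's sketch (not part of this patch): this new enum is a member of the campaign-level `CommunicationLimitsConfig` (see the diff hunk above), so a generated builder would accept it roughly as follows. The opt-in/opt-out semantics are inferred from the enum values, not stated in the model.

```java
import software.amazon.awssdk.services.connectcampaignsv2.model.CommunicationLimitsConfig;
import software.amazon.awssdk.services.connectcampaignsv2.model.InstanceLimitsHandling;

public class LimitsHandlingSketch {
    public static CommunicationLimitsConfig optedOutConfig() {
        // Presumably controls whether the campaign honors the instance-wide limits;
        // the other enum value is OPT_IN.
        return CommunicationLimitsConfig.builder()
                .instanceLimitsHandling(InstanceLimitsHandling.OPT_OUT)
                .build();
    }
}
```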

    ", + "enum":[ + "OPT_IN", + "OPT_OUT" + ] + }, "InstanceOnboardingJobFailureCode":{ "type":"string", "documentation":"

    Enumeration of the possible failure codes for instance onboarding job

    ", @@ -1840,6 +1910,22 @@ }, "documentation":"

    The request for PutConnectInstanceIntegration API.

    " }, + "PutInstanceCommunicationLimitsRequest":{ + "type":"structure", + "required":[ + "connectInstanceId", + "communicationLimitsConfig" + ], + "members":{ + "connectInstanceId":{ + "shape":"InstanceId", + "location":"uri", + "locationName":"connectInstanceId" + }, + "communicationLimitsConfig":{"shape":"InstanceCommunicationLimitsConfig"} + }, + "documentation":"

    The request for PutInstanceCommunicationLimits API.

    " + }, "PutOutboundRequestBatchRequest":{ "type":"structure", "required":[ diff --git a/services/connectcases/pom.xml b/services/connectcases/pom.xml index 4f7a720818b7..793079a60bd2 100644 --- a/services/connectcases/pom.xml +++ b/services/connectcases/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT connectcases AWS Java SDK :: Services :: Connect Cases diff --git a/services/connectcontactlens/pom.xml b/services/connectcontactlens/pom.xml index 4ed562892946..7fe7cd46fbb4 100644 --- a/services/connectcontactlens/pom.xml +++ b/services/connectcontactlens/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT connectcontactlens AWS Java SDK :: Services :: Connect Contact Lens diff --git a/services/connectparticipant/pom.xml b/services/connectparticipant/pom.xml index 13498c687f44..b4ae059f1fb9 100644 --- a/services/connectparticipant/pom.xml +++ b/services/connectparticipant/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT connectparticipant AWS Java SDK :: Services :: ConnectParticipant diff --git a/services/controlcatalog/pom.xml b/services/controlcatalog/pom.xml index c243b2854393..7030a505372c 100644 --- a/services/controlcatalog/pom.xml +++ b/services/controlcatalog/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT controlcatalog AWS Java SDK :: Services :: Control Catalog diff --git a/services/controlcatalog/src/main/resources/codegen-resources/paginators-1.json b/services/controlcatalog/src/main/resources/codegen-resources/paginators-1.json index 2ff838410cfc..16cfd2c30469 100644 --- a/services/controlcatalog/src/main/resources/codegen-resources/paginators-1.json +++ b/services/controlcatalog/src/main/resources/codegen-resources/paginators-1.json @@ -6,6 +6,12 @@ "limit_key": "MaxResults", "result_key": "CommonControls" }, + "ListControlMappings": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ControlMappings" + }, "ListControls": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/controlcatalog/src/main/resources/codegen-resources/service-2.json b/services/controlcatalog/src/main/resources/codegen-resources/service-2.json index 7a2f6a6de542..61f793fe6c99 100644 --- a/services/controlcatalog/src/main/resources/codegen-resources/service-2.json +++ b/services/controlcatalog/src/main/resources/codegen-resources/service-2.json @@ -48,6 +48,23 @@ ], "documentation":"

    Returns a paginated list of common controls from the Amazon Web Services Control Catalog.

    You can apply an optional filter to see common controls that have a specific objective. If you don’t provide a filter, the operation returns all common controls.
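Editor's sketch (not part of this patch): filtering by a single objective ARN might look like the following with the generated Control Catalog client and the `ListCommonControls` paginator declared in paginators-1.json. The `ObjectiveResourceFilter` element name and its `arn` member are assumptions based on the `ObjectiveResourceFilterList` reference and the sibling filter shapes in this file; the ARN itself is hypothetical.

```java
import software.amazon.awssdk.services.controlcatalog.ControlCatalogClient;
import software.amazon.awssdk.services.controlcatalog.model.CommonControlFilter;
import software.amazon.awssdk.services.controlcatalog.model.ListCommonControlsRequest;
import software.amazon.awssdk.services.controlcatalog.model.ObjectiveResourceFilter;

public class ListCommonControlsSketch {
    public static void main(String[] args) {
        try (ControlCatalogClient client = ControlCatalogClient.create()) {
            ListCommonControlsRequest request = ListCommonControlsRequest.builder()
                .commonControlFilter(CommonControlFilter.builder()
                    // Exactly one objective ARN per request is supported.
                    .objectives(ObjectiveResourceFilter.builder()
                        .arn("arn:aws:controlcatalog:::objective/example0000000000") // hypothetical
                        .build())
                    .build())
                .build();
            client.listCommonControlsPaginator(request)
                  .commonControls()
                  .forEach(cc -> System.out.println(cc.arn() + " " + cc.name()));
        }
    }
}
```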

    " }, + "ListControlMappings":{ + "name":"ListControlMappings", + "http":{ + "method":"POST", + "requestUri":"/list-control-mappings", + "responseCode":200 + }, + "input":{"shape":"ListControlMappingsRequest"}, + "output":{"shape":"ListControlMappingsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns a paginated list of control mappings from the Control Catalog. Control mappings show relationships between controls and other entities, such as common controls or compliance frameworks.
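Editor's sketch (not part of this patch): the `ListControlMappings` shapes and the paginator entry added in this diff would generate roughly the following. Filtering to `FRAMEWORK` mappings guarantees the `Mapping` union carries framework details; method names assume the standard v2 codegen conventions.

```java
import software.amazon.awssdk.services.controlcatalog.ControlCatalogClient;
import software.amazon.awssdk.services.controlcatalog.model.ControlMappingFilter;
import software.amazon.awssdk.services.controlcatalog.model.ListControlMappingsRequest;
import software.amazon.awssdk.services.controlcatalog.model.MappingType;

public class ListControlMappingsSketch {
    public static void main(String[] args) {
        try (ControlCatalogClient client = ControlCatalogClient.create()) {
            ListControlMappingsRequest request = ListControlMappingsRequest.builder()
                .filter(ControlMappingFilter.builder()
                    .mappingTypes(MappingType.FRAMEWORK) // only framework mappings
                    .build())
                .build();
            client.listControlMappingsPaginator(request)
                  .controlMappings()
                  .forEach(m -> System.out.println(
                      // Safe to read framework() because the filter restricts the union type.
                      m.controlArn() + " -> " + m.mapping().framework().name()));
        }
    }
}
```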

    " + }, "ListControls":{ "name":"ListControls", "http":{ @@ -63,7 +80,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Returns a paginated list of all available controls in the Amazon Web Services Control Catalog library. Allows you to discover available controls. The list of controls is given as structures of type controlSummary. The ARN is returned in the global controlcatalog format, as shown in the examples.

    " + "documentation":"

    Returns a paginated list of all available controls in the Control Catalog library. Allows you to discover available controls. The list of controls is given as structures of type controlSummary. The ARN is returned in the global controlcatalog format, as shown in the examples.

    " }, "ListDomains":{ "name":"ListDomains", @@ -80,7 +97,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Returns a paginated list of domains from the Amazon Web Services Control Catalog.

    " + "documentation":"

    Returns a paginated list of domains from the Control Catalog.

    " }, "ListObjectives":{ "name":"ListObjectives", @@ -97,7 +114,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Returns a paginated list of objectives from the Amazon Web Services Control Catalog.

    You can apply an optional filter to see the objectives that belong to a specific domain. If you don’t provide a filter, the operation returns all objectives.

    " + "documentation":"

    Returns a paginated list of objectives from the Control Catalog.

    You can apply an optional filter to see the objectives that belong to a specific domain. If you don’t provide a filter, the operation returns all objectives.

    " } }, "shapes":{ @@ -147,16 +164,33 @@ "min":41, "pattern":"arn:(aws(?:[-a-z]*)?):controlcatalog:::common-control/[0-9a-z]+" }, + "CommonControlArnFilterList":{ + "type":"list", + "member":{"shape":"CommonControlArn"}, + "max":1, + "min":1 + }, "CommonControlFilter":{ "type":"structure", "members":{ "Objectives":{ "shape":"ObjectiveResourceFilterList", - "documentation":"

    The objective that's used as filter criteria.

    You can use this parameter to specify one objective ARN at a time. Passing multiple ARNs in the CommonControlFilter isn’t currently supported.

    " + "documentation":"

    The objective that's used as filter criteria.

    You can use this parameter to specify one objective ARN at a time. Passing multiple ARNs in the CommonControlFilter isn’t supported.

    " } }, "documentation":"

    An optional filter that narrows the results to a specific objective.

    " }, + "CommonControlMappingDetails":{ + "type":"structure", + "required":["CommonControlArn"], + "members":{ + "CommonControlArn":{ + "shape":"CommonControlArn", + "documentation":"

    The Amazon Resource Name (ARN) that identifies the common control in the mapping.

    " + } + }, + "documentation":"

    A structure that contains details about a common control mapping. In particular, it returns the Amazon Resource Name (ARN) of the common control.

    " + }, "CommonControlSummary":{ "type":"structure", "required":[ @@ -204,12 +238,26 @@ "type":"list", "member":{"shape":"CommonControlSummary"} }, + "ControlAlias":{ + "type":"string", + "pattern":"[a-zA-Z0-9](?:[a-zA-Z0-9_.-]{0,254}[a-zA-Z0-9])" + }, + "ControlAliases":{ + "type":"list", + "member":{"shape":"ControlAlias"} + }, "ControlArn":{ "type":"string", "max":2048, "min":34, "pattern":"arn:(aws(?:[-a-z]*)?):(controlcatalog|controltower):[a-zA-Z0-9-]*::control/[0-9a-zA-Z_\\-]+" }, + "ControlArnFilterList":{ + "type":"list", + "member":{"shape":"ControlArn"}, + "max":1, + "min":1 + }, "ControlBehavior":{ "type":"string", "enum":[ @@ -218,6 +266,61 @@ "DETECTIVE" ] }, + "ControlFilter":{ + "type":"structure", + "members":{ + "Implementations":{ + "shape":"ImplementationFilter", + "documentation":"

    A filter that narrows the results to controls with specific implementation types or identifiers. This field allows you to find controls that are implemented by specific Amazon Web Services services or with specific service identifiers.

    " + } + }, + "documentation":"

    A structure that defines filtering criteria for the ListControls operation. You can use this filter to narrow down the list of controls based on their implementation details.

    " + }, + "ControlMapping":{ + "type":"structure", + "required":[ + "ControlArn", + "MappingType", + "Mapping" + ], + "members":{ + "ControlArn":{ + "shape":"ControlArn", + "documentation":"

    The Amazon Resource Name (ARN) that identifies the control in the mapping.

    " + }, + "MappingType":{ + "shape":"MappingType", + "documentation":"

    The type of mapping relationship between the control and other entities. Indicates whether the mapping is to a framework or common control.

    " + }, + "Mapping":{ + "shape":"Mapping", + "documentation":"

    The details of the mapping relationship, containing either framework or common control information.

    " + } + }, + "documentation":"

    A structure that contains information about a control mapping, including the control ARN, mapping type, and mapping details.

    " + }, + "ControlMappingFilter":{ + "type":"structure", + "members":{ + "ControlArns":{ + "shape":"ControlArnFilterList", + "documentation":"

    A list of control ARNs to filter the mappings. When specified, only mappings associated with these controls are returned.

    " + }, + "CommonControlArns":{ + "shape":"CommonControlArnFilterList", + "documentation":"

    A list of common control ARNs to filter the mappings. When specified, only mappings associated with these common controls are returned.

    " + }, + "MappingTypes":{ + "shape":"MappingTypeFilterList", + "documentation":"

    A list of mapping types to filter the mappings. When specified, only mappings of these types are returned.

    " + } + }, + "documentation":"

    A structure that defines filtering criteria for the ListControlMappings operation. You can use this filter to narrow down the list of control mappings based on control ARNs, common control ARNs, or mapping types.

    " + }, + "ControlMappings":{ + "type":"list", + "member":{"shape":"ControlMapping"} + }, "ControlParameter":{ "type":"structure", "required":["Name"], @@ -261,6 +364,10 @@ "shape":"ControlArn", "documentation":"

    The Amazon Resource Name (ARN) of the control.

    " }, + "Aliases":{ + "shape":"ControlAliases", + "documentation":"

    A list of alternative identifiers for the control. These are human-readable designators, such as SH.S3.1. Several aliases can refer to the same control across different Amazon Web Services services or compliance frameworks.

    " + }, "Name":{ "shape":"String", "documentation":"

    The display name of the control.

    " @@ -284,6 +391,10 @@ "CreateTime":{ "shape":"Timestamp", "documentation":"

    A timestamp that notes the time when the control was released (start of its life) as a governance capability in Amazon Web Services.

    " + }, + "GovernedResources":{ + "shape":"GovernedResources", + "documentation":"

    A list of Amazon Web Services resource types that are governed by this control. This information helps you understand which controls can govern certain types of resources, and conversely, which resources are affected when the control is implemented. The resources are represented as Amazon Web Services CloudFormation resource types. If GovernedResources cannot be represented by available CloudFormation resource types, it’s returned as an empty list.

    " } }, "documentation":"

    Overview of information about a control.

    " @@ -353,6 +464,34 @@ "type":"list", "member":{"shape":"DomainSummary"} }, + "FrameworkItem":{ + "type":"string", + "max":250, + "min":3 + }, + "FrameworkMappingDetails":{ + "type":"structure", + "required":[ + "Name", + "Item" + ], + "members":{ + "Name":{ + "shape":"FrameworkName", + "documentation":"

    The name of the compliance framework that the control maps to.

    " + }, + "Item":{ + "shape":"FrameworkItem", + "documentation":"

    The specific item or requirement within the framework that the control maps to.

    " + } + }, + "documentation":"

    A structure that contains details about a framework mapping, including the framework name and specific item within the framework that the control maps to.

    " + }, + "FrameworkName":{ + "type":"string", + "max":250, + "min":3 + }, "GetControlRequest":{ "type":"structure", "required":["ControlArn"], @@ -377,6 +516,10 @@ "shape":"ControlArn", "documentation":"

    The Amazon Resource Name (ARN) of the control.

    " }, + "Aliases":{ + "shape":"ControlAliases", + "documentation":"

    A list of alternative identifiers for the control. These are human-readable designators, such as SH.S3.1. Several aliases can refer to the same control across different Amazon Web Services services or compliance frameworks.

    " + }, "Name":{ "shape":"String", "documentation":"

    The display name of the control.

    " @@ -405,9 +548,21 @@ "CreateTime":{ "shape":"Timestamp", "documentation":"

    A timestamp that notes the time when the control was released (start of its life) as a governance capability in Amazon Web Services.

    " + }, + "GovernedResources":{ + "shape":"GovernedResources", + "documentation":"

    A list of Amazon Web Services resource types that are governed by this control. This information helps you understand which controls can govern certain types of resources, and conversely, which resources are affected when the control is implemented. The resources are represented as Amazon Web Services CloudFormation resource types. If GovernedResources cannot be represented by available CloudFormation resource types, it’s returned as an empty list.
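Editor's sketch (not part of this patch): reading the new `Aliases` and `GovernedResources` members off a `GetControl` response, assuming the usual generated accessors; the control ARN is hypothetical.

```java
import software.amazon.awssdk.services.controlcatalog.ControlCatalogClient;
import software.amazon.awssdk.services.controlcatalog.model.GetControlRequest;
import software.amazon.awssdk.services.controlcatalog.model.GetControlResponse;

public class GetControlSketch {
    public static void main(String[] args) {
        try (ControlCatalogClient client = ControlCatalogClient.create()) {
            GetControlResponse control = client.getControl(GetControlRequest.builder()
                .controlArn("arn:aws:controlcatalog:::control/example00000") // hypothetical ARN
                .build());
            System.out.println(control.name());
            // Human-readable designators such as SH.S3.1.
            control.aliases().forEach(a -> System.out.println("alias: " + a));
            // CloudFormation resource types governed by this control; may be empty.
            control.governedResources().forEach(r -> System.out.println("governs: " + r));
        }
    }
}
```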

    " } } }, + "GovernedResource":{ + "type":"string", + "pattern":"[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}" + }, + "GovernedResources":{ + "type":"list", + "member":{"shape":"GovernedResource"} + }, "ImplementationDetails":{ "type":"structure", "required":["Type"], @@ -423,11 +578,31 @@ }, "documentation":"

    An object that describes the implementation type for a control.

    Our ImplementationDetails Type format has three required segments:

    • SERVICE-PROVIDER::SERVICE-NAME::RESOURCE-NAME

    For example, AWS::Config::ConfigRule or AWS::SecurityHub::SecurityControl resources have the format with three required segments.

    Our ImplementationDetails Type format has an optional fourth segment, which is present for applicable implementation types. The format is as follows:

    • SERVICE-PROVIDER::SERVICE-NAME::RESOURCE-NAME::RESOURCE-TYPE-DESCRIPTION

    For example, AWS::Organizations::Policy::SERVICE_CONTROL_POLICY or AWS::CloudFormation::Type::HOOK have the format with four segments.

    Although the format is similar, the values for the Type field do not match any Amazon Web Services CloudFormation values.

    " }, + "ImplementationFilter":{ + "type":"structure", + "members":{ + "Types":{ + "shape":"ImplementationTypeFilterList", + "documentation":"

    A list of implementation types that can serve as filters. For example, you can filter for controls implemented as Amazon Web Services Config Rules by specifying AWS::Config::ConfigRule as a type.

    " + }, + "Identifiers":{ + "shape":"ImplementationIdentifierFilterList", + "documentation":"

    A list of service-specific identifiers that can serve as filters. For example, you can filter for controls with specific Amazon Web Services Config Rule IDs or Security Hub Control IDs.

    " + } + }, + "documentation":"

    A structure that defines filtering criteria for control implementations. You can use this filter to find controls that are implemented by specific Amazon Web Services services or with specific service identifiers.
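Editor's sketch (not part of this patch): the `ControlFilter`/`ImplementationFilter` shapes added in this diff would let `ListControls` narrow results by implementation type; both filter lists accept exactly one value (max:1 in the model). The paginator accessor name assumes the existing `ListControls` entry in paginators-1.json with a `Controls` result key.

```java
import software.amazon.awssdk.services.controlcatalog.ControlCatalogClient;
import software.amazon.awssdk.services.controlcatalog.model.ControlFilter;
import software.amazon.awssdk.services.controlcatalog.model.ImplementationFilter;
import software.amazon.awssdk.services.controlcatalog.model.ListControlsRequest;

public class ListControlsByImplementationSketch {
    public static void main(String[] args) {
        try (ControlCatalogClient client = ControlCatalogClient.create()) {
            ListControlsRequest request = ListControlsRequest.builder()
                .filter(ControlFilter.builder()
                    .implementations(ImplementationFilter.builder()
                        // Only controls implemented as AWS Config rules.
                        .types("AWS::Config::ConfigRule")
                        .build())
                    .build())
                .build();
            client.listControlsPaginator(request)
                  .controls()
                  .forEach(c -> System.out.println(c.arn() + " " + c.name()));
        }
    }
}
```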

    " + }, "ImplementationIdentifier":{ "type":"string", "max":256, "min":1, - "pattern":"[a-z0-9-]+" + "pattern":"[a-zA-Z0-9_\\.-]+" + }, + "ImplementationIdentifierFilterList":{ + "type":"list", + "member":{"shape":"ImplementationIdentifier"}, + "max":1, + "min":1 }, "ImplementationSummary":{ "type":"structure", @@ -450,6 +625,12 @@ "min":7, "pattern":"[A-Za-z0-9]+(::[A-Za-z0-9_]+){2,3}" }, + "ImplementationTypeFilterList":{ + "type":"list", + "member":{"shape":"ImplementationType"}, + "max":1, + "min":1 + }, "InternalServerException":{ "type":"structure", "members":{ @@ -478,7 +659,7 @@ }, "CommonControlFilter":{ "shape":"CommonControlFilter", - "documentation":"

    An optional filter that narrows the results to a specific objective.

    This filter allows you to specify one objective ARN at a time. Passing multiple ARNs in the CommonControlFilter isn’t currently supported.

    " + "documentation":"

    An optional filter that narrows the results to a specific objective.

    This filter allows you to specify one objective ARN at a time. Passing multiple ARNs in the CommonControlFilter isn’t supported.

    " } } }, @@ -496,6 +677,41 @@ } } }, + "ListControlMappingsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    The pagination token that's used to fetch the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxListControlMappingsResults", + "documentation":"

    The maximum number of results on a page or for an API request call.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "Filter":{ + "shape":"ControlMappingFilter", + "documentation":"

    An optional filter that narrows the results to specific control mappings based on control ARNs, common control ARNs, or mapping types.

    " + } + } + }, + "ListControlMappingsResponse":{ + "type":"structure", + "required":["ControlMappings"], + "members":{ + "ControlMappings":{ + "shape":"ControlMappings", + "documentation":"

    The list of control mappings that the ListControlMappings API returns.

    " + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    The pagination token that's used to fetch the next set of results.

    " + } + } + }, "ListControlsRequest":{ "type":"structure", "members":{ @@ -510,6 +726,10 @@ "documentation":"

    The maximum number of results on a page or for an API request call.

    ", "location":"querystring", "locationName":"maxResults" + }, + "Filter":{ + "shape":"ControlFilter", + "documentation":"

    An optional filter that narrows the results to controls with specific implementation types or identifiers. If you don't provide a filter, the operation returns all available controls.

    " } } }, @@ -575,7 +795,7 @@ }, "ObjectiveFilter":{ "shape":"ObjectiveFilter", - "documentation":"

    An optional filter that narrows the results to a specific domain.

    This filter allows you to specify one domain ARN at a time. Passing multiple ARNs in the ObjectiveFilter isn’t currently supported.

    " + "documentation":"

    An optional filter that narrows the results to a specific domain.

    This filter allows you to specify one domain ARN at a time. Passing multiple ARNs in the ObjectiveFilter isn’t supported.

    " } } }, @@ -593,12 +813,46 @@ } } }, + "Mapping":{ + "type":"structure", + "members":{ + "Framework":{ + "shape":"FrameworkMappingDetails", + "documentation":"

    The framework mapping details when the mapping type relates to a compliance framework.

    " + }, + "CommonControl":{ + "shape":"CommonControlMappingDetails", + "documentation":"

    The common control mapping details when the mapping type relates to a common control.

    " + } + }, + "documentation":"

    A structure that contains the details of a mapping relationship, which can be either to a framework or to a common control.

    ", + "union":true + }, + "MappingType":{ + "type":"string", + "enum":[ + "FRAMEWORK", + "COMMON_CONTROL" + ] + }, + "MappingTypeFilterList":{ + "type":"list", + "member":{"shape":"MappingType"}, + "max":1, + "min":1 + }, "MaxListCommonControlsResults":{ "type":"integer", "box":true, "max":100, "min":1 }, + "MaxListControlMappingsResults":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, "MaxListControlsResults":{ "type":"integer", "box":true, @@ -628,7 +882,7 @@ "members":{ "Domains":{ "shape":"DomainResourceFilterList", - "documentation":"

    The domain that's used as filter criteria.

    You can use this parameter to specify one domain ARN at a time. Passing multiple ARNs in the ObjectiveFilter isn’t currently supported.

    " + "documentation":"

    The domain that's used as filter criteria.

    You can use this parameter to specify one domain ARN at a time. Passing multiple ARNs in the ObjectiveFilter isn’t supported.

    " } }, "documentation":"

    An optional filter that narrows the list of objectives to a specific domain.

    " @@ -711,7 +965,7 @@ "documentation":"

    Regions in which the control is available to be deployed.

    " } }, - "documentation":"

    Returns information about the control, including the scope of the control, if enabled, and the Regions in which the control currently is available for deployment. For more information about scope, see Global services.

    If you are applying controls through an Amazon Web Services Control Tower landing zone environment, remember that the values returned in the RegionConfiguration API operation are not related to the governed Regions in your landing zone. For example, if you are governing Regions A,B,and C while the control is available in Regions A, B, C, and D, you'd see a response with DeployableRegions of A, B, C, and D for a control with REGIONAL scope, even though you may not intend to deploy the control in Region D, because you do not govern it through your landing zone.

    " + "documentation":"

    Returns information about the control, including the scope of the control, if enabled, and the Regions in which the control is available for deployment. For more information about scope, see Global services.

    If you are applying controls through an Amazon Web Services Control Tower landing zone environment, remember that the values returned in the RegionConfiguration API operation are not related to the governed Regions in your landing zone. For example, if you are governing Regions A, B, and C while the control is available in Regions A, B, C, and D, you'd see a response with DeployableRegions of A, B, C, and D for a control with REGIONAL scope, even though you may not intend to deploy the control in Region D, because you do not govern it through your landing zone.
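Editor's sketch (not part of this patch): continuing the GetControl sketch above, the deployable Regions described here would be read as follows; `regionConfiguration()` and `deployableRegions()` are assumed accessors on the existing response model.

```java
// Which Regions the control could be deployed to, independent of which
// Regions a Control Tower landing zone actually governs.
control.regionConfiguration().deployableRegions()
       .forEach(region -> System.out.println("deployable in: " + region));
```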

    " }, "ResourceNotFoundException":{ "type":"structure", @@ -753,5 +1007,5 @@ "exception":true } }, - "documentation":"

    Welcome to the Amazon Web Services Control Catalog API reference. This guide is for developers who need detailed information about how to programmatically identify and filter the common controls and related metadata that are available to Amazon Web Services customers. This API reference provides descriptions, syntax, and usage examples for each of the actions and data types that are supported by Amazon Web Services Control Catalog.

    Use the following links to get started with the Amazon Web Services Control Catalog API:

    • Actions: An alphabetical list of all Control Catalog API operations.

    • Data types: An alphabetical list of all Control Catalog data types.

    • Common parameters: Parameters that all operations can use.

    • Common errors: Client and server errors that all operations can return.

    " + "documentation":"

    Welcome to the Control Catalog API reference. This guide is for developers who need detailed information about how to programmatically identify and filter the common controls and related metadata that are available to Amazon Web Services customers. This API reference provides descriptions, syntax, and usage examples for each of the actions and data types that are supported by Control Catalog.

    Use the following links to get started with the Control Catalog API:

    • Actions: An alphabetical list of all Control Catalog API operations.

    • Data types: An alphabetical list of all Control Catalog data types.

    • Common parameters: Parameters that all operations can use.

    • Common errors: Client and server errors that all operations can return.

    " } diff --git a/services/controltower/pom.xml b/services/controltower/pom.xml index 51c2fcf99c21..074206d7e036 100644 --- a/services/controltower/pom.xml +++ b/services/controltower/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT controltower AWS Java SDK :: Services :: Control Tower diff --git a/services/costandusagereport/pom.xml b/services/costandusagereport/pom.xml index 8adb0e77fa26..faef4af2c038 100644 --- a/services/costandusagereport/pom.xml +++ b/services/costandusagereport/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT costandusagereport AWS Java SDK :: Services :: AWS Cost and Usage Report diff --git a/services/costexplorer/pom.xml b/services/costexplorer/pom.xml index 0ff1d5605dcd..ad7ec795b7ef 100644 --- a/services/costexplorer/pom.xml +++ b/services/costexplorer/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 costexplorer diff --git a/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json index 12e77aabd32b..6e774dcfc1d4 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -1,12 +1,6 @@ { "version": "1.0", "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, "UseDualStack": { "builtIn": "AWS::UseDualStack", "required": true, @@ -26,6 +20,12 @@ "required": false, "documentation": "Override the endpoint used to send this request", "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" } }, "rules": [ @@ -57,564 +57,616 @@ "type": "error" }, { - "conditions": [ + "conditions": [], + "rules": [ { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, - true - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree" } ], "type": "tree" }, { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], + "conditions": [], "rules": [ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { "ref": "Region" } - ], - "assign": "PartitionResult" + ] } ], "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "aws.partition", "argv": [ { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", "argv": [ { - "ref": "PartitionResult" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] }, - "name" + "aws" ] }, - "aws" - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseFIPS" - }, - false - ] - }, - { - "fn": "booleanEquals", - 
"argv": [ - { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - false - ] - } - ], - "endpoint": { - "url": "https://ce.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "ce", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "name" + true + ] + } + ], + "endpoint": { + "url": "https://ce.us-east-1.api.aws", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } ] }, - "aws-cn" - ] + "headers": {} + }, + "type": "endpoint" }, { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-cn" + ] }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - false - ] - } - ], - "endpoint": { - "url": "https://ce.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "ce", - "signingRegion": "cn-northwest-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "name" + true + ] + } + ], + "endpoint": { + "url": "https://ce.cn-northwest-1.api.amazonwebservices.com.cn", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } ] }, - "aws-iso" - ] + "headers": {} + }, + "type": "endpoint" }, { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso" + ] }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - false - ] - } - ], - "endpoint": { - "url": "https://ce.us-iso-east-1.c2s.ic.gov", - "properties": { - "authSchemes": [ { - "name": "sigv4", - "signingName": "ce", - "signingRegion": "us-iso-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "name" + false + ] + } + ], + "endpoint": { + "url": "https://ce.us-iso-east-1.c2s.ic.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } ] }, - "aws-iso-b" - ] + "headers": {} + }, + "type": "endpoint" }, { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso-b" + ] }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - false - ] - } - ], - "endpoint": { - "url": "https://ce.us-isob-east-1.sc2s.sgov.gov", - "properties": { - "authSchemes": [ { - "name": "sigv4", - "signingName": "ce", - 
"signingRegion": "us-isob-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "name" + false + ] + } + ], + "endpoint": { + "url": "https://ce.us-isob-east-1.sc2s.sgov.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } ] }, - "aws-iso-e" - ] + "headers": {} + }, + "type": "endpoint" }, { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso-e" + ] }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - false - ] - } - ], - "endpoint": { - "url": "https://ce.eu-isoe-west-1.cloud.adc-e.uk", - "properties": { - "authSchemes": [ { - "name": "sigv4", - "signingName": "ce", - "signingRegion": "eu-isoe-west-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "name" + false + ] + } + ], + "endpoint": { + "url": "https://ce.eu-isoe-west-1.cloud.adc-e.uk", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } ] }, - "aws-iso-f" - ] + "headers": {} + }, + "type": "endpoint" }, { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso-f" + ] }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - false - ] - } - ], - "endpoint": { - "url": "https://ce.us-isof-south-1.csp.hci.ic.gov", - "properties": { - "authSchemes": [ { - "name": "sigv4", - "signingName": "ce", - "signingRegion": "us-isof-south-1" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" + ], + "endpoint": { + "url": "https://ce.us-isof-south-1.csp.hci.ic.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] }, - true - ] + "headers": {} + }, + "type": "endpoint" }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] }, { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + 
"conditions": [], + "endpoint": { + "url": "https://ce-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://ce-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ], "type": "tree" }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] + "ref": "UseFIPS" }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] } ], "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ce-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://ce-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ], "type": "tree" }, { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - true - ] - } - ], - "rules": [ - { - "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ce.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://ce.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ], "type": "tree" }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": 
"error" + "endpoint": { + "url": "https://ce.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" - }, - { - "conditions": [], - "endpoint": { - "url": "https://ce.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ], "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ], "type": "tree" - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] } \ No newline at end of file diff --git a/services/costexplorer/src/main/resources/codegen-resources/endpoint-tests.json b/services/costexplorer/src/main/resources/codegen-resources/endpoint-tests.json index e57583a59213..39c2631147c5 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/costexplorer/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,31 +1,50 @@ { "testCases": [ { - "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region not set and fips disabled", "expect": { "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "ce", - "signingRegion": "us-east-1" - } - ] - }, - "url": "https://ce.us-east-1.amazonaws.com" + "url": "https://example.com" } }, "params": { - "Region": "aws-global", + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://ce-fips.us-east-1.api.aws" } }, @@ -39,6 +58,14 @@ "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://ce-fips.us-east-1.amazonaws.com" } }, @@ -52,6 +79,14 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://ce.us-east-1.api.aws" } }, @@ -69,7 +104,6 @@ "authSchemes": [ { "name": "sigv4", - "signingName": "ce", "signingRegion": "us-east-1" } ] @@ -84,75 +118,76 @@ } }, { - "documentation": "For region aws-cn-global with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "ce", "signingRegion": "cn-northwest-1" } ] }, - "url": "https://ce.cn-northwest-1.amazonaws.com.cn" 
- } - }, - "params": { - "Region": "aws-cn-global", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://ce-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://ce-fips.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://ce-fips.cn-north-1.amazonaws.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://ce-fips.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://ce.cn-north-1.api.amazonwebservices.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://ce.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "ce", "signingRegion": "cn-northwest-1" } ] @@ -161,81 +196,91 @@ } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://ce-fips.us-gov-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://ce-fips.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://ce-fips.us-gov-east-1.amazonaws.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://ce-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://ce.us-gov-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://ce.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + 
"Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://ce.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region aws-iso-global with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "ce", - "signingRegion": "us-iso-east-1" + "signingRegion": "us-gov-west-1" } ] }, - "url": "https://ce.us-iso-east-1.c2s.ic.gov" + "url": "https://ce.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "aws-iso-global", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": false } @@ -255,6 +300,14 @@ "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, "url": "https://ce-fips.us-iso-east-1.c2s.ic.gov" } }, @@ -283,7 +336,6 @@ "authSchemes": [ { "name": "sigv4", - "signingName": "ce", "signingRegion": "us-iso-east-1" } ] @@ -297,28 +349,6 @@ "UseDualStack": false } }, - { - "documentation": "For region aws-iso-b-global with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "ce", - "signingRegion": "us-isob-east-1" - } - ] - }, - "url": "https://ce.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "aws-iso-b-global", - "UseFIPS": false, - "UseDualStack": false - } - }, { "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { @@ -334,6 +364,14 @@ "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, "url": "https://ce-fips.us-isob-east-1.sc2s.sgov.gov" } }, @@ -362,7 +400,6 @@ "authSchemes": [ { "name": "sigv4", - "signingName": "ce", "signingRegion": "us-isob-east-1" } ] @@ -377,98 +414,131 @@ } }, { - "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "ce", "signingRegion": "eu-isoe-west-1" } ] }, - "url": "https://ce.eu-isoe-west-1.cloud.adc-e.uk" + "url": "https://ce-fips.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { "Region": "eu-isoe-west-1", - "UseFIPS": false, + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": false, + "UseDualStack": 
true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "ce", - "signingRegion": "us-isof-south-1" + "signingRegion": "eu-isoe-west-1" } ] }, - "url": "https://ce.us-isof-south-1.csp.hci.ic.gov" + "url": "https://ce.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { - "Region": "us-isof-south-1", + "Region": "eu-isoe-west-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://example.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "Region": "us-isof-south-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://ce-fips.us-isof-south-1.csp.hci.ic.gov" } }, "params": { - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "Region": "us-isof-south-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" + "Region": "us-isof-south-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://ce.us-isof-south-1.csp.hci.ic.gov" + } }, "params": { - "Region": "us-east-1", + "Region": "us-isof-south-1", "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" + "UseDualStack": false } }, { diff --git a/services/costoptimizationhub/pom.xml b/services/costoptimizationhub/pom.xml index 542450836bdc..80054045de1a 100644 --- a/services/costoptimizationhub/pom.xml +++ b/services/costoptimizationhub/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT costoptimizationhub AWS Java SDK :: Services :: Cost Optimization Hub diff --git a/services/costoptimizationhub/src/main/resources/codegen-resources/service-2.json b/services/costoptimizationhub/src/main/resources/codegen-resources/service-2.json index 6a7d2f52a745..e45fac809577 100644 --- a/services/costoptimizationhub/src/main/resources/codegen-resources/service-2.json +++ 
b/services/costoptimizationhub/src/main/resources/codegen-resources/service-2.json @@ -201,6 +201,27 @@ "LowestPrice" ] }, + "AuroraDbClusterStorage":{ + "type":"structure", + "members":{ + "configuration":{ + "shape":"AuroraDbClusterStorageConfiguration", + "documentation":"

    The Aurora DB cluster storage configuration used for recommendations.

    " + }, + "costCalculation":{"shape":"ResourceCostCalculation"} + }, + "documentation":"

    Contains the details of Aurora DB cluster storage.

    " + }, + "AuroraDbClusterStorageConfiguration":{ + "type":"structure", + "members":{ + "storageType":{ + "shape":"String", + "documentation":"

    The storage type to associate with the Aurora DB cluster.

    " + } + }, + "documentation":"

    The Aurora DB cluster storage configuration used for recommendations.
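
For illustration, the new AuroraDbClusterStorage shapes above would surface in a recommendation's resourceDetails roughly as follows. This is a sketch: the storage type value is an example (Aurora currently offers aurora and aurora-iopt1), and the costCalculation member is omitted.

        "resourceDetails": {
          "auroraDbClusterStorage": {
            "configuration": {
              "storageType": "aurora-iopt1"
            }
          }
        }
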

    " + }, "BlockStoragePerformanceConfiguration":{ "type":"structure", "members":{ @@ -758,6 +779,10 @@ "memberAccountDiscountVisibility":{ "shape":"MemberAccountDiscountVisibility", "documentation":"

    Retrieves the status of the \"member account discount visibility\" preference.

    " + }, + "preferredCommitment":{ + "shape":"PreferredCommitment", + "documentation":"

    Retrieves the current preferences for how Reserved Instances and Savings Plans cost-saving opportunities are prioritized in terms of payment option and term length.

    " } } }, @@ -1094,7 +1119,7 @@ }, "costCalculation":{"shape":"ReservedInstancesCostCalculation"} }, - "documentation":"

    The MemoryDB reserved instances recommendation details.

    MemoryDB reserved instances are referred to as \"MemoryDB reserved nodes\" in customer-facing documentation.

    " + "documentation":"

    The MemoryDB reserved instances recommendation details.

    While the API reference uses \"MemoryDB reserved instances\", the user guide and other documentation refer to them as \"MemoryDB reserved nodes\", as the terms are used interchangeably.

    " }, "MemoryDbReservedInstancesConfiguration":{ "type":"structure", @@ -1152,7 +1177,7 @@ "documentation":"

    Determines whether the recommendation is for a current generation instance.

    " } }, - "documentation":"

    The MemoryDB reserved instances configuration used for recommendations.

    MemoryDB reserved instances are referred to as \"MemoryDB reserved nodes\" in customer-facing documentation.

    " + "documentation":"

    The MemoryDB reserved instances configuration used for recommendations.

    While the API reference uses \"MemoryDB reserved instances\", the user guide and other documentation refer to them as \"MemoryDB reserved nodes\", as the terms are used interchangeably.

    " }, "MixedInstanceConfiguration":{ "type":"structure", @@ -1257,6 +1282,28 @@ }, "documentation":"

    Defines how rows will be sorted in the response.

    " }, + "PaymentOption":{ + "type":"string", + "enum":[ + "AllUpfront", + "PartialUpfront", + "NoUpfront" + ] + }, + "PreferredCommitment":{ + "type":"structure", + "members":{ + "term":{ + "shape":"Term", + "documentation":"

    The preferred length of the commitment period. If the value is null, it will default to ThreeYears (highest savings) where applicable.

    " + }, + "paymentOption":{ + "shape":"PaymentOption", + "documentation":"

    The preferred upfront payment structure for commitments. If the value is null, it will default to AllUpfront (highest savings) where applicable.

    " + } + }, + "documentation":"

    The preferred configuration for Reserved Instances and Savings Plans commitment-based discounts, consisting of a payment option and a commitment duration.
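
To make the new shape concrete, here is a hedged sketch of an UpdatePreferences request body that selects the highest-savings options explicitly; the member names and enum values come from the model above, and both members may be omitted to fall back to the documented defaults.

        {
          "preferredCommitment": {
            "term": "ThreeYears",
            "paymentOption": "AllUpfront"
          }
        }
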

    " + }, "PrimitiveBoolean":{"type":"boolean"}, "RdsDbInstance":{ "type":"structure", @@ -1715,6 +1762,10 @@ "shape":"RdsDbInstanceStorage", "documentation":"

    The DB instance storage recommendation details.

    " }, + "auroraDbClusterStorage":{ + "shape":"AuroraDbClusterStorage", + "documentation":"

    The Aurora DB cluster storage recommendation details.

    " + }, "dynamoDbReservedCapacity":{ "shape":"DynamoDbReservedCapacity", "documentation":"

    The DynamoDB reserved capacity recommendation details.

    " @@ -1789,6 +1840,7 @@ "ElastiCacheReservedInstances", "RdsDbInstanceStorage", "RdsDbInstance", + "AuroraDbClusterStorage", "DynamoDbReservedCapacity", "MemoryDbReservedInstances" ] @@ -1936,6 +1988,13 @@ "max":100, "min":1 }, + "Term":{ + "type":"string", + "enum":[ + "OneYear", + "ThreeYears" + ] + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -1978,6 +2037,10 @@ "memberAccountDiscountVisibility":{ "shape":"MemberAccountDiscountVisibility", "documentation":"

    Sets the \"member account discount visibility\" preference.

    " + }, + "preferredCommitment":{ + "shape":"PreferredCommitment", + "documentation":"

    Sets the preferences for how Reserved Instances and Savings Plans cost-saving opportunities are prioritized in terms of payment option and term length.

    " } } }, @@ -1991,6 +2054,10 @@ "memberAccountDiscountVisibility":{ "shape":"MemberAccountDiscountVisibility", "documentation":"

    Shows the status of the \"member account discount visibility\" preference.

    " + }, + "preferredCommitment":{ + "shape":"PreferredCommitment", + "documentation":"

    Shows the updated preferences for how Reserved Instances and Savings Plans cost-saving opportunities are prioritized in terms of payment option and term length.

    " } } }, diff --git a/services/customerprofiles/pom.xml b/services/customerprofiles/pom.xml index 19333efe097b..b9387d59fde3 100644 --- a/services/customerprofiles/pom.xml +++ b/services/customerprofiles/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT customerprofiles AWS Java SDK :: Services :: Customer Profiles diff --git a/services/customerprofiles/src/main/resources/codegen-resources/paginators-1.json b/services/customerprofiles/src/main/resources/codegen-resources/paginators-1.json index ef6e528e5670..d8c945a0b63c 100644 --- a/services/customerprofiles/src/main/resources/codegen-resources/paginators-1.json +++ b/services/customerprofiles/src/main/resources/codegen-resources/paginators-1.json @@ -6,6 +6,12 @@ "limit_key": "MaxResults", "result_key": "ProfileIds" }, + "ListDomainLayouts": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Items" + }, "ListEventStreams": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/customerprofiles/src/main/resources/codegen-resources/service-2.json b/services/customerprofiles/src/main/resources/codegen-resources/service-2.json index 00a74a1066b7..a599196f6fac 100644 --- a/services/customerprofiles/src/main/resources/codegen-resources/service-2.json +++ b/services/customerprofiles/src/main/resources/codegen-resources/service-2.json @@ -100,6 +100,23 @@ ], "documentation":"

    Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations.

    Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain.

    Use this API or UpdateDomain to enable identity resolution: set Matching to true.

    To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.

    It is not possible to associate a Customer Profiles domain with an Amazon Connect instance directly from the API. If you would like to create a domain and associate it with an Amazon Connect instance, use the Amazon Connect admin website. For more information, see Enable Customer Profiles.


    " }, + "CreateDomainLayout":{ + "name":"CreateDomainLayout", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/layouts/{LayoutDefinitionName}" + }, + "input":{"shape":"CreateDomainLayoutRequest"}, + "output":{"shape":"CreateDomainLayoutResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates the layout to view data for a specific domain. This API can only be invoked from the Amazon Connect admin website.

    " + }, "CreateEventStream":{ "name":"CreateEventStream", "http":{ @@ -255,6 +272,23 @@ ], "documentation":"

    Deletes a specific domain and all of its customer data, such as customer profile attributes and their related objects.

    " }, + "DeleteDomainLayout":{ + "name":"DeleteDomainLayout", + "http":{ + "method":"DELETE", + "requestUri":"/domains/{DomainName}/layouts/{LayoutDefinitionName}" + }, + "input":{"shape":"DeleteDomainLayoutRequest"}, + "output":{"shape":"DeleteDomainLayoutResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes the layout used to view data for a specific domain. This API can only be invoked from the Amazon Connect admin website.

    " + }, "DeleteEventStream":{ "name":"DeleteEventStream", "http":{ @@ -496,6 +530,23 @@ ], "documentation":"

    Returns information about a specific domain.

    " }, + "GetDomainLayout":{ + "name":"GetDomainLayout", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/layouts/{LayoutDefinitionName}" + }, + "input":{"shape":"GetDomainLayoutRequest"}, + "output":{"shape":"GetDomainLayoutResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets the layout to view data for a specific domain. This API can only be invoked from the Amazon Connect admin website.

    " + }, "GetEventStream":{ "name":"GetEventStream", "http":{ @@ -788,6 +839,23 @@ ], "documentation":"

    Retrieve a list of calculated attributes for a customer profile.

    " }, + "ListDomainLayouts":{ + "name":"ListDomainLayouts", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/layouts" + }, + "input":{"shape":"ListDomainLayoutsRequest"}, + "output":{"shape":"ListDomainLayoutsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the existing layouts that can be used to view data for a specific domain. This API can only be invoked from the Amazon Connect admin website.

    " + }, "ListDomains":{ "name":"ListDomains", "http":{ @@ -1173,6 +1241,23 @@ ], "documentation":"

    Updates the properties of a domain, including creating or selecting a dead letter queue or an encryption key.

    After a domain is created, the name can’t be changed.

    Use this API or CreateDomain to enable identity resolution: set Matching to true.

    To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.

    To add or remove tags on an existing Domain, see TagResource/UntagResource.

    " }, + "UpdateDomainLayout":{ + "name":"UpdateDomainLayout", + "http":{ + "method":"PUT", + "requestUri":"/domains/{DomainName}/layouts/{LayoutDefinitionName}" + }, + "input":{"shape":"UpdateDomainLayoutRequest"}, + "output":{"shape":"UpdateDomainLayoutResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates the layout used to view data for a specific domain. This API can only be invoked from the Amazon Connect admin website.

    " + }, "UpdateEventTrigger":{ "name":"UpdateEventTrigger", "http":{ @@ -1551,7 +1636,7 @@ "AttributeList":{ "type":"list", "member":{"shape":"AttributeItem"}, - "max":2, + "max":50, "min":1 }, "AttributeMap":{ @@ -1876,6 +1961,10 @@ "Value":{ "shape":"string1To255", "documentation":"

    The value of the calculated attribute.

    " + }, + "LastObjectTimestamp":{ + "shape":"timestamp", + "documentation":"

    The timestamp of the newest object included in the calculated attribute calculation.

    " } }, "documentation":"

    The object containing the values of a single calculated attribute value.

    " @@ -2052,6 +2141,10 @@ "shape":"Statistic", "documentation":"

    The aggregation operation to perform for the calculated attribute.

    " }, + "UseHistoricalData":{ + "shape":"optionalBoolean", + "documentation":"

    Whether historical data ingested before the Calculated Attribute was created should be included in calculations.

    " + }, "Tags":{ "shape":"TagMap", "documentation":"

    The tags used to organize, track, or control access for this resource.

    " @@ -2097,9 +2190,124 @@ "shape":"timestamp", "documentation":"

    The timestamp of when the calculated attribute definition was most recently edited.

    " }, + "UseHistoricalData":{ + "shape":"optionalBoolean", + "documentation":"

    Whether historical data ingested before the Calculated Attribute was created should be included in calculations.

    " + }, + "Status":{ + "shape":"ReadinessStatus", + "documentation":"

    Status of the Calculated Attribute creation (whether all historical data has been indexed).

    " + }, + "Readiness":{ + "shape":"Readiness", + "documentation":"

    Information indicating if the Calculated Attribute is ready for use by confirming all historical data has been processed and reflected.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + } + }, + "CreateDomainLayoutRequest":{ + "type":"structure", + "required":[ + "DomainName", + "LayoutDefinitionName", + "Description", + "DisplayName", + "LayoutType", + "Layout" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "LayoutDefinitionName":{ + "shape":"name", + "documentation":"

    The unique name of the layout.

    ", + "location":"uri", + "locationName":"LayoutDefinitionName" + }, + "Description":{ + "shape":"sensitiveText", + "documentation":"

    The description of the layout

    " + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

    The display name of the layout

    " + }, + "IsDefault":{ + "shape":"boolean", + "documentation":"

    If set to true for a layout, this layout will be used by default to view data. If set to false, then the layout will not be used by default, but it can be used to view data by explicitly selecting it in the console.

    " + }, + "LayoutType":{ + "shape":"LayoutType", + "documentation":"

    The type of layout that can be used to view data under a Customer Profiles domain.

    " + }, + "Layout":{ + "shape":"sensitiveString1To2000000", + "documentation":"

    A customizable layout that can be used to view data under a Customer Profiles domain.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.
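
As a sketch of how this request shape is used, a minimal CreateDomainLayout request body follows. DomainName and LayoutDefinitionName travel in the URI per the http trait above, and the Layout value is a hypothetical placeholder, since the model only constrains it to a sensitive string of 1 to 2,000,000 characters.

        {
          "Description": "Default layout for the profile explorer",
          "DisplayName": "Profile Explorer",
          "IsDefault": true,
          "LayoutType": "PROFILE_EXPLORER",
          "Layout": "{\"widgets\":[]}"
        }
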

    " + } + } + }, + "CreateDomainLayoutResponse":{ + "type":"structure", + "required":[ + "LayoutDefinitionName", + "Description", + "DisplayName", + "LayoutType", + "Layout", + "Version", + "CreatedAt" + ], + "members":{ + "LayoutDefinitionName":{ + "shape":"name", + "documentation":"

    The unique name of the layout.

    " + }, + "Description":{ + "shape":"sensitiveText", + "documentation":"

    The description of the layout

    " + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

    The display name of the layout

    " + }, + "IsDefault":{ + "shape":"boolean", + "documentation":"

    If set to true for a layout, this layout will be used by default to view data. If set to false, then the layout will not be used by default, but it can be used to view data by explicitly selecting it in the console.

    " + }, + "LayoutType":{ + "shape":"LayoutType", + "documentation":"

    The type of layout that can be used to view data under a Customer Profiles domain.

    " + }, + "Layout":{ + "shape":"sensitiveString1To2000000", + "documentation":"

    A customizable layout that can be used to view data under a Customer Profiles domain.

    " + }, + "Version":{ + "shape":"string1To255", + "documentation":"

    The version used to create the layout.

    " + }, "Tags":{ "shape":"TagMap", "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the layout was created.

    " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the layout was most recently updated.

    " } } }, @@ -2388,7 +2596,7 @@ }, "AccountNumber":{ "shape":"sensitiveString1To255", - "documentation":"

    An account number that you have given to the customer.

    " + "documentation":"

    An account number that you have assigned to the customer.

    " }, "AdditionalInformation":{ "shape":"sensitiveString1To1000", @@ -2735,7 +2943,37 @@ }, "DeleteCalculatedAttributeDefinitionResponse":{ "type":"structure", + "members":{} + }, + "DeleteDomainLayoutRequest":{ + "type":"structure", + "required":[ + "DomainName", + "LayoutDefinitionName" + ], "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "LayoutDefinitionName":{ + "shape":"name", + "documentation":"

    The unique name of the layout.

    ", + "location":"uri", + "locationName":"LayoutDefinitionName" + } + } + }, + "DeleteDomainLayoutResponse":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"message", + "documentation":"

    A message that indicates the delete request is done.

    " + } } }, "DeleteDomainRequest":{ @@ -2783,8 +3021,7 @@ }, "DeleteEventStreamResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteEventTriggerRequest":{ "type":"structure", @@ -3035,8 +3272,7 @@ }, "DeleteWorkflowResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DestinationField":{ "type":"string", @@ -3174,11 +3410,7 @@ "max":3, "min":1 }, - "End":{ - "type":"integer", - "max":366, - "min":0 - }, + "End":{"type":"integer"}, "EstimateStatus":{ "type":"string", "enum":[ @@ -3815,6 +4047,18 @@ "shape":"AttributeDetails", "documentation":"

    Mathematical expression and a list of attribute items specified in that expression.

    " }, + "UseHistoricalData":{ + "shape":"optionalBoolean", + "documentation":"

    Whether historical data ingested before the Calculated Attribute was created should be included in calculations.

    " + }, + "Status":{ + "shape":"ReadinessStatus", + "documentation":"

    Status of the Calculated Attribute creation (whether all historical data has been indexed).

    " + }, + "Readiness":{ + "shape":"Readiness", + "documentation":"

    Information indicating if the Calculated Attribute is ready for use by confirming all historical data has been processed and reflected.

    " + }, "Tags":{ "shape":"TagMap", "documentation":"

    The tags used to organize, track, or control access for this resource.

    " @@ -3867,6 +4111,86 @@ "Value":{ "shape":"string1To255", "documentation":"

    The value of the calculated attribute.

    " + }, + "LastObjectTimestamp":{ + "shape":"timestamp", + "documentation":"

    The timestamp of the newest object included in the calculated attribute calculation.

    " + } + } + }, + "GetDomainLayoutRequest":{ + "type":"structure", + "required":[ + "DomainName", + "LayoutDefinitionName" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "LayoutDefinitionName":{ + "shape":"name", + "documentation":"

    The unique name of the layout.

    ", + "location":"uri", + "locationName":"LayoutDefinitionName" + } + } + }, + "GetDomainLayoutResponse":{ + "type":"structure", + "required":[ + "LayoutDefinitionName", + "Description", + "DisplayName", + "LayoutType", + "Layout", + "Version", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "LayoutDefinitionName":{ + "shape":"name", + "documentation":"

    The unique name of the layout.

    " + }, + "Description":{ + "shape":"sensitiveText", + "documentation":"

    The description of the layout

    " + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

    The display name of the layout

    " + }, + "IsDefault":{ + "shape":"boolean", + "documentation":"

    If set to true for a layout, this layout will be used by default to view data. If set to false, then the layout will not be used by default, but it can be used to view data by explicitly selecting it in the console.

    " + }, + "LayoutType":{ + "shape":"LayoutType", + "documentation":"

    The type of layout that can be used to view data under a Customer Profiles domain.

    " + }, + "Layout":{ + "shape":"sensitiveString1To2000000", + "documentation":"

    A customizable layout that can be used to view data under a Customer Profiles domain.

    " + }, + "Version":{ + "shape":"string1To255", + "documentation":"

    The version used to create the layout.

    " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the layout was created.

    " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the layout was most recently updated.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " } } }, @@ -4990,6 +5314,60 @@ "min":20, "pattern":"arn:aws:kms:.*:[0-9]+:.*" }, + "LayoutItem":{ + "type":"structure", + "required":[ + "LayoutDefinitionName", + "Description", + "DisplayName", + "LayoutType", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "LayoutDefinitionName":{ + "shape":"name", + "documentation":"

    The unique name of the layout.

    " + }, + "Description":{ + "shape":"sensitiveText", + "documentation":"

    The description of the layout

    " + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

    The display name of the layout

    " + }, + "IsDefault":{ + "shape":"boolean", + "documentation":"

    If set to true for a layout, this layout will be used by default to view data. If set to false, then the layout will not be used by default, but it can be used to view data by explicitly selecting it in the console.

    " + }, + "LayoutType":{ + "shape":"LayoutType", + "documentation":"

    The type of layout that can be used to view data under a Customer Profiles domain.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the layout was created.

    " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the layout was most recently updated.

    " + } + }, + "documentation":"

    The layout object that contains LayoutDefinitionName, Description, DisplayName, IsDefault, LayoutType, Tags, CreatedAt, and LastUpdatedAt.

    " + }, + "LayoutList":{ + "type":"list", + "member":{"shape":"LayoutItem"} + }, + "LayoutType":{ + "type":"string", + "enum":["PROFILE_EXPLORER"] + }, "ListAccountIntegrationsRequest":{ "type":"structure", "required":["Uri"], @@ -5054,6 +5432,14 @@ "shape":"timestamp", "documentation":"

    The timestamp of when the calculated attribute definition was most recently edited.

    " }, + "UseHistoricalData":{ + "shape":"optionalBoolean", + "documentation":"

    Whether historical data ingested before the Calculated Attribute was created should be included in calculations.

    " + }, + "Status":{ + "shape":"ReadinessStatus", + "documentation":"

    Status of the Calculated Attribute creation (whether all historical data has been indexed).

    " + }, "Tags":{ "shape":"TagMap", "documentation":"

    The tags used to organize, track, or control access for this resource.

    " @@ -5116,6 +5502,10 @@ "Value":{ "shape":"string1To255", "documentation":"

    The value of the calculated attribute.

    " + }, + "LastObjectTimestamp":{ + "shape":"timestamp", + "documentation":"

    The timestamp of the newest object included in the calculated attribute calculation.

    " } }, "documentation":"

    The details of a single calculated attribute for a profile.

    " @@ -5193,6 +5583,43 @@ }, "documentation":"

    An object in a list that represents a domain.

    " }, + "ListDomainLayoutsRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "NextToken":{ + "shape":"token", + "documentation":"

    Identifies the next page of results to return.

    ", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

    The maximum number of objects returned per page.

    ", + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListDomainLayoutsResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"LayoutList", + "documentation":"

    Contains summary information about the layouts in the domain.

    " + }, + "NextToken":{ + "shape":"token", + "documentation":"

    Identifies the next page of results to return.
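
A sketch of a ListDomainLayouts response with a single layout in the domain; the names and timestamps are invented, and Items is a LayoutList of the LayoutItem shape defined later in this file.

        {
          "Items": [
            {
              "LayoutDefinitionName": "profile-explorer-default",
              "Description": "Default layout for the profile explorer",
              "DisplayName": "Profile Explorer",
              "IsDefault": true,
              "LayoutType": "PROFILE_EXPLORER",
              "CreatedAt": 1737587945,
              "LastUpdatedAt": 1737587945
            }
          ]
        }
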

    " + } + } + }, "ListDomainsRequest":{ "type":"structure", "members":{ @@ -6120,7 +6547,6 @@ }, "ObjectCount":{ "type":"integer", - "max":100, "min":1 }, "ObjectFilter":{ @@ -6283,7 +6709,7 @@ }, "AccountNumber":{ "shape":"sensitiveString1To255", - "documentation":"

    An account number that you have given to the customer.

    " + "documentation":"

    An account number that you have assigned to the customer.

    " }, "AdditionalInformation":{ "shape":"sensitiveString1To1000", @@ -6911,10 +7337,6 @@ }, "Range":{ "type":"structure", - "required":[ - "Value", - "Unit" - ], "members":{ "Value":{ "shape":"Value", @@ -6923,6 +7345,18 @@ "Unit":{ "shape":"Unit", "documentation":"

    The unit of time.

    " + }, + "ValueRange":{ + "shape":"ValueRange", + "documentation":"

    A structure letting customers specify a relative time window over which data is included in the Calculated Attribute. Use positive numbers to indicate that the endpoint is in the past, and negative numbers to indicate it is in the future. ValueRange overrides Value.

    " + }, + "TimestampSource":{ + "shape":"string1To255", + "documentation":"

    An expression specifying the field in your JSON object from which the date should be parsed. The expression should follow the structure of \"{ObjectTypeName.<Location of timestamp field in JSON pointer format>}\". For example, if your object type is MyType and the source JSON is {"generatedAt": {"timestamp": "1737587945945"}}, then TimestampSource should be \"{MyType.generatedAt.timestamp}\".

    " + }, + "TimestampFormat":{ + "shape":"string1To255", + "documentation":"

    The format in which the timestamp field in your JSON object is specified. This value should be one of EPOCHMILLI (for Unix epoch timestamps with second/millisecond level precision) or ISO_8601 (following the ISO 8601 format with second/millisecond level precision, with an optional offset of Z or in the format HH:MM or HHMM). For example, if your object type is MyType and the source JSON is {"generatedAt": {"timestamp": "2001-07-04T12:08:56.235-0700"}}, then TimestampFormat should be \"ISO_8601\".

    " } }, "documentation":"

    The relative time period over which data is included in the aggregation.
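
Putting the new Range members together, a hedged example of a relative time window that includes objects from 365 days in the past up to now, parsing timestamps from a hypothetical MyType object as described in the TimestampSource and TimestampFormat documentation above:

        "Range": {
          "Unit": "DAYS",
          "ValueRange": {
            "Start": 365,
            "End": 0
          },
          "TimestampSource": "{MyType.generatedAt.timestamp}",
          "TimestampFormat": "EPOCHMILLI"
        }
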

    " @@ -6953,6 +7387,29 @@ "type":"string", "enum":["DAYS"] }, + "Readiness":{ + "type":"structure", + "members":{ + "ProgressPercentage":{ + "shape":"percentageInteger", + "documentation":"

    Approximately how far the Calculated Attribute creation is from completion.

    " + }, + "Message":{ + "shape":"text", + "documentation":"

    Any customer-facing message about the progress of the Calculated Attribute creation.

    " + } + }, + "documentation":"

    Information indicating if the Calculated Attribute is ready for use by confirming all historical data has been processed and reflected.
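
While historical data is still being processed, the Status and Readiness members added to the calculated-attribute responses would surface roughly like this fragment; the percentage and message are illustrative.

        "Status": "IN_PROGRESS",
        "Readiness": {
          "ProgressPercentage": 42,
          "Message": "Processing historical data."
        }
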

    " + }, + "ReadinessStatus":{ + "type":"string", + "enum":[ + "PREPARING", + "IN_PROGRESS", + "COMPLETED", + "FAILED" + ] + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -7472,6 +7929,15 @@ "CASE", "ORDER", "COMMUNICATION_RECORD", + "AIR_PREFERENCE", + "HOTEL_PREFERENCE", + "AIR_BOOKING", + "AIR_SEGMENT", + "HOTEL_RESERVATION", + "HOTEL_STAY_REVENUE", + "LOYALTY", + "LOYALTY_TRANSACTION", + "LOYALTY_PROMOTION", "UNIQUE", "SECONDARY", "LOOKUP_ONLY", @@ -7482,11 +7948,7 @@ "type":"list", "member":{"shape":"StandardIdentifier"} }, - "Start":{ - "type":"integer", - "max":366, - "min":1 - }, + "Start":{"type":"integer"}, "Statistic":{ "type":"string", "enum":[ @@ -7570,8 +8032,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -7728,8 +8189,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateAddress":{ "type":"structure", @@ -7852,6 +8312,104 @@ "shape":"AttributeDetails", "documentation":"

    The mathematical expression and a list of attribute items specified in that expression.

    " }, + "UseHistoricalData":{ + "shape":"optionalBoolean", + "documentation":"

    Whether historical data ingested before the Calculated Attribute was created should be included in calculations.

    " + }, + "Status":{ + "shape":"ReadinessStatus", + "documentation":"

    Status of the Calculated Attribute creation (whether all historical data has been indexed).

    " + }, + "Readiness":{ + "shape":"Readiness", + "documentation":"

    Information indicating if the Calculated Attribute is ready for use by confirming all historical data has been processed and reflected.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + } + }, + "UpdateDomainLayoutRequest":{ + "type":"structure", + "required":[ + "DomainName", + "LayoutDefinitionName" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "LayoutDefinitionName":{ + "shape":"name", + "documentation":"

    The unique name of the layout.

    ", + "location":"uri", + "locationName":"LayoutDefinitionName" + }, + "Description":{ + "shape":"sensitiveText", + "documentation":"

    The description of the layout

    " + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

    The display name of the layout

    " + }, + "IsDefault":{ + "shape":"boolean", + "documentation":"

    If set to true for a layout, this layout will be used by default to view data. If set to false, then the layout will not be used by default, but it can be used to view data by explicitly selecting it in the console.

    " + }, + "LayoutType":{ + "shape":"LayoutType", + "documentation":"

    The type of layout that can be used to view data under a Customer Profiles domain.

    " + }, + "Layout":{ + "shape":"sensitiveString1To2000000", + "documentation":"

    A customizable layout that can be used to view data under a Customer Profiles domain.

    " + } + } + }, + "UpdateDomainLayoutResponse":{ + "type":"structure", + "members":{ + "LayoutDefinitionName":{ + "shape":"name", + "documentation":"

    The unique name of the layout.

    " + }, + "Description":{ + "shape":"sensitiveText", + "documentation":"

    The description of the layout

    " + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

    The display name of the layout

    " + }, + "IsDefault":{ + "shape":"boolean", + "documentation":"

    If set to true for a layout, this layout will be used by default to view data. If set to false, then the layout will not be used by default, but it can be used to view data by explicitly selecting it in the console.

    " + }, + "LayoutType":{ + "shape":"LayoutType", + "documentation":"

    The type of layout that can be used to view data under a Customer Profiles domain.

    " + }, + "Layout":{ + "shape":"sensitiveString1To2000000", + "documentation":"

    A customizable layout that can be used to view data under a Customer Profiles domain.

    " + }, + "Version":{ + "shape":"string1To255", + "documentation":"

    The version used to create the layout.

    " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the layout was created.

    " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the layout was most recently updated.

    " + }, "Tags":{ "shape":"TagMap", "documentation":"

    The tags used to organize, track, or control access for this resource.

    " @@ -8045,7 +8603,7 @@ }, "AccountNumber":{ "shape":"sensitiveString0To255", - "documentation":"

    An account number that you have given to the customer.

    " + "documentation":"

    An account number that you have assigned to the customer.

    " }, "PartyType":{ "shape":"PartyType", @@ -8145,8 +8703,8 @@ }, "Value":{ "type":"integer", - "max":366, - "min":1 + "max":2147483647, + "min":0 }, "ValueList":{ "type":"list", @@ -8154,6 +8712,26 @@ "max":10, "min":1 }, + "ValueRange":{ + "type":"structure", + "required":[ + "Start", + "End" + ], + "members":{ + "Start":{ + "shape":"ValueRangeStart", + "documentation":"

    The start time of when to include objects. Use positive numbers to indicate that the starting point is in the past, and negative numbers to indicate it is in the future.

    " + }, + "End":{ + "shape":"ValueRangeEnd", + "documentation":"

    The end time of when to include objects. Use positive numbers to indicate that the end point is in the past, and negative numbers to indicate it is in the future.

    " + } + }, + "documentation":"

    A structure letting customers specify a relative time window over which data is included in the Calculated Attribute. Use positive numbers to indicate that the endpoint is in the past, and negative numbers to indicate it is in the future. ValueRange overrides Value.

    " + }, + "ValueRangeEnd":{"type":"integer"}, + "ValueRangeStart":{"type":"integer"}, "Values":{ "type":"list", "member":{"shape":"string1To255"}, @@ -8311,6 +8889,11 @@ }, "optionalBoolean":{"type":"boolean"}, "optionalLong":{"type":"long"}, + "percentageInteger":{ + "type":"integer", + "max":100, + "min":0 + }, "requestValueList":{ "type":"list", "member":{"shape":"string1To255"} @@ -8351,6 +8934,12 @@ "min":1, "sensitive":true }, + "sensitiveString1To2000000":{ + "type":"string", + "max":2000000, + "min":1, + "sensitive":true + }, "sensitiveString1To255":{ "type":"string", "max":255, diff --git a/services/databasemigration/pom.xml b/services/databasemigration/pom.xml index c723bff995bd..c17b0c7f4cfc 100644 --- a/services/databasemigration/pom.xml +++ b/services/databasemigration/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT databasemigration AWS Java SDK :: Services :: AWS Database Migration Service diff --git a/services/databrew/pom.xml b/services/databrew/pom.xml index b8e3be49c7dd..38951d8ef431 100644 --- a/services/databrew/pom.xml +++ b/services/databrew/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT databrew AWS Java SDK :: Services :: Data Brew diff --git a/services/dataexchange/pom.xml b/services/dataexchange/pom.xml index 4487bfb9009f..78d93066aaa9 100644 --- a/services/dataexchange/pom.xml +++ b/services/dataexchange/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT dataexchange AWS Java SDK :: Services :: DataExchange diff --git a/services/dataexchange/src/main/resources/codegen-resources/service-2.json b/services/dataexchange/src/main/resources/codegen-resources/service-2.json index ead739dc3812..15dfab2322e5 100644 --- a/services/dataexchange/src/main/resources/codegen-resources/service-2.json +++ b/services/dataexchange/src/main/resources/codegen-resources/service-2.json @@ -1245,6 +1245,10 @@ "Event":{ "shape":"Event", "documentation":"

    What occurs to start an action.

    " + }, + "Tags":{ + "shape":"MapOf__string", + "documentation":"

    Key-value pairs that you can associate with the event action.

    " } } }, @@ -1271,6 +1275,10 @@ "shape":"Id", "documentation":"

    The unique identifier for the event action.

    " }, + "Tags":{ + "shape":"MapOf__string", + "documentation":"

    The tags for the event action.

    " + }, "UpdatedAt":{ "shape":"Timestamp", "documentation":"

    The date and time that the event action was last updated, in ISO 8601 format.

    " @@ -2236,6 +2244,10 @@ "shape":"Id", "documentation":"

    The unique identifier for the event action.

    " }, + "Tags":{ + "shape":"MapOf__string", + "documentation":"

    The tags for the event action.

    " + }, "UpdatedAt":{ "shape":"Timestamp", "documentation":"

    The date and time that the event action was last updated, in ISO 8601 format.

    " diff --git a/services/datapipeline/pom.xml b/services/datapipeline/pom.xml index 47fdc29ab953..0c2d2425619a 100644 --- a/services/datapipeline/pom.xml +++ b/services/datapipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT datapipeline AWS Java SDK :: Services :: AWS Data Pipeline diff --git a/services/datasync/pom.xml b/services/datasync/pom.xml index 9dc3c966231f..da68771b022f 100644 --- a/services/datasync/pom.xml +++ b/services/datasync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT datasync AWS Java SDK :: Services :: DataSync diff --git a/services/datasync/src/main/resources/codegen-resources/service-2.json b/services/datasync/src/main/resources/codegen-resources/service-2.json index 588c4a8473f9..13f6ff5f8641 100644 --- a/services/datasync/src/main/resources/codegen-resources/service-2.json +++ b/services/datasync/src/main/resources/codegen-resources/service-2.json @@ -56,7 +56,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

    Creates a transfer location for a Microsoft Azure Blob Storage container. DataSync can use this location as a transfer source or destination.

    Before you begin, make sure you know how DataSync accesses Azure Blob Storage and works with access tiers and blob types. You also need a DataSync agent that can connect to your container.

    " + "documentation":"

    Creates a transfer location for a Microsoft Azure Blob Storage container. DataSync can use this location as a transfer source or destination. You can make transfers with or without a DataSync agent that connects to your container.

    Before you begin, make sure you know how DataSync accesses Azure Blob Storage and works with access tiers and blob types.

    " }, "CreateLocationEfs":{ "name":"CreateLocationEfs", @@ -168,7 +168,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

    Creates a transfer location for an object storage system. DataSync can use this location as a source or destination for transferring data.

    Before you begin, make sure that you understand the prerequisites for DataSync to work with object storage systems.

    " + "documentation":"

    Creates a transfer location for an object storage system. DataSync can use this location as a source or destination for transferring data. You can make transfers with or without a DataSync agent.

    Before you begin, make sure that you understand the prerequisites for DataSync to work with object storage systems.

    " }, "CreateLocationS3":{ "name":"CreateLocationS3", @@ -832,7 +832,10 @@ }, "AzureBlobAuthenticationType":{ "type":"string", - "enum":["SAS"] + "enum":[ + "SAS", + "NONE" + ] }, "AzureBlobContainerUrl":{ "type":"string", @@ -885,6 +888,20 @@ "type":"structure", "members":{} }, + "CmkSecretConfig":{ + "type":"structure", + "members":{ + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    Specifies the ARN for the DataSync-managed Secrets Manager secret that is used to access a specific storage location. This property is generated by DataSync and is read-only. DataSync encrypts this secret with the KMS key that you specify for KmsKeyArn.

    " + }, + "KmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

    Specifies the ARN for the customer-managed KMS key that DataSync uses to encrypt the DataSync-managed secret stored for SecretArn. DataSync provides this key to Secrets Manager.

    " + } + }, + "documentation":"

    Specifies configuration information for a DataSync-managed secret, such as an authentication token or secret key that DataSync uses to access a specific storage location, with a customer-managed KMS key.

    You can use either CmkSecretConfig or CustomSecretConfig to provide credentials for a CreateLocation request. Do not provide both parameters for the same request.

    " + }, "CreateAgentRequest":{ "type":"structure", "required":["ActivationKey"], @@ -930,8 +947,7 @@ "type":"structure", "required":[ "ContainerUrl", - "AuthenticationType", - "AgentArns" + "AuthenticationType" ], "members":{ "ContainerUrl":{ @@ -944,7 +960,7 @@ }, "SasConfiguration":{ "shape":"AzureBlobSasConfiguration", - "documentation":"

    Specifies the SAS configuration that allows DataSync to access your Azure Blob Storage.

    " + "documentation":"

    Specifies the SAS configuration that allows DataSync to access your Azure Blob Storage.

    If you provide an authentication token using SasConfiguration, but do not provide secret configuration details using CmkSecretConfig or CustomSecretConfig, then DataSync stores the token using your Amazon Web Services account's Secrets Manager secret.

    " }, "BlobType":{ "shape":"AzureBlobType", @@ -960,11 +976,19 @@ }, "AgentArns":{ "shape":"AgentArnList", - "documentation":"

    Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect with your Azure Blob Storage container.

    You can specify more than one agent. For more information, see Using multiple agents for your transfer.

    " + "documentation":"

    (Optional) Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect with your Azure Blob Storage container. If you are setting up an agentless cross-cloud transfer, you do not need to specify a value for this parameter.

    You can specify more than one agent. For more information, see Using multiple agents for your transfer.

    Make sure you configure this parameter correctly when you first create your storage location. You cannot add or remove agents from a storage location after you create it.

    " }, "Tags":{ "shape":"InputTagList", "documentation":"

    Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your transfer location.

    " + }, + "CmkSecretConfig":{ + "shape":"CmkSecretConfig", + "documentation":"

    Specifies configuration information for a DataSync-managed secret, which includes the authentication token that DataSync uses to access a specific AzureBlob storage location, with a customer-managed KMS key.

    When you include this parameter as part of a CreateLocationAzureBlob request, you provide only the KMS key ARN. DataSync uses this KMS key together with the authentication token you specify for SasConfiguration to create a DataSync-managed secret to store the location access credentials.

    Make sure that DataSync has permission to access the KMS key that you specify.

    You can use either CmkSecretConfig (with SasConfiguration) or CustomSecretConfig (without SasConfiguration) to provide credentials for a CreateLocationAzureBlob request. Do not provide both parameters for the same request.

    " + }, + "CustomSecretConfig":{ + "shape":"CustomSecretConfig", + "documentation":"

    Specifies configuration information for a customer-managed Secrets Manager secret where the authentication token for an AzureBlob storage location is stored in plain text. This configuration includes the secret ARN, and the ARN for an IAM role that provides access to the secret.

    You can use either CmkSecretConfig (with SasConfiguration) or CustomSecretConfig (without SasConfiguration) to provide credentials for a CreateLocationAzureBlob request. Do not provide both parameters for the same request.
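
To make the two mutually exclusive options concrete, a hedged sketch of a CreateLocationAzureBlob request that pairs SasConfiguration (using its existing Token member) with CmkSecretConfig; the container URL, SAS token, and KMS key ARN are placeholders, and only the KMS key ARN is supplied because DataSync creates the managed secret itself.

        {
          "ContainerUrl": "https://myaccount.blob.core.windows.net/mycontainer",
          "AuthenticationType": "SAS",
          "SasConfiguration": {
            "Token": "sv=2025-01-01&ss=b&sig=EXAMPLE"
          },
          "CmkSecretConfig": {
            "KmsKeyArn": "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"
          }
        }
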

    " } } }, @@ -1298,8 +1322,7 @@ "type":"structure", "required":[ "ServerHostname", - "BucketName", - "AgentArns" + "BucketName" ], "members":{ "ServerHostname":{ @@ -1332,7 +1355,7 @@ }, "AgentArns":{ "shape":"AgentArnList", - "documentation":"

    Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can connect with your object storage system.

    " + "documentation":"

    (Optional) Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can connect with your object storage system. If you are setting up an agentless cross-cloud transfer, you do not need to specify a value for this parameter.

    Make sure you configure this parameter correctly when you first create your storage location. You cannot add or remove agents from a storage location after you create it.

    " }, "Tags":{ "shape":"InputTagList", @@ -1341,6 +1364,14 @@ "ServerCertificate":{ "shape":"ObjectStorageCertificate", "documentation":"

    Specifies a certificate chain for DataSync to authenticate with your object storage system if the system uses a private or self-signed certificate authority (CA). You must specify a single .pem file with a full certificate chain (for example, file:///home/user/.ssh/object_storage_certificates.pem).

    The certificate chain might include:

    • The object storage system's certificate

    • All intermediate certificates (if there are any)

    • The root certificate of the signing CA

    You can concatenate your certificates into a .pem file (which can be up to 32768 bytes before base64 encoding). The following example cat command creates an object_storage_certificates.pem file that includes three certificates:

    cat object_server_certificate.pem intermediate_certificate.pem ca_root_certificate.pem > object_storage_certificates.pem

    To use this parameter, configure ServerProtocol to HTTPS.

    " + }, + "CmkSecretConfig":{ + "shape":"CmkSecretConfig", + "documentation":"

    Specifies configuration information for a DataSync-managed secret, which includes the SecretKey that DataSync uses to access a specific object storage location, with a customer-managed KMS key.

    When you include this parameter as part of a CreateLocationObjectStorage request, you provide only the KMS key ARN. DataSync uses this KMS key together with the value you specify for the SecretKey parameter to create a DataSync-managed secret to store the location access credentials.

    Make sure that DataSync has permission to access the KMS key that you specify.

    You can use either CmkSecretConfig (with SecretKey) or CustomSecretConfig (without SecretKey) to provide credentials for a CreateLocationObjectStorage request. Do not provide both parameters for the same request.

    " + }, + "CustomSecretConfig":{ + "shape":"CustomSecretConfig", + "documentation":"

    Specifies configuration information for a customer-managed Secrets Manager secret where the secret key for a specific object storage location is stored in plain text. This configuration includes the secret ARN, and the ARN for an IAM role that provides access to the secret.

    You can use either CmkSecretConfig (with SecretKey) or CustomSecretConfig (without SecretKey) to provide credentials for a CreateLocationObjectStorage request. Do not provide both parameters for the same request.

    " } }, "documentation":"

    CreateLocationObjectStorageRequest
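
The object-storage variant mirrors the Azure Blob case: a sketch of a CreateLocationObjectStorage request that keeps credentials in a customer-managed Secrets Manager secret via CustomSecretConfig instead of passing SecretKey; the hostname, bucket, and ARNs are placeholders.

        {
          "ServerHostname": "storage.example.com",
          "BucketName": "my-bucket",
          "CustomSecretConfig": {
            "SecretArn": "arn:aws:secretsmanager:us-east-1:111122223333:secret:EXAMPLE",
            "SecretAccessRoleArn": "arn:aws:iam::111122223333:role/EXAMPLE"
          }
        }
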

    " @@ -1522,7 +1553,7 @@ }, "TaskMode":{ "shape":"TaskMode", - "documentation":"

    Specifies one of the following task modes for your data transfer:

    • ENHANCED - Transfer virtually unlimited numbers of objects with higher performance than Basic mode. Enhanced mode tasks optimize the data transfer process by listing, preparing, transferring, and verifying data in parallel. Enhanced mode is currently available for transfers between Amazon S3 locations.

      To create an Enhanced mode task, the IAM role that you use to call the CreateTask operation must have the iam:CreateServiceLinkedRole permission.

    • BASIC (default) - Transfer files or objects between Amazon Web Services storage and all other supported DataSync locations. Basic mode tasks are subject to quotas on the number of files, objects, and directories in a dataset. Basic mode sequentially prepares, transfers, and verifies data, making it slower than Enhanced mode for most workloads.

    For more information, see Understanding task mode differences.

    " + "documentation":"

    Specifies one of the following task modes for your data transfer:

    • ENHANCED - Transfer virtually unlimited numbers of objects with higher performance than Basic mode. Enhanced mode tasks optimize the data transfer process by listing, preparing, transferring, and verifying data in parallel. Enhanced mode is currently available for transfers between Amazon S3 locations, transfers between Azure Blob and Amazon S3 without an agent, and transfers between other clouds and Amazon S3 without an agent.

      To create an Enhanced mode task, the IAM role that you use to call the CreateTask operation must have the iam:CreateServiceLinkedRole permission.

    • BASIC (default) - Transfer files or objects between Amazon Web Services storage and all other supported DataSync locations. Basic mode tasks are subject to quotas on the number of files, objects, and directories in a dataset. Basic mode sequentially prepares, transfers, and verifies data, making it slower than Enhanced mode for most workloads.

    For more information, see Understanding task mode differences.
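
    For illustration only (not part of the service model), a sketch of selecting the mode with the AWS SDK for Java v2; the location ARNs are placeholders.

        import software.amazon.awssdk.services.datasync.model.CreateTaskRequest;
        import software.amazon.awssdk.services.datasync.model.TaskMode;

        // Enhanced mode requires iam:CreateServiceLinkedRole on the calling role.
        CreateTaskRequest task = CreateTaskRequest.builder()
                .sourceLocationArn("arn:aws:datasync:us-east-1:111122223333:location/loc-src")
                .destinationLocationArn("arn:aws:datasync:us-east-1:111122223333:location/loc-dst")
                .taskMode(TaskMode.ENHANCED) // omit to get the BASIC default
                .build();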

    " } }, "documentation":"

    CreateTaskRequest

    " @@ -1537,6 +1568,20 @@ }, "documentation":"

    CreateTaskResponse

    " }, + "CustomSecretConfig":{ + "type":"structure", + "members":{ + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    Specifies the ARN for a Secrets Manager secret.

    " + }, + "SecretAccessRoleArn":{ + "shape":"IamRoleArnOrEmptyString", + "documentation":"

    Specifies the ARN for the Identity and Access Management role that DataSync uses to access the secret specified for SecretArn.

    " + } + }, + "documentation":"

    Specifies configuration information for a customer-managed Secrets Manager secret where a storage location authentication token or secret key is stored in plain text. This configuration includes the secret ARN and the ARN for an IAM role that provides access to the secret.

    You can use either CmkSecretConfig or CustomSecretConfig to provide credentials for a CreateLocation request. Do not provide both parameters for the same request.
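
    For illustration only (not part of the service model), a sketch of building this shape with the AWS SDK for Java v2; builder method names are assumed from the members above, and the ARNs are placeholders.

        import software.amazon.awssdk.services.datasync.model.CustomSecretConfig;

        // Customer-managed secret: DataSync reads the plain-text secret through the given IAM role.
        CustomSecretConfig customSecret = CustomSecretConfig.builder()
                .secretArn("arn:aws:secretsmanager:us-east-1:111122223333:secret:my-location-secret")
                .secretAccessRoleArn("arn:aws:iam::111122223333:role/DataSyncSecretAccess")
                .build();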

    " + }, "DeleteAgentRequest":{ "type":"structure", "required":["AgentArn"], @@ -1671,6 +1716,18 @@ "CreationTime":{ "shape":"Time", "documentation":"

    The time that your Azure Blob Storage transfer location was created.

    " + }, + "ManagedSecretConfig":{ + "shape":"ManagedSecretConfig", + "documentation":"

    Describes configuration information for a DataSync-managed secret, such as an authentication token that DataSync uses to access a specific storage location. DataSync uses the default Amazon Web Services-managed KMS key to encrypt this secret in Secrets Manager.

    " + }, + "CmkSecretConfig":{ + "shape":"CmkSecretConfig", + "documentation":"

    Describes configuration information for a DataSync-managed secret, such as an authentication token that DataSync uses to access a specific storage location, with a customer-managed KMS key.

    " + }, + "CustomSecretConfig":{ + "shape":"CustomSecretConfig", + "documentation":"

    Describes configuration information for a customer-managed secret, such as an authentication token that DataSync uses to access a specific storage location, with a customer-managed KMS key.

    " } } }, @@ -2003,6 +2060,18 @@ "ServerCertificate":{ "shape":"ObjectStorageCertificate", "documentation":"

    The certificate chain for DataSync to authenticate with your object storage system if the system uses a private or self-signed certificate authority (CA).

    " + }, + "ManagedSecretConfig":{ + "shape":"ManagedSecretConfig", + "documentation":"

    Describes configuration information for a DataSync-managed secret, such as an authentication token or set of credentials that DataSync uses to access a specific transfer location. DataSync uses the default Amazon Web Services-managed KMS key to encrypt this secret in Secrets Manager.

    " + }, + "CmkSecretConfig":{ + "shape":"CmkSecretConfig", + "documentation":"

    Describes configuration information for a DataSync-managed secret, such as an authentication token or set of credentials that DataSync uses to access a specific transfer location, and a customer-managed KMS key.

    " + }, + "CustomSecretConfig":{ + "shape":"CustomSecretConfig", + "documentation":"

    Describes configuration information for a customer-managed secret, such as an authentication token or set of credentials that DataSync uses to access a specific transfer location, and a customer-managed KMS key.

    " } }, "documentation":"

    DescribeLocationObjectStorageResponse

    " @@ -2139,7 +2208,7 @@ }, "StartTime":{ "shape":"Time", - "documentation":"

    The time when the task execution started.

    " + "documentation":"

    The time that DataSync sends the request to start the task execution. For non-queued tasks, LaunchTime and StartTime are typically the same. For queued tasks, LaunchTime is typically later than StartTime because previously queued tasks must finish running before newer tasks can begin.

    " }, "EstimatedFilesToTransfer":{ "shape":"long", @@ -2208,6 +2277,14 @@ "FilesFailed":{ "shape":"TaskExecutionFilesFailedDetail", "documentation":"

    The number of objects that DataSync fails to prepare, transfer, verify, and delete during your task execution.

    Applies only to Enhanced mode tasks.

    " + }, + "LaunchTime":{ + "shape":"Time", + "documentation":"

    The time that the task execution actually begins. For non-queued tasks, LaunchTime and StartTime are typically the same. For queued tasks, LaunchTime is typically later than StartTime because previously queued tasks must finish running before newer tasks can begin.

    " + }, + "EndTime":{ + "shape":"Time", + "documentation":"

    The time that the transfer task ends.
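
    For illustration only (not part of the service model), a sketch that derives queueing delay and run time from the three timestamps; the launchTime and endTime accessors are assumed from the new members above, and endTime is only present once the execution has finished.

        import java.time.Duration;
        import software.amazon.awssdk.services.datasync.model.DescribeTaskExecutionResponse;

        // dataSync is a software.amazon.awssdk.services.datasync.DataSyncClient;
        // taskExecutionArn is the ARN of a finished execution (placeholder).
        DescribeTaskExecutionResponse exec =
                dataSync.describeTaskExecution(r -> r.taskExecutionArn(taskExecutionArn));

        Duration queued = Duration.between(exec.startTime(), exec.launchTime()); // near zero unless queued
        Duration running = Duration.between(exec.launchTime(), exec.endTime());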

    " } }, "documentation":"

    DescribeTaskExecutionResponse

    " @@ -2629,6 +2706,11 @@ "max":2048, "pattern":"^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):iam::[0-9]{12}:role/.*$" }, + "IamRoleArnOrEmptyString":{ + "type":"string", + "max":2048, + "pattern":"^(arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):iam::[0-9]{12}:role/[a-zA-Z0-9+=,.@_-]+|)$" + }, "InputTagList":{ "type":"list", "member":{"shape":"TagListEntry"}, @@ -2669,6 +2751,11 @@ "min":1, "pattern":"^.+$" }, + "KmsKeyArn":{ + "type":"string", + "max":2048, + "pattern":"^(arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):kms:[a-z\\-0-9]+:[0-9]{12}:key/.*|)$" + }, "KmsKeyProviderUri":{ "type":"string", "max":255, @@ -2908,6 +2995,16 @@ "TRANSFER" ] }, + "ManagedSecretConfig":{ + "type":"structure", + "members":{ + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    Specifies the ARN for a Secrets Manager secret.

    " + } + }, + "documentation":"

    Specifies configuration information for a DataSync-managed secret, such as an authentication token or set of credentials that DataSync uses to access a specific transfer location. DataSync uses the default Amazon Web Services-managed KMS key to encrypt this secret in Secrets Manager.

    " + }, "ManifestAction":{ "type":"string", "enum":["TRANSFER"] @@ -3409,6 +3506,11 @@ "DISABLED" ] }, + "SecretArn":{ + "type":"string", + "max":2048, + "pattern":"^(arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):secretsmanager:[a-z\\-0-9]+:[0-9]{12}:secret:.*|)$" + }, "ServerHostname":{ "type":"string", "max":255, @@ -3952,7 +4054,15 @@ }, "AgentArns":{ "shape":"AgentArnList", - "documentation":"

    Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect with your Azure Blob Storage container.

    You can specify more than one agent. For more information, see Using multiple agents for your transfer.

    " + "documentation":"

    (Optional) Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect with your Azure Blob Storage container. If you are setting up an agentless cross-cloud transfer, you do not need to specify a value for this parameter.

    You can specify more than one agent. For more information, see Using multiple agents for your transfer.

    You cannot add or remove agents from a storage location after you initially create it.

    " + }, + "CmkSecretConfig":{ + "shape":"CmkSecretConfig", + "documentation":"

    Specifies configuration information for a DataSync-managed secret, such as an authentication token or set of credentials that DataSync uses to access a specific transfer location, and a customer-managed KMS key.

    " + }, + "CustomSecretConfig":{ + "shape":"CustomSecretConfig", + "documentation":"

    Specifies configuration information for a customer-managed secret, such as an authentication token or set of credentials that DataSync uses to access a specific transfer location, and a customer-managed KMS key.

    " } } }, @@ -4199,11 +4309,19 @@ }, "AgentArns":{ "shape":"AgentArnList", - "documentation":"

    Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can connect with your object storage system.

    " + "documentation":"

    (Optional) Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can connect with your object storage system. If you are setting up an agentless cross-cloud transfer, you do not need to specify a value for this parameter.

    You cannot add or remove agents from a storage location after you initially create it.

    " }, "ServerCertificate":{ "shape":"ObjectStorageCertificate", "documentation":"

    Specifies a certificate chain for DataSync to authenticate with your object storage system if the system uses a private or self-signed certificate authority (CA). You must specify a single .pem file with a full certificate chain (for example, file:///home/user/.ssh/object_storage_certificates.pem).

    The certificate chain might include:

    • The object storage system's certificate

    • All intermediate certificates (if there are any)

    • The root certificate of the signing CA

    You can concatenate your certificates into a .pem file (which can be up to 32768 bytes before base64 encoding). The following example cat command creates an object_storage_certificates.pem file that includes three certificates:

    cat object_server_certificate.pem intermediate_certificate.pem ca_root_certificate.pem > object_storage_certificates.pem

    To use this parameter, configure ServerProtocol to HTTPS.

    Updating this parameter doesn't interfere with tasks that you have in progress.

    " + }, + "CmkSecretConfig":{ + "shape":"CmkSecretConfig", + "documentation":"

    Specifies configuration information for a DataSync-managed secret, such as an authentication token or set of credentials that DataSync uses to access a specific transfer location, and a customer-managed KMS key.

    " + }, + "CustomSecretConfig":{ + "shape":"CustomSecretConfig", + "documentation":"

    Specifies configuration information for a customer-managed secret, such as an authentication token or set of credentials that DataSync uses to access a specific transfer location, and a customer-managed KMS key.

    " } } }, @@ -4385,5 +4503,5 @@ "long":{"type":"long"}, "string":{"type":"string"} }, - "documentation":"DataSync

    DataSync is an online data movement and discovery service that simplifies data migration and helps you quickly, easily, and securely transfer your file or object data to, from, and between Amazon Web Services storage services.

    This API interface reference includes documentation for using DataSync programmatically. For complete information, see the DataSync User Guide .

    " + "documentation":"DataSync

    DataSync is an online data movement service that simplifies data migration and helps you quickly, easily, and securely transfer your file or object data to, from, and between Amazon Web Services storage services.

    This API interface reference includes documentation for using DataSync programmatically. For complete information, see the DataSync User Guide .

    " } diff --git a/services/datazone/pom.xml b/services/datazone/pom.xml index 7d27fbece529..8cc768ea8c61 100644 --- a/services/datazone/pom.xml +++ b/services/datazone/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT datazone AWS Java SDK :: Services :: Data Zone diff --git a/services/dax/pom.xml b/services/dax/pom.xml index b23118ce527e..1ac0a9a55d75 100644 --- a/services/dax/pom.xml +++ b/services/dax/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT dax AWS Java SDK :: Services :: Amazon DynamoDB Accelerator (DAX) diff --git a/services/deadline/pom.xml b/services/deadline/pom.xml index 5000e338daf0..37f84d0611f7 100644 --- a/services/deadline/pom.xml +++ b/services/deadline/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT deadline AWS Java SDK :: Services :: Deadline diff --git a/services/detective/pom.xml b/services/detective/pom.xml index 5f2b1283358f..fbc26de6378c 100644 --- a/services/detective/pom.xml +++ b/services/detective/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT detective AWS Java SDK :: Services :: Detective diff --git a/services/devicefarm/pom.xml b/services/devicefarm/pom.xml index 90a0bb0acd9b..79348f8fbbae 100644 --- a/services/devicefarm/pom.xml +++ b/services/devicefarm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT devicefarm AWS Java SDK :: Services :: AWS Device Farm diff --git a/services/devopsguru/pom.xml b/services/devopsguru/pom.xml index 0a0ce3bd15a4..e99e9d771aa2 100644 --- a/services/devopsguru/pom.xml +++ b/services/devopsguru/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT devopsguru AWS Java SDK :: Services :: Dev Ops Guru diff --git a/services/directconnect/pom.xml b/services/directconnect/pom.xml index 80a640e28709..f0ad135e19ca 100644 --- a/services/directconnect/pom.xml +++ b/services/directconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT directconnect AWS Java SDK :: Services :: AWS Direct Connect diff --git a/services/directory/pom.xml b/services/directory/pom.xml index cb1fc6d8d7b6..28d181687e07 100644 --- a/services/directory/pom.xml +++ b/services/directory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT directory AWS Java SDK :: Services :: AWS Directory Service diff --git a/services/directoryservicedata/pom.xml b/services/directoryservicedata/pom.xml index ae9af0d50f47..2b7475016bea 100644 --- a/services/directoryservicedata/pom.xml +++ b/services/directoryservicedata/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT directoryservicedata AWS Java SDK :: Services :: Directory Service Data diff --git a/services/dlm/pom.xml b/services/dlm/pom.xml index b8688307a59d..78ff60e9b4fa 100644 --- a/services/dlm/pom.xml +++ b/services/dlm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT dlm AWS Java SDK :: Services :: DLM diff --git a/services/docdb/pom.xml b/services/docdb/pom.xml index 7bd8d33c76a8..739acf997383 100644 --- a/services/docdb/pom.xml +++ b/services/docdb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT docdb AWS Java SDK :: Services :: DocDB diff --git a/services/docdbelastic/pom.xml b/services/docdbelastic/pom.xml 
index 2db97a4d6188..198c641a5a14 100644 --- a/services/docdbelastic/pom.xml +++ b/services/docdbelastic/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT docdbelastic AWS Java SDK :: Services :: Doc DB Elastic diff --git a/services/drs/pom.xml b/services/drs/pom.xml index c1d20018ed21..9ebbe6a84183 100644 --- a/services/drs/pom.xml +++ b/services/drs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT drs AWS Java SDK :: Services :: Drs diff --git a/services/dsql/pom.xml b/services/dsql/pom.xml index 9e7abe49624f..933b8a505bb6 100644 --- a/services/dsql/pom.xml +++ b/services/dsql/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT dsql AWS Java SDK :: Services :: DSQL diff --git a/services/dynamodb/pom.xml b/services/dynamodb/pom.xml index afb0cb5e164c..f22a9234867f 100644 --- a/services/dynamodb/pom.xml +++ b/services/dynamodb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT dynamodb AWS Java SDK :: Services :: Amazon DynamoDB diff --git a/services/ebs/pom.xml b/services/ebs/pom.xml index 65dcabe8e1f7..18c4b3969231 100644 --- a/services/ebs/pom.xml +++ b/services/ebs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ebs AWS Java SDK :: Services :: EBS diff --git a/services/ec2/pom.xml b/services/ec2/pom.xml index c956d6a76c08..489cb9448f77 100644 --- a/services/ec2/pom.xml +++ b/services/ec2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ec2 AWS Java SDK :: Services :: Amazon EC2 diff --git a/services/ec2/src/main/resources/codegen-resources/service-2.json b/services/ec2/src/main/resources/codegen-resources/service-2.json index eb2da9e359eb..8cdf1de7bfd4 100644 --- a/services/ec2/src/main/resources/codegen-resources/service-2.json +++ b/services/ec2/src/main/resources/codegen-resources/service-2.json @@ -410,7 +410,7 @@ }, "input":{"shape":"AttachVolumeRequest"}, "output":{"shape":"VolumeAttachment"}, - "documentation":"

    Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

    Encrypted EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS encryption in the Amazon EBS User Guide.

    After you attach an EBS volume, you must make it available. For more information, see Make an EBS volume available for use.

    If a volume has an Amazon Web Services Marketplace product code:

    • The volume can be attached only to a stopped instance.

    • Amazon Web Services Marketplace product codes are copied from the volume to the instance.

    • You must be subscribed to the product.

    • The instance type and operating system of the instance must support the product. For example, you can't detach a volume from a Windows instance and attach it to a Linux instance.

    For more information, see Attach an Amazon EBS volume to an instance in the Amazon EBS User Guide.

    " + "documentation":"

    Attaches an Amazon EBS volume to a running or stopped instance, and exposes it to the instance with the specified device name.

    The maximum number of Amazon EBS volumes that you can attach to an instance depends on the instance type. If you exceed the volume attachment limit for an instance type, the attachment request fails with the AttachmentLimitExceeded error. For more information, see Instance volume limits.

    After you attach an EBS volume, you must make it available for use. For more information, see Make an EBS volume available for use.

    If a volume has an Amazon Web Services Marketplace product code:

    • The volume can be attached only to a stopped instance.

    • Amazon Web Services Marketplace product codes are copied from the volume to the instance.

    • You must be subscribed to the product.

    • The instance type and operating system of the instance must support the product. For example, you can't detach a volume from a Windows instance and attach it to a Linux instance.

    For more information, see Attach an Amazon EBS volume to an instance in the Amazon EBS User Guide.
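
    For illustration only (not part of the service model), a minimal AWS SDK for Java v2 sketch of the call; the identifiers are placeholders.

        import software.amazon.awssdk.services.ec2.Ec2Client;

        Ec2Client ec2 = Ec2Client.create();

        // The volume and the instance must be in the same Availability Zone.
        ec2.attachVolume(r -> r
                .device("/dev/sdf")
                .instanceId("i-0123456789abcdef0")
                .volumeId("vol-0123456789abcdef0"));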

    " }, "AttachVpnGateway":{ "name":"AttachVpnGateway", @@ -528,7 +528,7 @@ }, "input":{"shape":"CancelImageLaunchPermissionRequest"}, "output":{"shape":"CancelImageLaunchPermissionResult"}, - "documentation":"

    Removes your Amazon Web Services account from the launch permissions for the specified AMI. For more information, see Cancel having an AMI shared with your Amazon Web Services account in the Amazon EC2 User Guide.

    " + "documentation":"

    Removes your Amazon Web Services account from the launch permissions for the specified AMI. For more information, see Cancel having an AMI shared with your Amazon Web Services account in the Amazon EC2 User Guide.

    " }, "CancelImportTask":{ "name":"CancelImportTask", @@ -598,7 +598,7 @@ }, "input":{"shape":"CopyImageRequest"}, "output":{"shape":"CopyImageResult"}, - "documentation":"

    Initiates an AMI copy operation. You can copy an AMI from one Region to another, or from a Region to an Outpost. You can't copy an AMI from an Outpost to a Region, from one Outpost to another, or within the same Outpost. To copy an AMI to another partition, see CreateStoreImageTask.

    When you copy an AMI from one Region to another, the destination Region is the current Region.

    When you copy an AMI from a Region to an Outpost, specify the ARN of the Outpost as the destination. Backing snapshots copied to an Outpost are encrypted by default using the default encryption key for the Region or the key that you specify. Outposts do not support unencrypted snapshots.

    For information about the prerequisites when copying an AMI, see Copy an AMI in the Amazon EC2 User Guide.

    " + "documentation":"

    Initiates an AMI copy operation. You can copy an AMI from one Region to another, or from a Region to an Outpost. You can't copy an AMI from an Outpost to a Region, from one Outpost to another, or within the same Outpost. To copy an AMI to another partition, see CreateStoreImageTask.

    When you copy an AMI from one Region to another, the destination Region is the current Region.

    When you copy an AMI from a Region to an Outpost, specify the ARN of the Outpost as the destination. Backing snapshots copied to an Outpost are encrypted by default using the default encryption key for the Region or the key that you specify. Outposts do not support unencrypted snapshots.

    For information about the prerequisites when copying an AMI, see Copy an Amazon EC2 AMI in the Amazon EC2 User Guide.

    " }, "CopySnapshot":{ "name":"CopySnapshot", @@ -788,7 +788,7 @@ }, "input":{"shape":"CreateImageRequest"}, "output":{"shape":"CreateImageResult"}, - "documentation":"

    Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that is either running or stopped.

    If you customized your instance with instance store volumes or Amazon EBS volumes in addition to the root device volume, the new AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, the instance automatically launches with those additional volumes.

    For more information, see Create an Amazon EBS-backed Linux AMI in the Amazon Elastic Compute Cloud User Guide.

    " + "documentation":"

    Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that is either running or stopped.

    If you customized your instance with instance store volumes or Amazon EBS volumes in addition to the root device volume, the new AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, the instance automatically launches with those additional volumes.

    For more information, see Create an Amazon EBS-backed AMI in the Amazon Elastic Compute Cloud User Guide.

    " }, "CreateInstanceConnectEndpoint":{ "name":"CreateInstanceConnectEndpoint", @@ -1107,7 +1107,7 @@ }, "input":{"shape":"CreateRestoreImageTaskRequest"}, "output":{"shape":"CreateRestoreImageTaskResult"}, - "documentation":"

    Starts a task that restores an AMI from an Amazon S3 object that was previously created by using CreateStoreImageTask.

    To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the Amazon EC2 User Guide.

    For more information, see Store and restore an AMI using Amazon S3 in the Amazon EC2 User Guide.

    " + "documentation":"

    Starts a task that restores an AMI from an Amazon S3 object that was previously created by using CreateStoreImageTask.

    To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the Amazon EC2 User Guide.

    For more information, see Store and restore an AMI using S3 in the Amazon EC2 User Guide.

    " }, "CreateRoute":{ "name":"CreateRoute", @@ -1207,7 +1207,7 @@ }, "input":{"shape":"CreateStoreImageTaskRequest"}, "output":{"shape":"CreateStoreImageTaskResult"}, - "documentation":"

    Stores an AMI as a single object in an Amazon S3 bucket.

    To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the Amazon EC2 User Guide.

    For more information, see Store and restore an AMI using Amazon S3 in the Amazon EC2 User Guide.

    " + "documentation":"

    Stores an AMI as a single object in an Amazon S3 bucket.

    To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the Amazon EC2 User Guide.

    For more information, see Store and restore an AMI using S3 in the Amazon EC2 User Guide.

    " }, "CreateSubnet":{ "name":"CreateSubnet", @@ -2367,7 +2367,7 @@ }, "input":{"shape":"DeregisterImageRequest"}, "output":{"shape":"DeregisterImageResult"}, - "documentation":"

    Deregisters the specified AMI. A deregistered AMI can't be used to launch new instances.

    If a deregistered EBS-backed AMI matches a Recycle Bin retention rule, it moves to the Recycle Bin for the specified retention period. It can be restored before its retention period expires, after which it is permanently deleted. If the deregistered AMI doesn't match a retention rule, it is permanently deleted immediately. For more information, see Recycle Bin in the Amazon EBS User Guide.

    Deregistering an AMI does not delete the following:

    • Instances already launched from the AMI. You'll continue to incur usage costs for the instances until you terminate them.

    • For EBS-backed AMIs: The snapshots that were created of the root and data volumes of the instance during AMI creation. You'll continue to incur snapshot storage costs.

    • For instance store-backed AMIs: The files uploaded to Amazon S3 during AMI creation. You'll continue to incur S3 storage costs.

    For more information, see Deregister an Amazon EC2 AMI in the Amazon EC2 User Guide.

    " + "documentation":"

    Deregisters the specified AMI. A deregistered AMI can't be used to launch new instances.

    If a deregistered EBS-backed AMI matches a Recycle Bin retention rule, it moves to the Recycle Bin for the specified retention period. It can be restored before its retention period expires, after which it is permanently deleted. If the deregistered AMI doesn't match a retention rule, it is permanently deleted immediately. For more information, see Recover deleted Amazon EBS snapshots and EBS-backed AMIs with Recycle Bin in the Amazon EBS User Guide.

    When deregistering an EBS-backed AMI, you can optionally delete its associated snapshots at the same time. However, if a snapshot is associated with multiple AMIs, it won't be deleted even if specified for deletion, although the AMI will still be deregistered.

    Deregistering an AMI does not delete the following:

    • Instances already launched from the AMI. You'll continue to incur usage costs for the instances until you terminate them.

    • For EBS-backed AMIs: Snapshots that are associated with multiple AMIs. You'll continue to incur snapshot storage costs.

    • For instance store-backed AMIs: The files uploaded to Amazon S3 during AMI creation. You'll continue to incur S3 storage costs.

    For more information, see Deregister an Amazon EC2 AMI in the Amazon EC2 User Guide.

    " }, "DeregisterInstanceEventNotificationAttributes":{ "name":"DeregisterInstanceEventNotificationAttributes", @@ -3647,7 +3647,7 @@ }, "input":{"shape":"DescribeStoreImageTasksRequest"}, "output":{"shape":"DescribeStoreImageTasksResult"}, - "documentation":"

    Describes the progress of the AMI store tasks. You can describe the store tasks for specified AMIs. If you don't specify the AMIs, you get a paginated list of store tasks from the last 31 days.

    For each AMI task, the response indicates if the task is InProgress, Completed, or Failed. For tasks InProgress, the response shows the estimated progress as a percentage.

    Tasks are listed in reverse chronological order. Currently, only tasks from the past 31 days can be viewed.

    To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the Amazon EC2 User Guide.

    For more information, see Store and restore an AMI using Amazon S3 in the Amazon EC2 User Guide.

    " + "documentation":"

    Describes the progress of the AMI store tasks. You can describe the store tasks for specified AMIs. If you don't specify the AMIs, you get a paginated list of store tasks from the last 31 days.

    For each AMI task, the response indicates if the task is InProgress, Completed, or Failed. For tasks InProgress, the response shows the estimated progress as a percentage.

    Tasks are listed in reverse chronological order. Currently, only tasks from the past 31 days can be viewed.

    To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the Amazon EC2 User Guide.

    For more information, see Store and restore an AMI using S3 in the Amazon EC2 User Guide.

    " }, "DescribeSubnets":{ "name":"DescribeSubnets", @@ -4204,7 +4204,7 @@ }, "input":{"shape":"DisableImageBlockPublicAccessRequest"}, "output":{"shape":"DisableImageBlockPublicAccessResult"}, - "documentation":"

    Disables block public access for AMIs at the account level in the specified Amazon Web Services Region. This removes the block public access restriction from your account. With the restriction removed, you can publicly share your AMIs in the specified Amazon Web Services Region.

    The API can take up to 10 minutes to configure this setting. During this time, if you run GetImageBlockPublicAccessState, the response will be block-new-sharing. When the API has completed the configuration, the response will be unblocked.

    For more information, see Block public access to your AMIs in the Amazon EC2 User Guide.

    " + "documentation":"

    Disables block public access for AMIs at the account level in the specified Amazon Web Services Region. This removes the block public access restriction from your account. With the restriction removed, you can publicly share your AMIs in the specified Amazon Web Services Region.

    The API can take up to 10 minutes to configure this setting. During this time, if you run GetImageBlockPublicAccessState, the response will be block-new-sharing. When the API has completed the configuration, the response will be unblocked.

    For more information, see Block public access to your AMIs in the Amazon EC2 User Guide.

    " }, "DisableImageDeprecation":{ "name":"DisableImageDeprecation", @@ -4214,7 +4214,7 @@ }, "input":{"shape":"DisableImageDeprecationRequest"}, "output":{"shape":"DisableImageDeprecationResult"}, - "documentation":"

    Cancels the deprecation of the specified AMI.

    For more information, see Deprecate an AMI in the Amazon EC2 User Guide.

    " + "documentation":"

    Cancels the deprecation of the specified AMI.

    For more information, see Deprecate an Amazon EC2 AMI in the Amazon EC2 User Guide.

    " }, "DisableImageDeregistrationProtection":{ "name":"DisableImageDeregistrationProtection", @@ -4224,7 +4224,7 @@ }, "input":{"shape":"DisableImageDeregistrationProtectionRequest"}, "output":{"shape":"DisableImageDeregistrationProtectionResult"}, - "documentation":"

    Disables deregistration protection for an AMI. When deregistration protection is disabled, the AMI can be deregistered.

    If you chose to include a 24-hour cooldown period when you enabled deregistration protection for the AMI, then, when you disable deregistration protection, you won’t immediately be able to deregister the AMI.

    For more information, see Protect an AMI from deregistration in the Amazon EC2 User Guide.

    " + "documentation":"

    Disables deregistration protection for an AMI. When deregistration protection is disabled, the AMI can be deregistered.

    If you chose to include a 24-hour cooldown period when you enabled deregistration protection for the AMI, then, when you disable deregistration protection, you won’t immediately be able to deregister the AMI.

    For more information, see Protect an Amazon EC2 AMI from deregistration in the Amazon EC2 User Guide.

    " }, "DisableIpamOrganizationAdminAccount":{ "name":"DisableIpamOrganizationAdminAccount", @@ -4551,7 +4551,7 @@ }, "input":{"shape":"EnableImageRequest"}, "output":{"shape":"EnableImageResult"}, - "documentation":"

    Re-enables a disabled AMI. The re-enabled AMI is marked as available and can be used for instance launches, appears in describe operations, and can be shared. Amazon Web Services accounts, organizations, and Organizational Units that lost access to the AMI when it was disabled do not regain access automatically. Once the AMI is available, it can be shared with them again.

    Only the AMI owner can re-enable a disabled AMI.

    For more information, see Disable an AMI in the Amazon EC2 User Guide.

    " + "documentation":"

    Re-enables a disabled AMI. The re-enabled AMI is marked as available and can be used for instance launches, appears in describe operations, and can be shared. Amazon Web Services accounts, organizations, and Organizational Units that lost access to the AMI when it was disabled do not regain access automatically. Once the AMI is available, it can be shared with them again.

    Only the AMI owner can re-enable a disabled AMI.

    For more information, see Disable an Amazon EC2 AMI in the Amazon EC2 User Guide.

    " }, "EnableImageBlockPublicAccess":{ "name":"EnableImageBlockPublicAccess", @@ -4561,7 +4561,7 @@ }, "input":{"shape":"EnableImageBlockPublicAccessRequest"}, "output":{"shape":"EnableImageBlockPublicAccessResult"}, - "documentation":"

    Enables block public access for AMIs at the account level in the specified Amazon Web Services Region. This prevents the public sharing of your AMIs. However, if you already have public AMIs, they will remain publicly available.

    The API can take up to 10 minutes to configure this setting. During this time, if you run GetImageBlockPublicAccessState, the response will be unblocked. When the API has completed the configuration, the response will be block-new-sharing.

    For more information, see Block public access to your AMIs in the Amazon EC2 User Guide.

    " + "documentation":"

    Enables block public access for AMIs at the account level in the specified Amazon Web Services Region. This prevents the public sharing of your AMIs. However, if you already have public AMIs, they will remain publicly available.

    The API can take up to 10 minutes to configure this setting. During this time, if you run GetImageBlockPublicAccessState, the response will be unblocked. When the API has completed the configuration, the response will be block-new-sharing.

    For more information, see Block public access to your AMIs in the Amazon EC2 User Guide.
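
    For illustration only (not part of the service model), a sketch of enabling the block and polling until it takes effect; the enum and accessor names are assumptions about the generated EC2 client.

        // ec2 is a software.amazon.awssdk.services.ec2.Ec2Client;
        // the enclosing method is assumed to declare throws InterruptedException.
        ec2.enableImageBlockPublicAccess(r ->
                r.imageBlockPublicAccessState(ImageBlockPublicAccessEnabledState.BLOCK_NEW_SHARING));

        String state;
        do {
            Thread.sleep(30_000); // the setting can take up to 10 minutes to propagate
            state = ec2.getImageBlockPublicAccessState(r -> {}).imageBlockPublicAccessState();
        } while (!"block-new-sharing".equals(state));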

    " }, "EnableImageDeprecation":{ "name":"EnableImageDeprecation", @@ -4581,7 +4581,7 @@ }, "input":{"shape":"EnableImageDeregistrationProtectionRequest"}, "output":{"shape":"EnableImageDeregistrationProtectionResult"}, - "documentation":"

    Enables deregistration protection for an AMI. When deregistration protection is enabled, the AMI can't be deregistered.

    To allow the AMI to be deregistered, you must first disable deregistration protection using DisableImageDeregistrationProtection.

    For more information, see Protect an AMI from deregistration in the Amazon EC2 User Guide.

    " + "documentation":"

    Enables deregistration protection for an AMI. When deregistration protection is enabled, the AMI can't be deregistered.

    To allow the AMI to be deregistered, you must first disable deregistration protection using DisableImageDeregistrationProtection.

    For more information, see Protect an Amazon EC2 AMI from deregistration in the Amazon EC2 User Guide.

    " }, "EnableIpamOrganizationAdminAccount":{ "name":"EnableIpamOrganizationAdminAccount", @@ -4899,7 +4899,7 @@ }, "input":{"shape":"GetImageBlockPublicAccessStateRequest"}, "output":{"shape":"GetImageBlockPublicAccessStateResult"}, - "documentation":"

    Gets the current state of block public access for AMIs at the account level in the specified Amazon Web Services Region.

    For more information, see Block public access to your AMIs in the Amazon EC2 User Guide.

    " + "documentation":"

    Gets the current state of block public access for AMIs at the account level in the specified Amazon Web Services Region.

    For more information, see Block public access to your AMIs in the Amazon EC2 User Guide.

    " }, "GetInstanceMetadataDefaults":{ "name":"GetInstanceMetadataDefaults", @@ -6229,7 +6229,7 @@ }, "input":{"shape":"RegisterImageRequest"}, "output":{"shape":"RegisterImageResult"}, - "documentation":"

    Registers an AMI. When you're creating an instance-store backed AMI, registering the AMI is the final step in the creation process. For more information about creating AMIs, see Create an AMI from a snapshot and Create an instance-store backed AMI in the Amazon EC2 User Guide.

    For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you don't have to register the AMI yourself. We recommend that you always use CreateImage unless you have a specific reason to use RegisterImage.

    If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. If you make changes to an image, deregister the previous image and register the new image.

    Register a snapshot of a root device volume

    You can use RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. You specify the snapshot using a block device mapping. You can't set the encryption state of the volume using the block device mapping. If the snapshot is encrypted, or encryption by default is enabled, the root volume of an instance launched from the AMI is encrypted.

    For more information, see Create an AMI from a snapshot and Use encryption with Amazon EBS-backed AMIs in the Amazon EC2 User Guide.

    Amazon Web Services Marketplace product codes

    If any snapshots have Amazon Web Services Marketplace product codes, they are copied to the new AMI.

    In most cases, AMIs for Windows, RedHat, SUSE, and SQL Server require correct licensing information to be present on the AMI. For more information, see Understand AMI billing information in the Amazon EC2 User Guide. When creating an AMI from a snapshot, the RegisterImage operation derives the correct billing information from the snapshot's metadata, but this requires the appropriate metadata to be present. To verify if the correct billing information was applied, check the PlatformDetails field on the new AMI. If the field is empty or doesn't match the expected operating system code (for example, Windows, RedHat, SUSE, or SQL), the AMI creation was unsuccessful, and you should discard the AMI and instead create the AMI from an instance using CreateImage. For more information, see Create an AMI from an instance in the Amazon EC2 User Guide.

    If you purchase a Reserved Instance to apply to an On-Demand Instance that was launched from an AMI with a billing product code, make sure that the Reserved Instance has the matching billing product code. If you purchase a Reserved Instance without the matching billing product code, the Reserved Instance will not be applied to the On-Demand Instance. For information about how to obtain the platform details and billing information of an AMI, see Understand AMI billing information in the Amazon EC2 User Guide.

    " + "documentation":"

    Registers an AMI. When you're creating an instance-store backed AMI, registering the AMI is the final step in the creation process. For more information about creating AMIs, see Create an AMI from a snapshot and Create an instance-store backed AMI in the Amazon EC2 User Guide.

    For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you don't have to register the AMI yourself. We recommend that you always use CreateImage unless you have a specific reason to use RegisterImage.

    If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. If you make changes to an image, deregister the previous image and register the new image.

    Register a snapshot of a root device volume

    You can use RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. You specify the snapshot using a block device mapping. You can't set the encryption state of the volume using the block device mapping. If the snapshot is encrypted, or encryption by default is enabled, the root volume of an instance launched from the AMI is encrypted.

    For more information, see Create an AMI from a snapshot and Use encryption with EBS-backed AMIs in the Amazon EC2 User Guide.
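
    For illustration only (not part of the service model), a sketch of registering an EBS-backed AMI from a root-volume snapshot with the AWS SDK for Java v2; the snapshot ID is a placeholder.

        import software.amazon.awssdk.services.ec2.model.ArchitectureValues;

        // ec2 is a software.amazon.awssdk.services.ec2.Ec2Client
        ec2.registerImage(r -> r
                .name("restored-linux-ami")
                .architecture(ArchitectureValues.X86_64)
                .rootDeviceName("/dev/xvda")
                .blockDeviceMappings(m -> m
                        .deviceName("/dev/xvda")
                        .ebs(e -> e.snapshotId("snap-0123456789abcdef0"))));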

    Amazon Web Services Marketplace product codes

    If any snapshots have Amazon Web Services Marketplace product codes, they are copied to the new AMI.

    In most cases, AMIs for Windows, RedHat, SUSE, and SQL Server require correct licensing information to be present on the AMI. For more information, see Understand AMI billing information in the Amazon EC2 User Guide. When creating an AMI from a snapshot, the RegisterImage operation derives the correct billing information from the snapshot's metadata, but this requires the appropriate metadata to be present. To verify if the correct billing information was applied, check the PlatformDetails field on the new AMI. If the field is empty or doesn't match the expected operating system code (for example, Windows, RedHat, SUSE, or SQL), the AMI creation was unsuccessful, and you should discard the AMI and instead create the AMI from an instance using CreateImage. For more information, see Create an AMI from an instance in the Amazon EC2 User Guide.

    If you purchase a Reserved Instance to apply to an On-Demand Instance that was launched from an AMI with a billing product code, make sure that the Reserved Instance has the matching billing product code. If you purchase a Reserved Instance without the matching billing product code, the Reserved Instance will not be applied to the On-Demand Instance. For information about how to obtain the platform details and billing information of an AMI, see Understand AMI billing information in the Amazon EC2 User Guide.

    " }, "RegisterInstanceEventNotificationAttributes":{ "name":"RegisterInstanceEventNotificationAttributes", @@ -6541,7 +6541,7 @@ }, "input":{"shape":"RestoreImageFromRecycleBinRequest"}, "output":{"shape":"RestoreImageFromRecycleBinResult"}, - "documentation":"

    Restores an AMI from the Recycle Bin. For more information, see Recycle Bin in the Amazon EC2 User Guide.

    " + "documentation":"

    Restores an AMI from the Recycle Bin. For more information, see Recover deleted Amazon EBS snapshots and EBS-backed AMIs with Recycle Bin in the Amazon EC2 User Guide.

    " }, "RestoreManagedPrefixListVersion":{ "name":"RestoreManagedPrefixListVersion", @@ -7604,6 +7604,11 @@ "documentation":"

    The associated carrier IP address. This option is only available for network interfaces that reside in a subnet in a Wavelength Zone (for example, an EC2 instance).

    ", "locationName":"carrierIp" }, + "SubnetId":{ + "shape":"String", + "documentation":"

    The ID of the subnet where the IP address is allocated.

    ", + "locationName":"subnetId" + }, "ServiceManaged":{ "shape":"ServiceManaged", "documentation":"

    The service that manages the elastic IP address.

    The only option supported today is alb.

    ", @@ -9465,6 +9470,13 @@ "locationName":"item" } }, + "AssociatedSubnetList":{ + "type":"list", + "member":{ + "shape":"SubnetId", + "locationName":"item" + } + }, "AssociatedTargetNetwork":{ "type":"structure", "members":{ @@ -13267,7 +13279,7 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

    Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference.

    ", + "documentation":"

    Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see Ensuring idempotency in Amazon EC2 API requests in the Amazon EC2 API Reference.

    ", "idempotencyToken":true }, "Description":{ @@ -13311,7 +13323,7 @@ }, "SnapshotCopyCompletionDurationMinutes":{ "shape":"Long", - "documentation":"

    Specify a completion duration, in 15 minute increments, to initiate a time-based AMI copy. The specified completion duration applies to each of the snapshots associated with the AMI. Each snapshot associated with the AMI will be completed within the specified completion duration, with copy throughput automatically adjusted for each snapshot based on its size to meet the timing target.

    If you do not specify a value, the AMI copy operation is completed on a best-effort basis.

    For more information, see Time-based copies.

    " + "documentation":"

    Specify a completion duration, in 15 minute increments, to initiate a time-based AMI copy. The specified completion duration applies to each of the snapshots associated with the AMI. Each snapshot associated with the AMI will be completed within the specified completion duration, with copy throughput automatically adjusted for each snapshot based on its size to meet the timing target.

    If you do not specify a value, the AMI copy operation is completed on a best-effort basis.

    For more information, see Time-based copies for Amazon EBS snapshots and EBS-backed AMIs.

    " }, "DryRun":{ "shape":"Boolean", @@ -20115,6 +20127,29 @@ } } }, + "DeleteSnapshotResultSet":{ + "type":"list", + "member":{ + "shape":"DeleteSnapshotReturnCode", + "locationName":"item" + } + }, + "DeleteSnapshotReturnCode":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"SnapshotId", + "documentation":"

    The ID of the snapshot.

    ", + "locationName":"snapshotId" + }, + "ReturnCode":{ + "shape":"SnapshotReturnCodes", + "documentation":"

    The result code from the snapshot deletion attempt. Possible values:

    • success - The snapshot was successfully deleted.

    • skipped - The snapshot was not deleted because it's associated with other AMIs.

    • missing-permissions - The snapshot was not deleted because the role lacks DeleteSnapshot permissions. For more information, see How Amazon EBS works with IAM.

    • internal-error - The snapshot was not deleted due to a server error.

    • client-error - The snapshot was not deleted due to a client configuration error.

    For details about an error, check the DeleteSnapshot event in the CloudTrail event history. For more information, see View event history in the Amazon Web Services CloudTrail User Guide.

    ", + "locationName":"returnCode" + } + }, + "documentation":"

    The snapshot ID and its deletion result code.

    " + }, "DeleteSpotDatafeedSubscriptionRequest":{ "type":"structure", "members":{ @@ -21015,6 +21050,10 @@ "shape":"ImageId", "documentation":"

    The ID of the AMI.

    " }, + "DeleteAssociatedSnapshots":{ + "shape":"Boolean", + "documentation":"

    Specifies whether to delete the snapshots associated with the AMI during deregistration.

    If a snapshot is associated with multiple AMIs, it is not deleted, regardless of this setting.

    Default: The snapshots are not deleted.

    " + }, "DryRun":{ "shape":"Boolean", "documentation":"

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", @@ -21026,7 +21065,17 @@ "DeregisterImageResult":{ "type":"structure", "members":{ -} + "Return":{ + "shape":"Boolean", + "documentation":"

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "locationName":"return" + }, + "DeleteSnapshotResults":{ + "shape":"DeleteSnapshotResultSet", + "documentation":"

    The deletion result for each snapshot associated with the AMI, including the snapshot ID and its success or error code.
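
    For illustration only (not part of the service model), a sketch that opts into snapshot deletion and inspects the per-snapshot results; the deleteSnapshotResults and returnCodeAsString accessors are assumed from the new members above.

        import software.amazon.awssdk.services.ec2.model.DeregisterImageResponse;

        // ec2 is a software.amazon.awssdk.services.ec2.Ec2Client
        DeregisterImageResponse resp = ec2.deregisterImage(r -> r
                .imageId("ami-0123456789abcdef0")
                .deleteAssociatedSnapshots(true));

        // Snapshots shared with other AMIs come back as "skipped" rather than deleted.
        resp.deleteSnapshotResults().forEach(rc ->
                System.out.println(rc.snapshotId() + " -> " + rc.returnCodeAsString()));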

    ", + "locationName":"deleteSnapshotResultSet" + } + } }, "DeregisterInstanceEventNotificationAttributesRequest":{ "type":"structure", @@ -25262,7 +25311,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

    One or more filters.

    • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

    • association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

    • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

    • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address (IPv4).

    • addresses.primary - Whether the private IPv4 address is the primary IP address associated with the network interface.

    • addresses.private-ip-address - The private IPv4 addresses associated with the network interface.

    • association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

    • association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

    • association.public-dns-name - The public DNS name for the network interface (IPv4).

    • attachment.attach-time - The time that the network interface was attached to an instance.

    • attachment.attachment-id - The ID of the interface attachment.

    • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

    • attachment.device-index - The device index to which the network interface is attached.

    • attachment.instance-id - The ID of the instance to which the network interface is attached.

    • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    • attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    • availability-zone - The Availability Zone of the network interface.

    • description - The description of the network interface.

    • group-id - The ID of a security group associated with the network interface.

    • ipv6-addresses.ipv6-address - An IPv6 address associated with the network interface.

    • interface-type - The type of network interface (api_gateway_managed | aws_codestar_connections_managed | branch | ec2_instance_connect_endpoint | efa | efa-only | efs | gateway_load_balancer | gateway_load_balancer_endpoint | global_accelerator_managed | interface | iot_rules_managed | lambda | load_balancer | nat_gateway | network_load_balancer | quicksight | transit_gateway | trunk | vpc_endpoint).

    • mac-address - The MAC address of the network interface.

    • network-interface-id - The ID of the network interface.

    • operator.managed - A Boolean that indicates whether this is a managed network interface.

    • operator.principal - The principal that manages the network interface. Only valid for managed network interfaces, where managed is true.

    • owner-id - The Amazon Web Services account ID of the network interface owner.

    • private-dns-name - The private DNS name of the network interface (IPv4).

    • private-ip-address - The private IPv4 address or addresses of the network interface.

    • requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface.

    • requester-managed - Indicates whether the network interface is being managed by an Amazon Web Services service (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

    • source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance the status is in-use.

    • subnet-id - The ID of the subnet for the network interface.

    • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    • vpc-id - The ID of the VPC for the network interface.

    ", + "documentation":"

    One or more filters. (A short usage sketch follows this list.)

    • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

    • association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

    • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

    • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address (IPv4).

    • addresses.primary - Whether the private IPv4 address is the primary IP address associated with the network interface.

    • addresses.private-ip-address - The private IPv4 addresses associated with the network interface.

    • association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

    • association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

    • association.public-dns-name - The public DNS name for the network interface (IPv4).

    • attachment.attach-time - The time that the network interface was attached to an instance.

    • attachment.attachment-id - The ID of the interface attachment.

    • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

    • attachment.device-index - The device index to which the network interface is attached.

    • attachment.instance-id - The ID of the instance to which the network interface is attached.

    • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    • attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    • availability-zone - The Availability Zone of the network interface.

    • description - The description of the network interface.

    • group-id - The ID of a security group associated with the network interface.

    • ipv6-addresses.ipv6-address - An IPv6 address associated with the network interface.

    • interface-type - The type of network interface (api_gateway_managed | aws_codestar_connections_managed | branch | ec2_instance_connect_endpoint | efa | efa-only | efs | evs | gateway_load_balancer | gateway_load_balancer_endpoint | global_accelerator_managed | interface | iot_rules_managed | lambda | load_balancer | nat_gateway | network_load_balancer | quicksight | transit_gateway | trunk | vpc_endpoint).

    • mac-address - The MAC address of the network interface.

    • network-interface-id - The ID of the network interface.

    • operator.managed - A Boolean that indicates whether this is a managed network interface.

    • operator.principal - The principal that manages the network interface. Only valid for managed network interfaces, where managed is true.

    • owner-id - The Amazon Web Services account ID of the network interface owner.

    • private-dns-name - The private DNS name of the network interface (IPv4).

    • private-ip-address - The private IPv4 address or addresses of the network interface.

    • requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface.

    • requester-managed - Indicates whether the network interface is being managed by an Amazon Web Services service (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

    • source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance the status is in-use.

    • subnet-id - The ID of the subnet for the network interface.

    • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    • vpc-id - The ID of the VPC for the network interface.

    ", "locationName":"filter" } }, @@ -36910,7 +36959,7 @@ }, "BootMode":{ "shape":"BootModeValues", - "documentation":"

    The boot mode of the image. For more information, see Boot modes in the Amazon EC2 User Guide.

    ", + "documentation":"

    The boot mode of the image. For more information, see Instance launch behavior with Amazon EC2 boot modes in the Amazon EC2 User Guide.

    ", "locationName":"bootMode" }, "TpmSupport":{ @@ -36950,12 +36999,12 @@ }, "SourceImageId":{ "shape":"String", - "documentation":"

    The ID of the source AMI from which the AMI was created.

    The ID only appears if the AMI was created using CreateImage, CopyImage, or CreateRestoreImageTask. The ID does not appear if the AMI was created using any other API. For some older AMIs, the ID might not be available. For more information, see Identify the source AMI used to create a new AMI in the Amazon EC2 User Guide.

    ", + "documentation":"

    The ID of the source AMI from which the AMI was created.

    The ID only appears if the AMI was created using CreateImage, CopyImage, or CreateRestoreImageTask. The ID does not appear if the AMI was created using any other API. For some older AMIs, the ID might not be available. For more information, see Identify the source AMI used to create a new Amazon EC2 AMI in the Amazon EC2 User Guide.

    ", "locationName":"sourceImageId" }, "SourceImageRegion":{ "shape":"String", - "documentation":"

    The Region of the source AMI.

    The Region only appears if the AMI was created using CreateImage, CopyImage, or CreateRestoreImageTask. The Region does not appear if the AMI was created using any other API. For some older AMIs, the Region might not be available. For more information, see Identify the source AMI used to create a new AMI in the Amazon EC2 User Guide.

    ", + "documentation":"

    The Region of the source AMI.

    The Region only appears if the AMI was created using CreateImage, CopyImage, or CreateRestoreImageTask. The Region does not appear if the AMI was created using any other API. For some older AMIs, the Region might not be available. For more information, see Identify the source AMI used to create a new Amazon EC2 AMI in the Amazon EC2 User Guide.

    ", "locationName":"sourceImageRegion" }, "ImageId":{ @@ -37056,7 +37105,7 @@ }, "UefiData":{ "shape":"AttributeValue", - "documentation":"

    Base64 representation of the non-volatile UEFI variable store. To retrieve the UEFI data, use the GetInstanceUefiData command. You can inspect and modify the UEFI data by using the python-uefivars tool on GitHub. For more information, see UEFI Secure Boot in the Amazon EC2 User Guide.

    ", + "documentation":"

    Base64 representation of the non-volatile UEFI variable store. To retrieve the UEFI data, use the GetInstanceUefiData command. You can inspect and modify the UEFI data by using the python-uefivars tool on GitHub. For more information, see UEFI Secure Boot for Amazon EC2 instances in the Amazon EC2 User Guide.

    ", "locationName":"uefiData" }, "LastLaunchedTime":{ @@ -39518,7 +39567,7 @@ }, "InterfaceType":{ "shape":"String", - "documentation":"

    The type of network interface.

    Valid values: interface | efa | efa-only | trunk

    ", + "documentation":"

    The type of network interface.

    Valid values: interface | efa | efa-only | evs | trunk

    ", "locationName":"interfaceType" }, "Ipv4Prefixes":{ @@ -47973,6 +48022,11 @@ "shape":"Boolean", "documentation":"

    Indicates whether to assign a public IPv4 address to a network interface. This option can be enabled for any network interface but will only apply to the primary network interface (eth0).

    " }, + "AssociatedSubnetIds":{ + "shape":"SubnetIdList", + "documentation":"

    A list of subnet IDs to associate with the network interface.

    ", + "locationName":"AssociatedSubnetId" + }, "DryRun":{ "shape":"Boolean", "documentation":"

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", @@ -51080,6 +51134,11 @@ "shape":"OperatorResponse", "documentation":"

    The service provider that manages the network interface.

    ", "locationName":"operator" + }, + "AssociatedSubnets":{ + "shape":"AssociatedSubnetList", + "documentation":"

    The subnets associated with this network interface.

    ", + "locationName":"associatedSubnetSet" } }, "documentation":"

    Describes a network interface.

    " @@ -54102,16 +54161,16 @@ "members":{ "ImageLocation":{ "shape":"String", - "documentation":"

    The full path to your AMI manifest in Amazon S3 storage. The specified bucket must have the aws-exec-read canned access control list (ACL) to ensure that it can be accessed by Amazon EC2. For more information, see Canned ACLs in the Amazon S3 Service Developer Guide.

    " + "documentation":"

    The full path to your AMI manifest in Amazon S3 storage. The specified bucket must have the aws-exec-read canned access control list (ACL) to ensure that it can be accessed by Amazon EC2. For more information, see Canned ACL in the Amazon S3 Service Developer Guide.

    " }, "BillingProducts":{ "shape":"BillingProductList", - "documentation":"

    The billing product codes. Your account must be authorized to specify billing product codes.

    If your account is not authorized to specify billing product codes, you can publish AMIs that include billable software and list them on the Amazon Web Services Marketplace. You must first register as a seller on the Amazon Web Services Marketplace. For more information, see Getting started as a seller and AMI-based products in the Amazon Web Services Marketplace Seller Guide.

    ", + "documentation":"

    The billing product codes. Your account must be authorized to specify billing product codes.

    If your account is not authorized to specify billing product codes, you can publish AMIs that include billable software and list them on the Amazon Web Services Marketplace. You must first register as a seller on the Amazon Web Services Marketplace. For more information, see Getting started as an Amazon Web Services Marketplace seller and AMI-based products in Amazon Web Services Marketplace in the Amazon Web Services Marketplace Seller Guide.

    ", "locationName":"BillingProduct" }, "BootMode":{ "shape":"BootModeValues", - "documentation":"

    The boot mode of the AMI. A value of uefi-preferred indicates that the AMI supports both UEFI and Legacy BIOS.

    The operating system contained in the AMI must be configured to support the specified boot mode.

    For more information, see Boot modes in the Amazon EC2 User Guide.

    " + "documentation":"

    The boot mode of the AMI. A value of uefi-preferred indicates that the AMI supports both UEFI and Legacy BIOS.

    The operating system contained in the AMI must be configured to support the specified boot mode.

    For more information, see Instance launch behavior with Amazon EC2 boot modes in the Amazon EC2 User Guide.

    " }, "TpmSupport":{ "shape":"TpmSupportValues", @@ -54119,7 +54178,7 @@ }, "UefiData":{ "shape":"StringType", - "documentation":"

    Base64 representation of the non-volatile UEFI variable store. To retrieve the UEFI data, use the GetInstanceUefiData command. You can inspect and modify the UEFI data by using the python-uefivars tool on GitHub. For more information, see UEFI Secure Boot in the Amazon EC2 User Guide.

    " + "documentation":"

    Base64 representation of the non-volatile UEFI variable store. To retrieve the UEFI data, use the GetInstanceUefiData command. You can inspect and modify the UEFI data by using the python-uefivars tool on GitHub. For more information, see UEFI Secure Boot for Amazon EC2 instances in the Amazon EC2 User Guide.

    " }, "ImdsSupport":{ "shape":"ImdsSupportValues", @@ -54167,7 +54226,7 @@ }, "BlockDeviceMappings":{ "shape":"BlockDeviceMappingRequestList", - "documentation":"

    The block device mapping entries.

    If you specify an Amazon EBS volume using the ID of an Amazon EBS snapshot, you can't specify the encryption state of the volume.

    If you create an AMI on an Outpost, then all backing snapshots must be on the same Outpost or in the Region of that Outpost. AMIs on an Outpost that include local snapshots can be used to launch instances on the same Outpost only. For more information, Amazon EBS local snapshots on Outposts in the Amazon EBS User Guide.

    ", + "documentation":"

    The block device mapping entries.

    If you specify an Amazon EBS volume using the ID of an Amazon EBS snapshot, you can't specify the encryption state of the volume.

    If you create an AMI on an Outpost, then all backing snapshots must be on the same Outpost or in the Region of that Outpost. AMIs on an Outpost that include local snapshots can be used to launch instances on the same Outpost only. For more information, see Create AMIs from local snapshots in the Amazon EBS User Guide.

    ", "locationName":"BlockDeviceMapping" }, "VirtualizationType":{ @@ -60085,6 +60144,16 @@ "locationName":"item" } }, + "SnapshotReturnCodes":{ + "type":"string", + "enum":[ + "success", + "skipped", + "missing-permissions", + "internal-error", + "client-error" + ] + }, "SnapshotSet":{ "type":"list", "member":{ @@ -61548,6 +61617,11 @@ "documentation":"

    The state of VPC Block Public Access (BPA).

    ", "locationName":"blockPublicAccessStates" }, + "Type":{ + "shape":"String", + "documentation":"

    Indicates whether this is a subnet used with Amazon Elastic VMware Service (EVS). Possible values are Elastic VMware Service or no value. For more information about Amazon EVS, see the Amazon Elastic VMware Service API Reference.

    ", + "locationName":"type" + }, "SubnetId":{ "shape":"String", "documentation":"

    The ID of the subnet.

    ", @@ -61555,7 +61629,7 @@ }, "State":{ "shape":"SubnetState", - "documentation":"

    The current state of the subnet.

    ", + "documentation":"

    The current state of the subnet.

    • failed: The underlying infrastructure to support the subnet failed to provision as expected.

    • failed-insufficient-capacity: The underlying infrastructure to support the subnet failed to provision due to a shortage of EC2 instance capacity.

    ", "locationName":"state" }, "VpcId":{ @@ -61724,6 +61798,13 @@ } }, "SubnetId":{"type":"string"}, + "SubnetIdList":{ + "type":"list", + "member":{ + "shape":"SubnetId", + "locationName":"AssociatedSubnetId" + } + }, "SubnetIdStringList":{ "type":"list", "member":{ @@ -61804,7 +61885,9 @@ "enum":[ "pending", "available", - "unavailable" + "unavailable", + "failed", + "failed-insufficient-capacity" ] }, "Subscription":{ diff --git a/services/ec2instanceconnect/pom.xml b/services/ec2instanceconnect/pom.xml index 4f75d804cc63..38ad8884ef6b 100644 --- a/services/ec2instanceconnect/pom.xml +++ b/services/ec2instanceconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ec2instanceconnect AWS Java SDK :: Services :: EC2 Instance Connect diff --git a/services/ecr/pom.xml b/services/ecr/pom.xml index 1f217f365164..ad3ba06a6fe8 100644 --- a/services/ecr/pom.xml +++ b/services/ecr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ecr AWS Java SDK :: Services :: Amazon EC2 Container Registry diff --git a/services/ecr/src/main/resources/codegen-resources/service-2.json b/services/ecr/src/main/resources/codegen-resources/service-2.json index 2c5424fb7749..84695aafe0fd 100644 --- a/services/ecr/src/main/resources/codegen-resources/service-2.json +++ b/services/ecr/src/main/resources/codegen-resources/service-2.json @@ -304,7 +304,7 @@ {"shape":"RepositoryNotFoundException"}, {"shape":"ImageNotFoundException"} ], - "documentation":"

    Returns metadata about the images in a repository.

    Beginning with Docker version 1.9, the Docker client compresses image layers before pushing them to a V2 Docker registry. The output of the docker images command shows the uncompressed image size, so it may return a larger image size than the image sizes returned by DescribeImages.

    " + "documentation":"

    Returns metadata about the images in a repository.

    Starting with Docker version 1.9, the Docker client compresses image layers before pushing them to a V2 Docker registry. The output of the docker images command shows the uncompressed image size. Therefore, Docker might report a larger image size than the size shown in the Amazon Web Services Management Console.

    The new version of Amazon ECR Basic Scanning doesn't use the ImageDetail$imageScanFindingsSummary and ImageDetail$imageScanStatus attributes from the API response to return scan results. Use the DescribeImageScanFindings API instead. For more information about Amazon Web Services native basic scanning, see Scan images for software vulnerabilities in Amazon ECR.

    " }, "DescribePullThroughCacheRules":{ "name":"DescribePullThroughCacheRules", @@ -918,6 +918,14 @@ "shape":"Date", "documentation":"

    The date and time the Amazon ECR container image was pushed.

    " }, + "lastInUseAt":{ + "shape":"Date", + "documentation":"

    The most recent date and time a cluster was running the image.

    " + }, + "inUseCount":{ + "shape":"InUseCount", + "documentation":"

    The number of Amazon ECS or Amazon EKS clusters currently running the image.

    " + }, "registry":{ "shape":"RegistryId", "documentation":"

    The registry the Amazon ECR container image belongs to.

    " @@ -1135,7 +1143,7 @@ }, "upstreamRegistryUrl":{ "shape":"Url", - "documentation":"

    The registry URL of the upstream public registry to use as the source for the pull through cache rule. The following is the syntax to use for each supported upstream registry.

    • Amazon ECR (ecr) – dkr.ecr.<region>.amazonaws.com

    • Amazon ECR Public (ecr-public) – public.ecr.aws

    • Docker Hub (docker-hub) – registry-1.docker.io

    • GitHub Container Registry (github-container-registry) – ghcr.io

    • GitLab Container Registry (gitlab-container-registry) – registry.gitlab.com

    • Kubernetes (k8s) – registry.k8s.io

    • Microsoft Azure Container Registry (azure-container-registry) – <custom>.azurecr.io

    • Quay (quay) – quay.io

    " + "documentation":"

    The registry URL of the upstream public registry to use as the source for the pull through cache rule. The following is the syntax to use for each supported upstream registry.

    • Amazon ECR (ecr) – <accountId>.dkr.ecr.<region>.amazonaws.com

    • Amazon ECR Public (ecr-public) – public.ecr.aws

    • Docker Hub (docker-hub) – registry-1.docker.io

    • GitHub Container Registry (github-container-registry) – ghcr.io

    • GitLab Container Registry (gitlab-container-registry) – registry.gitlab.com

    • Kubernetes (k8s) – registry.k8s.io

    • Microsoft Azure Container Registry (azure-container-registry) – <custom>.azurecr.io

    • Quay (quay) – quay.io

    " }, "registryId":{ "shape":"RegistryId", @@ -1459,8 +1467,7 @@ }, "DeleteRegistryPolicyRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteRegistryPolicyResponse":{ "type":"structure", @@ -1729,8 +1736,7 @@ }, "DescribeRegistryRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeRegistryResponse":{ "type":"structure", @@ -2013,7 +2019,7 @@ "members":{ "authorizationData":{ "shape":"AuthorizationDataList", - "documentation":"

    A list of authorization token data objects that correspond to the registryIds values in the request.

    " + "documentation":"

    A list of authorization token data objects that correspond to the registryIds values in the request.

    The size of the authorization token returned by Amazon ECR is not fixed. We recommend that you don't make assumptions about the maximum size.

    " } } }, @@ -2073,7 +2079,7 @@ }, "maxResults":{ "shape":"LifecyclePreviewMaxResults", - "documentation":"

    The maximum number of repository results returned by GetLifecyclePolicyPreviewRequest in
 paginated output. When this parameter is used, GetLifecyclePolicyPreviewRequest only returns
 maxResults results in a single page along with a nextToken
 response element. The remaining results of the initial request can be seen by sending
 another GetLifecyclePolicyPreviewRequest request with the returned nextToken
 value. This value can be between 1 and 1000. If this
 parameter is not used, then GetLifecyclePolicyPreviewRequest returns up to
 100 results and a nextToken value, if
 applicable. This option cannot be used when you specify images with imageIds.

    " + "documentation":"

    The maximum number of repository results returned by GetLifecyclePolicyPreviewRequest in
 paginated output. When this parameter is used, GetLifecyclePolicyPreviewRequest only returns
 maxResults results in a single page along with a nextToken
 response element. The remaining results of the initial request can be seen by sending
 another GetLifecyclePolicyPreviewRequest request with the returned nextToken
 value. This value can be between 1 and 100. If this
 parameter is not used, then GetLifecyclePolicyPreviewRequest returns up to
100 results and a nextToken value, if
 applicable. This option cannot be used when you specify images with imageIds.

    " }, "filter":{ "shape":"LifecyclePolicyPreviewFilter", @@ -2151,8 +2157,7 @@ }, "GetRegistryPolicyRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetRegistryPolicyResponse":{ "type":"structure", @@ -2169,8 +2174,7 @@ }, "GetRegistryScanningConfigurationRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetRegistryScanningConfigurationResponse":{ "type":"structure", @@ -2282,7 +2286,7 @@ }, "imageSizeInBytes":{ "shape":"ImageSizeInBytes", - "documentation":"

    The size, in bytes, of the image in the repository.

    If the image is a manifest list, this will be the max size of all manifests in the list.

    Starting with Docker version 1.9, the Docker client compresses image layers before pushing them to a V2 Docker registry. The output of the docker images command shows the uncompressed image size. Therefore, Docker might return a larger image than the image sizes returned by DescribeImages.

    " + "documentation":"

    The size, in bytes, of the image in the repository.

    If the image is a manifest list, this will be the max size of all manifests in the list.

    Starting with Docker version 1.9, the Docker client compresses image layers before pushing them to a V2 Docker registry. The output of the docker images command shows the uncompressed image size. Therefore, Docker might report a larger image size than the size shown in the Amazon Web Services Management Console.

    " }, "imagePushedAt":{ "shape":"PushTimestamp", @@ -2552,6 +2556,10 @@ "type":"list", "member":{"shape":"ImageTag"} }, + "InUseCount":{ + "type":"long", + "min":0 + }, "InitiateLayerUploadRequest":{ "type":"structure", "required":["repositoryName"], @@ -3627,7 +3635,7 @@ }, "imageTagMutability":{ "shape":"ImageTagMutability", - "documentation":"

    The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten.

    " + "documentation":"

    The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten.

    " }, "repositoryPolicy":{ "shape":"RepositoryPolicyText", @@ -4129,8 +4137,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagStatus":{ "type":"string", @@ -4239,8 +4246,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdatePullThroughCacheRuleRequest":{ "type":"structure", diff --git a/services/ecrpublic/pom.xml b/services/ecrpublic/pom.xml index 1fead8774823..fc1832403331 100644 --- a/services/ecrpublic/pom.xml +++ b/services/ecrpublic/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ecrpublic AWS Java SDK :: Services :: ECR PUBLIC diff --git a/services/ecs/pom.xml b/services/ecs/pom.xml index e7e175b4cc59..130beea0057b 100644 --- a/services/ecs/pom.xml +++ b/services/ecs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ecs AWS Java SDK :: Services :: Amazon EC2 Container Service diff --git a/services/ecs/src/main/resources/codegen-resources/service-2.json b/services/ecs/src/main/resources/codegen-resources/service-2.json index c0d76595f56a..28435522f6a1 100644 --- a/services/ecs/src/main/resources/codegen-resources/service-2.json +++ b/services/ecs/src/main/resources/codegen-resources/service-2.json @@ -970,7 +970,7 @@ {"shape":"NamespaceNotFoundException"}, {"shape":"UnsupportedFeatureException"} ], - "documentation":"

    Modifies the parameters of a service.

    On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

    For services using the rolling update (ECS), you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.

    You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update your volume configurations and trigger a new deployment. volumeConfigurations is only supported for REPLICA service and not DAEMON service. If you leave volumeConfigurations null, it doesn't trigger a new deployment. For more information on volumes, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

    For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.

    For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set. For more information, see CreateTaskSet.

    You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter.

    You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

    If you have updated the container image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.

    If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.

    You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.

    • If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.

    • The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).

    When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

    When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.

    • Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.

    • By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.

      • Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

      • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

    When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:

    • Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.

    • Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.

    You must have a service-linked role when you update any of the following service properties:

    • loadBalancers,

    • serviceRegistries

    For more information about the role see the CreateService request parameter role .

    " + "documentation":"

    Modifies the parameters of a service.

    On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

    For services using the rolling update (ECS), you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.

    You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update your volume configurations and trigger a new deployment. volumeConfigurations is only supported for REPLICA service and not DAEMON service. If you leave volumeConfigurations null, it doesn't trigger a new deployment. For more information on volumes, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

    For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.

    For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set. For more information, see CreateTaskSet.

    You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter.

    You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

    If you have updated the container image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.

    If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.

    You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.

    • If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.

    • The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).

    When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

    When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.

    • Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.

    • By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.

      • Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

      • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

    When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:

    • Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.

    • Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.

    " }, "UpdateServicePrimaryTaskSet":{ "name":"UpdateServicePrimaryTaskSet", @@ -3520,7 +3520,7 @@ "documentation":"

    The optional grace period to provide containers time to bootstrap before failed health checks count towards the maximum number of retries. You can specify between 0 and 300 seconds. By default, the startPeriod is off. This value applies only when you specify a command.

    If a health check succeeds within the startPeriod, then the container is considered healthy and any subsequent failures count toward the maximum number of retries.

    " } }, - "documentation":"

    An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile). This configuration maps to the HEALTHCHECK parameter of docker run.

    The Amazon ECS container agent only monitors and reports on the health checks specified in the task definition. Amazon ECS does not monitor Docker health checks that are embedded in a container image and not specified in the container definition. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image.

    You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.

    The health check is designed to make sure that your containers survive agent restarts, upgrades, or temporary unavailability.

    Amazon ECS performs health checks on containers with the default that launched the container instance or the task.

    The following describes the possible healthStatus values for a container:

    • HEALTHY-The container health check has passed successfully.

    • UNHEALTHY-The container health check has failed.

    • UNKNOWN-The container health check is being evaluated, there's no container health check defined, or Amazon ECS doesn't have the health status of the container.

    The following describes the possible healthStatus values based on the container health checker status of essential containers in the task with the following priority order (high to low):

    • UNHEALTHY-One or more essential containers have failed their health check.

    • UNKNOWN-Any essential container running within the task is in an UNKNOWN state and no other essential containers have an UNHEALTHY state.

    • HEALTHY-All essential containers within the task have passed their health checks.

    Consider the following task health example with 2 containers.

    • If Container1 is UNHEALTHY and Container2 is UNKNOWN, the task health is UNHEALTHY.

    • If Container1 is UNHEALTHY and Container2 is HEALTHY, the task health is UNHEALTHY.

    • If Container1 is HEALTHY and Container2 is UNKNOWN, the task health is UNKNOWN.

    • If Container1 is HEALTHY and Container2 is HEALTHY, the task health is HEALTHY.

    Consider the following task health example with 3 containers.

    • If Container1 is UNHEALTHY and Container2 is UNKNOWN, and Container3 is UNKNOWN, the task health is UNHEALTHY.

    • If Container1 is UNHEALTHY and Container2 is UNKNOWN, and Container3 is HEALTHY, the task health is UNHEALTHY.

    • If Container1 is UNHEALTHY and Container2 is HEALTHY, and Container3 is HEALTHY, the task health is UNHEALTHY.

    • If Container1 is HEALTHY and Container2 is UNKNOWN, and Container3 is HEALTHY, the task health is UNKNOWN.

    • If Container1 is HEALTHY and Container2 is UNKNOWN, and Container3 is UNKNOWN, the task health is UNKNOWN.

    • If Container1 is HEALTHY and Container2 is HEALTHY, and Container3 is HEALTHY, the task health is HEALTHY.

    If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.

    The following are notes about container health check support:

    • If the Amazon ECS container agent becomes disconnected from the Amazon ECS service, this won't cause a container to transition to an UNHEALTHY status. This is by design, to ensure that containers remain running during agent restarts or temporary unavailability. The health check status is the \"last heard from\" response from the Amazon ECS agent, so if the container was considered HEALTHY prior to the disconnect, that status will remain until the agent reconnects and another health check occurs. There are no assumptions made about the status of the container health checks.

    • Container health checks require version 1.17.0 or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS container agent.

    • Container health checks are supported for Fargate tasks if you're using platform version 1.1.0 or greater. For more information, see Fargate platform versions.

    • Container health checks aren't supported for tasks that are part of a service that's configured to use a Classic Load Balancer.

    For an example of how to specify a task definition with multiple containers where container dependency is specified, see Container dependency in the Amazon Elastic Container Service Developer Guide.

    " + "documentation":"

    An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile). This configuration maps to the HEALTHCHECK parameter of docker run.

    The Amazon ECS container agent only monitors and reports on the health checks specified in the task definition. Amazon ECS does not monitor Docker health checks that are embedded in a container image and not specified in the container definition. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image.

    You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.

    The health check is designed to make sure that your containers survive agent restarts, upgrades, or temporary unavailability.

    Amazon ECS performs health checks on containers with the default that launched the container instance or the task.

    The following describes the possible healthStatus values for a container:

    • HEALTHY-The container health check has passed successfully.

    • UNHEALTHY-The container health check has failed.

    • UNKNOWN-The container health check is being evaluated, there's no container health check defined, or Amazon ECS doesn't have the health status of the container.

    The following describes the possible healthStatus values based on the container health checker status of essential containers in the task with the following priority order (high to low):

    • UNHEALTHY-One or more essential containers have failed their health check.

    • UNKNOWN-Any essential container running within the task is in an UNKNOWN state and no other essential containers have an UNHEALTHY state.

    • HEALTHY-All essential containers within the task have passed their health checks.

    Consider the following task health example with 2 containers.

    • If Container1 is UNHEALTHY and Container2 is UNKNOWN, the task health is UNHEALTHY.

    • If Container1 is UNHEALTHY and Container2 is HEALTHY, the task health is UNHEALTHY.

    • If Container1 is HEALTHY and Container2 is UNKNOWN, the task health is UNKNOWN.

    • If Container1 is HEALTHY and Container2 is HEALTHY, the task health is HEALTHY.

    Consider the following task health example with 3 containers.

    • If Container1 is UNHEALTHY and Container2 is UNKNOWN, and Container3 is UNKNOWN, the task health is UNHEALTHY.

    • If Container1 is UNHEALTHY and Container2 is UNKNOWN, and Container3 is HEALTHY, the task health is UNHEALTHY.

    • If Container1 is UNHEALTHY and Container2 is HEALTHY, and Container3 is HEALTHY, the task health is UNHEALTHY.

    • If Container1 is HEALTHY and Container2 is UNKNOWN, and Container3 is HEALTHY, the task health is UNKNOWN.

    • If Container1 is HEALTHY and Container2 is UNKNOWN, and Container3 is UNKNOWN, the task health is UNKNOWN.

    • If Container1 is HEALTHY and Container2 is HEALTHY, and Container3 is HEALTHY, the task health is HEALTHY.

    If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.

    When a container health check fails for a task that is part of a service, the following process occurs:

    1. The task is marked as UNHEALTHY.

    2. The unhealthy task will be stopped, and during the stopping process, it will go through the following states:

      • DEACTIVATING - In this state, Amazon ECS performs additional steps before stopping the task. For example, for tasks that are part of services configured to use Elastic Load Balancing target groups, target groups will be deregistered in this state.

      • STOPPING - The task is in the process of being stopped.

      • DEPROVISIONING - Resources associated with the task are being cleaned up.

      • STOPPED - The task has been completely stopped.

    3. After the old task stops, a new task will be launched to ensure service operation, and the new task will go through the following lifecycle:

      • PROVISIONING - Resources required for the task are being provisioned.

      • PENDING - The task is waiting to be placed on a container instance.

      • ACTIVATING - In this state, Amazon ECS pulls container images, creates containers, configures task networking, registers load balancer target groups, and configures service discovery status.

      • RUNNING - The task is running and performing its work.

    For more detailed information about task lifecycle states, see Task lifecycle in the Amazon Elastic Container Service Developer Guide.

    The following are notes about container health check support:

    • If the Amazon ECS container agent becomes disconnected from the Amazon ECS service, this won't cause a container to transition to an UNHEALTHY status. This is by design, to ensure that containers remain running during agent restarts or temporary unavailability. The health check status is the \"last heard from\" response from the Amazon ECS agent, so if the container was considered HEALTHY prior to the disconnect, that status will remain until the agent reconnects and another health check occurs. There are no assumptions made about the status of the container health checks.

    • Container health checks require version 1.17.0 or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS container agent.

    • Container health checks are supported for Fargate tasks if you're using platform version 1.1.0 or greater. For more information, see Fargate platform versions.

    • Container health checks aren't supported for tasks that are part of a service that's configured to use a Classic Load Balancer.

    For an example of how to specify a task definition with multiple containers where container dependency is specified, see Container dependency in the Amazon Elastic Container Service Developer Guide.

    " }, "HealthStatus":{ "type":"string", @@ -4179,7 +4179,7 @@ }, "options":{ "shape":"LogConfigurationOptionsMap", - "documentation":"

    The configuration options to send to the log driver.

    The options you can specify depend on the log driver. Some of the options you can specify when you use the awslogs log driver to route logs to Amazon CloudWatch include the following:

    awslogs-create-group

    Required: No

    Specify whether you want the log group to be created automatically. If this option isn't specified, it defaults to false.

    Your IAM policy must include the logs:CreateLogGroup permission before you attempt to use awslogs-create-group.

    awslogs-region

    Required: Yes

    Specify the Amazon Web Services Region that the awslogs log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.

    awslogs-group

    Required: Yes

    Make sure to specify a log group that the awslogs log driver sends its log streams to.

    awslogs-stream-prefix

    Required: Yes, when using Fargate. Optional when using EC2.

    Use the awslogs-stream-prefix option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format prefix-name/container-name/ecs-task-id.

    If you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.

    For Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.

    You must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.

    awslogs-datetime-format

    Required: No

    This option defines a multiline start pattern in Python strftime format. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages.

    One example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.

    For more information, see awslogs-datetime-format.

    You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options.

    Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.

    awslogs-multiline-pattern

    Required: No

    This option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages.

    For more information, see awslogs-multiline-pattern.

    This option is ignored if awslogs-datetime-format is also configured.

    You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options.

    Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.

    The following options apply to all supported log drivers.

    mode

    Required: No

    Valid values: non-blocking | blocking

    This option defines the delivery mode of log messages from the container to the log driver specified using logDriver. The delivery mode you choose affects application availability when the flow of logs from container is interrupted.

    If you use the blocking mode and the flow of logs is interrupted, calls from container code to write to the stdout and stderr streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.

    If you use the non-blocking mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the max-buffer-size option. This prevents the application from becoming unresponsive when logs cannot be sent. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see Preventing log loss with non-blocking mode in the awslogs container log driver.

    You can set a default mode for all containers in a specific Amazon Web Services Region by using the defaultLogDriverMode account setting. If you don't specify the mode option or configure the account setting, Amazon ECS will default to the blocking mode. For more information about the account setting, see Default log driver mode in the Amazon Elastic Container Service Developer Guide.

    max-buffer-size

    Required: No

    Default value: 1m

    When non-blocking mode is used, the max-buffer-size log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.

    To route logs using the splunk log router, you need to specify a splunk-token and a splunk-url.

    When you use the awsfirelens log router to route logs to an Amazon Web Services Service or Amazon Web Services Partner Network destination for log storage and analytics, you can set the log-driver-buffer-limit option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.

    Other options you can specify when using awsfirelens to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the Amazon Web Services Region with region and a name for the log stream with delivery_stream.

    When you export logs to Amazon Kinesis Data Streams, you can specify an Amazon Web Services Region with region and a data stream name with stream.

    When you export logs to Amazon OpenSearch Service, you can specify options like Name, Host (OpenSearch Service endpoint without protocol), Port, Index, Type, Aws_auth, Aws_region, Suppress_Type_Name, and tls. For more information, see Under the hood: FireLens for Amazon ECS Tasks.

    When you export logs to Amazon S3, you can specify the bucket using the bucket option. You can also specify region, total_file_size, upload_timeout, and use_put_object as options.

    This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

    " + "documentation":"

    The configuration options to send to the log driver.

    The options you can specify depend on the log driver. Some of the options you can specify when you use the awslogs log driver to route logs to Amazon CloudWatch include the following:

    awslogs-create-group

    Required: No

    Specify whether you want the log group to be created automatically. If this option isn't specified, it defaults to false.

    Your IAM policy must include the logs:CreateLogGroup permission before you attempt to use awslogs-create-group.

    awslogs-region

    Required: Yes

    Specify the Amazon Web Services Region that the awslogs log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.

    awslogs-group

    Required: Yes

    Make sure to specify a log group that the awslogs log driver sends its log streams to.

    awslogs-stream-prefix

    Required: Yes, when using Fargate. Optional when using EC2.

    Use the awslogs-stream-prefix option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format prefix-name/container-name/ecs-task-id.

    If you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.

    For Amazon ECS services, you can use the service name as the prefix. That way, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.

    You must specify a stream-prefix for your logs so that your logs appear in the Log pane when you use the Amazon ECS console.

    awslogs-datetime-format

    Required: No

    This option defines a multiline start pattern in Python strftime format. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages.

    One example use case for this format is parsing output such as a stack dump, which might otherwise be logged across multiple entries. The correct pattern allows it to be captured in a single entry.

    For more information, see awslogs-datetime-format.

    You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options.

    Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.

    awslogs-multiline-pattern

    Required: No

    This option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages.

    For more information, see awslogs-multiline-pattern.

    This option is ignored if awslogs-datetime-format is also configured.

    You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options.

    Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.
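
    As an aside (not part of this patch), both multiline options are ordinary entries in the LogConfiguration options map. Below is a minimal sketch with the AWS SDK for Java v2, assuming a hypothetical log group and Region, that starts a new log message at each timestamped line:

    import java.util.HashMap;
    import java.util.Map;
    import software.amazon.awssdk.services.ecs.model.LogConfiguration;
    import software.amazon.awssdk.services.ecs.model.LogDriver;

    public class MultilineAwslogsSketch {
        public static void main(String[] args) {
            Map<String, String> options = new HashMap<>();
            options.put("awslogs-group", "/ecs/my-app");   // hypothetical log group
            options.put("awslogs-region", "us-east-1");    // hypothetical Region
            // A line matching this strftime pattern starts a new message;
            // non-matching lines (e.g. stack trace frames) are appended to it.
            options.put("awslogs-datetime-format", "%Y-%m-%d %H:%M:%S");

            LogConfiguration logConfig = LogConfiguration.builder()
                    .logDriver(LogDriver.AWSLOGS)
                    .options(options)
                    .build();
            System.out.println(logConfig);
        }
    }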

    The following options apply to all supported log drivers.

    mode

    Required: No

    Valid values: non-blocking | blocking

    This option defines the delivery mode of log messages from the container to the log driver specified using logDriver. The delivery mode you choose affects application availability when the flow of logs from the container is interrupted.

    If you use the blocking mode and the flow of logs is interrupted, calls from container code to write to the stdout and stderr streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container health check failures.

    If you use the non-blocking mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the max-buffer-size option. This prevents the application from becoming unresponsive when logs cannot be sent. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see Preventing log loss with non-blocking mode in the awslogs container log driver.

    You can set a default mode for all containers in a specific Amazon Web Services Region by using the defaultLogDriverMode account setting. If you don't specify the mode option or configure the account setting, Amazon ECS will default to the blocking mode. For more information about the account setting, see Default log driver mode in the Amazon Elastic Container Service Developer Guide.

    On June 25, 2025, Amazon ECS is changing the default log driver mode from blocking to non-blocking to prioritize task availability over logging. To continue using the blocking mode after this change, do one of the following:

    • Set the mode option in your container definition's logConfiguration to blocking.

    • Set the defaultLogDriverMode account setting to blocking.
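
    As an illustrative sketch (not part of this patch), opting the account back into blocking mode ahead of that change is a single PutAccountSettingDefault call with the AWS SDK for Java v2 ECS client, assuming default credential and Region resolution:

    import software.amazon.awssdk.services.ecs.EcsClient;

    public class KeepBlockingModeSketch {
        public static void main(String[] args) {
            try (EcsClient ecs = EcsClient.create()) {
                // Keep "blocking" as the account-wide default log delivery
                // mode in this Region after the June 25, 2025 change.
                ecs.putAccountSettingDefault(r -> r
                        .name("defaultLogDriverMode")
                        .value("blocking"));
            }
        }
    }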

    max-buffer-size

    Required: No

    Default value: 1m

    When non-blocking mode is used, the max-buffer-size log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.
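
    Putting the two options together, mode and max-buffer-size are plain entries in the LogConfiguration options map. A minimal sketch with the AWS SDK for Java v2 (illustrative only; the log group, Region, prefix, and buffer size are assumed values):

    import java.util.HashMap;
    import java.util.Map;
    import software.amazon.awssdk.services.ecs.model.LogConfiguration;
    import software.amazon.awssdk.services.ecs.model.LogDriver;

    public class NonBlockingAwslogsSketch {
        public static void main(String[] args) {
            Map<String, String> options = new HashMap<>();
            options.put("awslogs-group", "/ecs/my-app");         // assumed log group
            options.put("awslogs-region", "us-east-1");          // assumed Region
            options.put("awslogs-stream-prefix", "my-service");  // recommended for traceable streams
            options.put("mode", "non-blocking");                 // favor availability over delivery
            options.put("max-buffer-size", "25m");               // size for your log throughput

            LogConfiguration logConfig = LogConfiguration.builder()
                    .logDriver(LogDriver.AWSLOGS)
                    .options(options)
                    .build();
            System.out.println(logConfig);
        }
    }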

    To route logs using the splunk log router, you need to specify a splunk-token and a splunk-url.

    When you use the awsfirelens log router to route logs to an Amazon Web Services service or Amazon Web Services Partner Network destination for log storage and analytics, you can set the log-driver-buffer-limit option to limit the number of events that are buffered in memory before being sent to the log router container. This can help resolve potential log loss issues, because high throughput might otherwise exhaust the memory available for the buffer inside Docker.

    Other options you can specify when using awsfirelens to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the Amazon Web Services Region with region and a name for the log stream with delivery_stream.

    When you export logs to Amazon Kinesis Data Streams, you can specify an Amazon Web Services Region with region and a data stream name with stream.

    When you export logs to Amazon OpenSearch Service, you can specify options like Name, Host (OpenSearch Service endpoint without protocol), Port, Index, Type, Aws_auth, Aws_region, Suppress_Type_Name, and tls. For more information, see Under the hood: FireLens for Amazon ECS Tasks.

    When you export logs to Amazon S3, you can specify the bucket using the bucket option. You can also specify region, total_file_size, upload_timeout, and use_put_object as options.
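
    As an illustrative sketch (not part of this patch), routing to Amazon Data Firehose through awsfirelens only changes the driver and the options map. The Name key selecting the Fluent Bit output plugin is an assumption here; the text above only names region and delivery_stream, and the values below are made up:

    import java.util.HashMap;
    import java.util.Map;
    import software.amazon.awssdk.services.ecs.model.LogConfiguration;
    import software.amazon.awssdk.services.ecs.model.LogDriver;

    public class FirelensFirehoseSketch {
        public static void main(String[] args) {
            Map<String, String> options = new HashMap<>();
            options.put("Name", "firehose");                  // assumed output plugin name
            options.put("region", "us-west-2");               // assumed Region
            options.put("delivery_stream", "my-log-stream");  // assumed delivery stream

            LogConfiguration logConfig = LogConfiguration.builder()
                    .logDriver(LogDriver.AWSFIRELENS)
                    .options(options)
                    .build();
            System.out.println(logConfig);
        }
    }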

    This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

    " }, "secretOptions":{ "shape":"SecretList", @@ -4674,7 +4674,7 @@ "members":{ "name":{ "shape":"SettingName", - "documentation":"

    The resource name for which to modify the account setting.

    The following are the valid values for the account setting name.

    • serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • containerInstanceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • awsvpcTrunking - When modified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched with the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

    • containerInsights - Container Insights with enhanced observability provides all the Container Insights metrics, plus additional task and container metrics. This version supports enhanced observability for Amazon ECS clusters using the Amazon EC2 and Fargate launch types. After you configure Container Insights with enhanced observability on Amazon ECS, Container Insights auto-collects detailed infrastructure telemetry from the cluster level down to the container level in your environment and displays this critical performance data in curated dashboards, removing the heavy lifting from observability setup.

      To use Container Insights with enhanced observability, set the containerInsights account setting to enhanced.

      To use Container Insights, set the containerInsights account setting to enabled.

      For more information, see Monitor Amazon ECS containers using Container Insights with enhanced observability in the Amazon Elastic Container Service Developer Guide.

    • dualStackIPv6 - When turned on, tasks that use the awsvpc network mode in a VPC configured for dual-stack mode can have an IPv6 address assigned. For more information on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode.

    • fargateFIPSMode - If you specify fargateFIPSMode, Fargate FIPS 140 compliance is affected.

    • fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a Fargate task. For information about Fargate task maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.

    • tagResourceAuthorization - Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecs:CreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.

    • defaultLogDriverMode - Amazon ECS supports setting a default delivery mode of log messages from a container to the logDriver that you specify in the container's logConfiguration. The delivery mode affects application stability when the flow of logs from the container to the log driver is interrupted. The defaultLogDriverMode setting supports two values: blocking and non-blocking. If you don't specify a delivery mode in your container definition's logConfiguration, the mode you specify using this account setting will be used as the default. For more information about log delivery modes, see LogConfiguration.

    • guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

    " + "documentation":"

    The resource name for which to modify the account setting.

    The following are the valid values for the account setting name.

    • serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • containerInstanceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • awsvpcTrunking - When modified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched with the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

    • containerInsights - Container Insights with enhanced observability provides all the Container Insights metrics, plus additional task and container metrics. This version supports enhanced observability for Amazon ECS clusters using the Amazon EC2 and Fargate launch types. After you configure Container Insights with enhanced observability on Amazon ECS, Container Insights auto-collects detailed infrastructure telemetry from the cluster level down to the container level in your environment and displays this critical performance data in curated dashboards, removing the heavy lifting from observability setup.

      To use Container Insights with enhanced observability, set the containerInsights account setting to enhanced.

      To use Container Insights, set the containerInsights account setting to enabled.

      For more information, see Monitor Amazon ECS containers using Container Insights with enhanced observability in the Amazon Elastic Container Service Developer Guide.

    • dualStackIPv6 - When turned on, tasks that use the awsvpc network mode in a VPC configured for dual-stack mode can have an IPv6 address assigned. For more information on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode.

    • fargateFIPSMode - If you specify fargateFIPSMode, Fargate FIPS 140 compliance is affected.

    • fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a Fargate task. For information about Fargate task maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.

    • tagResourceAuthorization - Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecs:CreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.

    • defaultLogDriverMode - Amazon ECS supports setting a default delivery mode of log messages from a container to the logDriver that you specify in the container's logConfiguration. The delivery mode affects application stability when the flow of logs from the container to the log driver is interrupted. The defaultLogDriverMode setting supports two values: blocking and non-blocking. If you don't specify a delivery mode in your container definition's logConfiguration, the mode you specify using this account setting will be used as the default. For more information about log delivery modes, see LogConfiguration.

      On June 25, 2025, Amazon ECS is changing the default log driver mode from blocking to non-blocking to prioritize task availability over logging. To continue using the blocking mode after this change, do one of the following:

      • Set the mode option in your container definition's logConfiguration to blocking.

      • Set the defaultLogDriverMode account setting to blocking.

    • guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.
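
    As an illustrative sketch (not part of this patch), switching an account's default to Container Insights with enhanced observability is one PutAccountSettingDefault call with the AWS SDK for Java v2:

    import software.amazon.awssdk.services.ecs.EcsClient;

    public class EnhancedContainerInsightsSketch {
        public static void main(String[] args) {
            try (EcsClient ecs = EcsClient.create()) {
                // New clusters in this Region default to Container Insights
                // with enhanced observability; use "enabled" for the
                // standard version instead.
                ecs.putAccountSettingDefault(r -> r
                        .name("containerInsights")
                        .value("enhanced"));
            }
        }
    }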

    " }, "value":{ "shape":"String", @@ -4700,7 +4700,7 @@ "members":{ "name":{ "shape":"SettingName", - "documentation":"

    The Amazon ECS account setting name to modify.

    The following are the valid values for the account setting name.

    • serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • containerInstanceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • awsvpcTrunking - When modified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched with the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

    • containerInsights - Container Insights with enhanced observability provides all the Container Insights metrics, plus additional task and container metrics. This version supports enhanced observability for Amazon ECS clusters using the Amazon EC2 and Fargate launch types. After you configure Container Insights with enhanced observability on Amazon ECS, Container Insights auto-collects detailed infrastructure telemetry from the cluster level down to the container level in your environment and displays this critical performance data in curated dashboards, removing the heavy lifting from observability setup.

      To use Container Insights with enhanced observability, set the containerInsights account setting to enhanced.

      To use Container Insights, set the containerInsights account setting to enabled.

      For more information, see Monitor Amazon ECS containers using Container Insights with enhanced observability in the Amazon Elastic Container Service Developer Guide.

    • dualStackIPv6 - When turned on, tasks that use the awsvpc network mode in a VPC configured for dual-stack mode can have an IPv6 address assigned. For more information on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode.

    • fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a Fargate task. For information about Fargate task maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.

    • tagResourceAuthorization - Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecs:CreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.

    • defaultLogDriverMode - Amazon ECS supports setting a default delivery mode of log messages from a container to the logDriver that you specify in the container's logConfiguration. The delivery mode affects application stability when the flow of logs from the container to the log driver is interrupted. The defaultLogDriverMode setting supports two values: blocking and non-blocking. If you don't specify a delivery mode in your container definition's logConfiguration, the mode you specify using this account setting will be used as the default. For more information about log delivery modes, see LogConfiguration.

    • guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

    " + "documentation":"

    The Amazon ECS account setting name to modify.

    The following are the valid values for the account setting name.

    • serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • containerInstanceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • awsvpcTrunking - When modified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched with the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

    • containerInsights - Container Insights with enhanced observability provides all the Container Insights metrics, plus additional task and container metrics. This version supports enhanced observability for Amazon ECS clusters using the Amazon EC2 and Fargate launch types. After you configure Container Insights with enhanced observability on Amazon ECS, Container Insights auto-collects detailed infrastructure telemetry from the cluster level down to the container level in your environment and displays this critical performance data in curated dashboards, removing the heavy lifting from observability setup.

      To use Container Insights with enhanced observability, set the containerInsights account setting to enhanced.

      To use Container Insights, set the containerInsights account setting to enabled.

      For more information, see Monitor Amazon ECS containers using Container Insights with enhanced observability in the Amazon Elastic Container Service Developer Guide.

    • dualStackIPv6 - When turned on, tasks that use the awsvpc network mode in a VPC configured for dual-stack mode can have an IPv6 address assigned. For more information on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode.

    • fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a Fargate task. For information about Fargate task maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.

    • tagResourceAuthorization - Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecs:CreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.

    • defaultLogDriverMode - Amazon ECS supports setting a default delivery mode of log messages from a container to the logDriver that you specify in the container's logConfiguration. The delivery mode affects application stability when the flow of logs from the container to the log driver is interrupted. The defaultLogDriverMode setting supports two values: blocking and non-blocking. If you don't specify a delivery mode in your container definition's logConfiguration, the mode you specify using this account setting will be used as the default. For more information about log delivery modes, see LogConfiguration.

      On June 25, 2025, Amazon ECS is changing the default log driver mode from blocking to non-blocking to prioritize task availability over logging. To continue using the blocking mode after this change, do one of the following:

      • Set the mode option in your container definition's logConfiguration to blocking.

      • Set the defaultLogDriverMode account setting to blocking.

    • guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.
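
    Unlike PutAccountSettingDefault, PutAccountSetting can target a single principal. A minimal sketch with the AWS SDK for Java v2 (not part of this patch; the role ARN is a made-up value):

    import software.amazon.awssdk.services.ecs.EcsClient;
    import software.amazon.awssdk.services.ecs.model.Setting;

    public class PutAccountSettingSketch {
        public static void main(String[] args) {
            try (EcsClient ecs = EcsClient.create()) {
                // Opt a single role into long ARN format; omit principalArn
                // to apply the setting to the calling identity instead.
                Setting setting = ecs.putAccountSetting(r -> r
                        .name("serviceLongArnFormat")
                        .value("enabled")
                        .principalArn("arn:aws:iam::123456789012:role/deploy-role"))
                        .setting();
                System.out.println(setting);
            }
        }
    }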

    " }, "value":{ "shape":"String", @@ -7236,7 +7236,7 @@ "members":{ "cluster":{ "shape":"String", - "documentation":"

    The short name or full Amazon Resource Name (ARN) of the cluster that your service runs on. If you do not specify a cluster, the default cluster is assumed.

    " + "documentation":"

    The short name or full Amazon Resource Name (ARN) of the cluster that your service runs on. If you do not specify a cluster, the default cluster is assumed.

    You can't change the cluster name.

    " }, "service":{ "shape":"String", @@ -7252,7 +7252,7 @@ }, "capacityProviderStrategy":{ "shape":"CapacityProviderStrategy", - "documentation":"

    The capacity provider strategy to update the service to use.

    If the service uses the default capacity provider strategy for the cluster, the service can be updated to use one or more capacity providers as opposed to the default capacity provider strategy. However, when a service is using a capacity provider strategy that's not the default capacity provider strategy, the service can't be updated to use the cluster's default capacity provider strategy.

    A capacity provider strategy consists of one or more capacity providers along with the base and weight to assign to them. A capacity provider must be associated with the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster. Only capacity providers with an ACTIVE or UPDATING status can be used.

    If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New capacity providers can be created with the CreateClusterCapacityProvider API operation.

    To use a Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

    The PutClusterCapacityProviders API operation is used to update the list of available capacity providers for a cluster after the cluster is created.

    " + "documentation":"

    The details of a capacity provider strategy. You can set a capacity provider when you create a cluster, run a task, or update a service.

    When you use Fargate, the capacity providers are FARGATE or FARGATE_SPOT.

    When you use Amazon EC2, the capacity providers are Auto Scaling groups.

    You can change capacity providers for rolling deployments and blue/green deployments.

    The following list provides the valid transitions:

    • Update the Fargate launch type to an EC2 capacity provider.

    • Update the Amazon EC2 launch type to a Fargate capacity provider.

    • Update the Fargate capacity provider to an EC2 capacity provider.

    • Update the Amazon EC2 capacity provider to a Fargate capacity provider.

    • Update the EC2 or Fargate capacity provider back to the launch type.

      Pass an empty list in the capacityProvider parameter.

    For information about Amazon Web Services CDK considerations, see Amazon Web Services CDK considerations.
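
    As an illustrative sketch (not part of this patch), moving a service onto a Fargate capacity provider strategy is an UpdateService call with the AWS SDK for Java v2; the cluster and service names and the weights are assumed values:

    import software.amazon.awssdk.services.ecs.EcsClient;
    import software.amazon.awssdk.services.ecs.model.CapacityProviderStrategyItem;

    public class UpdateCapacityProviderSketch {
        public static void main(String[] args) {
            try (EcsClient ecs = EcsClient.create()) {
                ecs.updateService(r -> r
                        .cluster("my-cluster")   // assumed cluster name
                        .service("my-service")   // assumed service name
                        // Keep one task on FARGATE, then spread the rest
                        // 4:1 across FARGATE_SPOT and FARGATE.
                        .capacityProviderStrategy(
                                CapacityProviderStrategyItem.builder()
                                        .capacityProvider("FARGATE")
                                        .base(1)
                                        .weight(1)
                                        .build(),
                                CapacityProviderStrategyItem.builder()
                                        .capacityProvider("FARGATE_SPOT")
                                        .weight(4)
                                        .build()));
            }
        }
    }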

    " }, "deploymentConfiguration":{ "shape":"DeploymentConfiguration", @@ -7296,7 +7296,7 @@ }, "loadBalancers":{ "shape":"LoadBalancers", - "documentation":"

    A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the container name, and the container port to access from the load balancer. The container name is as it appears in a container definition.

    When you add, update, or remove a load balancer configuration, Amazon ECS starts new tasks with the updated Elastic Load Balancing configuration, and then stops the old tasks when the new tasks are running.

    For services that use rolling updates, you can add, update, or remove Elastic Load Balancing target groups. You can update from a single target group to multiple target groups and from multiple target groups to a single target group.

    For services that use blue/green deployments, you can update Elastic Load Balancing target groups by using CreateDeployment through CodeDeploy. Note that multiple target groups are not supported for blue/green deployments. For more information, see Register multiple target groups with a service in the Amazon Elastic Container Service Developer Guide.

    For services that use the external deployment controller, you can add, update, or remove load balancers by using CreateTaskSet. Note that multiple target groups are not supported for external deployments. For more information, see Register multiple target groups with a service in the Amazon Elastic Container Service Developer Guide.

    You can remove existing loadBalancers by passing an empty list.

    " + "documentation":"

    You must have a service-linked role when you update this property.

    A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the container name, and the container port to access from the load balancer. The container name is as it appears in a container definition.

    When you add, update, or remove a load balancer configuration, Amazon ECS starts new tasks with the updated Elastic Load Balancing configuration, and then stops the old tasks when the new tasks are running.

    For services that use rolling updates, you can add, update, or remove Elastic Load Balancing target groups. You can update from a single target group to multiple target groups and from multiple target groups to a single target group.

    For services that use blue/green deployments, you can update Elastic Load Balancing target groups by using CreateDeployment through CodeDeploy. Note that multiple target groups are not supported for blue/green deployments. For more information, see Register multiple target groups with a service in the Amazon Elastic Container Service Developer Guide.

    For services that use the external deployment controller, you can add, update, or remove load balancers by using CreateTaskSet. Note that multiple target groups are not supported for external deployments. For more information, see Register multiple target groups with a service in the Amazon Elastic Container Service Developer Guide.

    You can remove existing loadBalancers by passing an empty list.
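
    As an illustrative sketch (not part of this patch), repointing a rolling-update service at a different target group looks like the following with the AWS SDK for Java v2; the ARN, names, and port are assumed values:

    import software.amazon.awssdk.services.ecs.EcsClient;
    import software.amazon.awssdk.services.ecs.model.LoadBalancer;

    public class UpdateLoadBalancersSketch {
        public static void main(String[] args) {
            try (EcsClient ecs = EcsClient.create()) {
                // ECS starts new tasks registered against the new target
                // group, then stops the old tasks once they are running.
                ecs.updateService(r -> r
                        .cluster("my-cluster")
                        .service("my-service")
                        .loadBalancers(LoadBalancer.builder()
                                .targetGroupArn("arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-tg/0123456789abcdef")
                                .containerName("app")
                                .containerPort(8080)
                                .build()));
            }
        }
    }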

    " }, "propagateTags":{ "shape":"PropagateTags", @@ -7304,7 +7304,7 @@ }, "serviceRegistries":{ "shape":"ServiceRegistries", - "documentation":"

    The details for the service discovery registries to assign to this service. For more information, see Service Discovery.

    When you add, update, or remove the service registries configuration, Amazon ECS starts new tasks with the updated service registries configuration, and then stops the old tasks when the new tasks are running.

    You can remove existing serviceRegistries by passing an empty list.

    " + "documentation":"

    You must have a service-linked role when you update this property.

    For more information about the role, see the CreateService request parameter role.

    The details for the service discovery registries to assign to this service. For more information, see Service Discovery.

    When you add, update, or remove the service registries configuration, Amazon ECS starts new tasks with the updated service registries configuration, and then stops the old tasks when the new tasks are running.

    You can remove existing serviceRegistries by passing an empty list.
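
    The empty-list semantics matter here. As a minimal sketch with the AWS SDK for Java v2 (not part of this patch; names are assumed), passing an explicit empty list removes all service registries, whereas omitting the field leaves them unchanged:

    import java.util.Collections;
    import software.amazon.awssdk.services.ecs.EcsClient;

    public class RemoveServiceRegistriesSketch {
        public static void main(String[] args) {
            try (EcsClient ecs = EcsClient.create()) {
                // An explicit empty list clears the service discovery
                // registries; leaving serviceRegistries unset keeps them.
                ecs.updateService(r -> r
                        .cluster("my-cluster")
                        .service("my-service")
                        .serviceRegistries(Collections.emptyList()));
            }
        }
    }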

    " }, "serviceConnectConfiguration":{ "shape":"ServiceConnectConfiguration", @@ -7420,7 +7420,7 @@ }, "agentHash":{ "shape":"String", - "documentation":"

    The Git commit hash for the Amazon ECS container agent build on the amazon-ecs-agent GitHub repository.

    " + "documentation":"

    The Git commit hash for the Amazon ECS container agent build on the amazon-ecs-agent GitHub repository.

    " }, "dockerVersion":{ "shape":"String", diff --git a/services/efs/pom.xml b/services/efs/pom.xml index e10e9f176165..8fa7ee58e4b5 100644 --- a/services/efs/pom.xml +++ b/services/efs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT efs AWS Java SDK :: Services :: Amazon Elastic File System diff --git a/services/efs/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/efs/src/main/resources/codegen-resources/endpoint-rule-set.json index 8a7960d98327..3d3ccd0d215d 100644 --- a/services/efs/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/efs/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -1,12 +1,6 @@ { "version": "1.0", "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, "UseDualStack": { "builtIn": "AWS::UseDualStack", "required": true, @@ -26,6 +20,12 @@ "required": false, "documentation": "Override the endpoint used to send this request", "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" } }, "rules": [ @@ -57,152 +57,287 @@ "type": "error" }, { - "conditions": [ + "conditions": [], + "rules": [ { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, - true - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree" } ], "type": "tree" }, { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], + "conditions": [], "rules": [ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { "ref": "Region" } - ], - "assign": "PartitionResult" + ] } ], "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] + "ref": "Region" + } + ], + "assign": "PartitionResult" } ], "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] - } + }, + "aws" ] }, { "fn": "booleanEquals", "argv": [ - true, + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://efs.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] - } + }, + "aws" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } ], - "rules": [ + "endpoint": { + "url": 
"https://efs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [], - "endpoint": { - "url": "https://elasticfilesystem-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-cn" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "type": "tree" + "endpoint": { + "url": "https://efs.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-cn" + ] }, - true - ] - } - ], - "rules": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://efs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-us-gov" + ] + }, { "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://efs.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", "argv": [ { "fn": "getAttr", @@ -210,105 +345,258 @@ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] }, + "aws-us-gov" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://efs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, true ] } ], "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://elasticfilesystem-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://elasticfilesystem-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - 
"headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ], "type": "tree" }, { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] }, - true - ] - } - ], - "rules": [ - { - "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", + "ref": "UseDualStack" + }, + false + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] }, - "supportsDualStack" + true ] } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://elasticfilesystem-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } ], "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://elasticfilesystem.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://elasticfilesystem.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ], "type": "tree" }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://elasticfilesystem.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" - }, - { - "conditions": [], - "endpoint": { - "url": "https://elasticfilesystem.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ], "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ], "type": "tree" - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] } \ No newline at end of file diff --git a/services/efs/src/main/resources/codegen-resources/endpoint-tests.json b/services/efs/src/main/resources/codegen-resources/endpoint-tests.json index 3270bbfe6da0..b47d439e3bbf 100644 --- a/services/efs/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/efs/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,471 +1,75 @@ { "testCases": [ { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region not set and fips disabled", "expect": { "endpoint": { - "url": 
"https://elasticfilesystem.af-south-1.amazonaws.com" - } - }, - "params": { - "Region": "af-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "Region": "af-south-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.ap-east-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.ap-east-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-2", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-3", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-3", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.ap-south-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-south-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.ap-southeast-1.amazonaws.com" - 
} - }, - "params": { - "Region": "ap-southeast-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.ap-southeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-southeast-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.ap-southeast-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-southeast-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.ap-southeast-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-southeast-2", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.ap-southeast-3.amazonaws.com" - } - }, - "params": { - "Region": "ap-southeast-3", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.ap-southeast-3.amazonaws.com" - } - }, - "params": { - "Region": "ap-southeast-3", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.ca-central-1.amazonaws.com" - } - }, - "params": { - "Region": "ca-central-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "Region": "ca-central-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.eu-central-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-central-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-central-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.eu-north-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-north-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-north-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.eu-south-1.amazonaws.com" - } - }, - "params": { - "Region": 
"eu-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-south-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.eu-west-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.eu-west-2.amazonaws.com" + "url": "https://example.com" } }, "params": { - "Region": "eu-west-2", - "UseFIPS": false, - "UseDualStack": false + "Endpoint": "https://example.com", + "UseFIPS": false } }, { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For custom endpoint with fips enabled", "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.eu-west-2.amazonaws.com" - } + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "Region": "eu-west-2", - "UseFIPS": true, - "UseDualStack": false + "Endpoint": "https://example.com", + "UseFIPS": true } }, { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with fips disabled and dualstack enabled", "expect": { - "endpoint": { - "url": "https://elasticfilesystem.eu-west-3.amazonaws.com" - } + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "Region": "eu-west-3", + "Endpoint": "https://example.com", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem-fips.eu-west-3.amazonaws.com" + "url": "https://efs-fips.us-east-1.api.aws" } }, "params": { - "Region": "eu-west-3", + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.me-south-1.amazonaws.com" - } - }, - "params": { - "Region": "me-south-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem-fips.me-south-1.amazonaws.com" + "url": "https://elasticfilesystem-fips.us-east-1.amazonaws.com" } }, "params": { - "Region": "me-south-1", + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled 
and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem.sa-east-1.amazonaws.com" + "url": "https://efs.us-east-1.api.aws" } }, "params": { - "Region": "sa-east-1", + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "Region": "sa-east-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": true } }, { @@ -482,423 +86,299 @@ } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem-fips.us-east-1.amazonaws.com" + "url": "https://efs-fips.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "us-east-1", + "Region": "cn-northwest-1", "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.us-east-2.amazonaws.com" - } - }, - "params": { - "Region": "us-east-2", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem-fips.us-east-2.amazonaws.com" + "url": "https://elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "us-east-2", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem.us-west-1.amazonaws.com" + "url": "https://efs.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "us-west-1", + "Region": "cn-northwest-1", "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "Region": "us-west-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem.us-west-2.amazonaws.com" + "url": "https://elasticfilesystem.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "us-west-2", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem-fips.us-west-2.amazonaws.com" + "url": "https://efs-fips.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-west-2", + "Region": "us-gov-west-1", "UseFIPS": true, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and 
DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem-fips.us-east-1.api.aws" + "url": "https://elasticfilesystem-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem.us-east-1.api.aws" + "url": "https://efs.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem.cn-north-1.amazonaws.com.cn" + "url": "https://elasticfilesystem.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "cn-north-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.cn-north-1.amazonaws.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "cn-north-1", + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.cn-northwest-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-northwest-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "cn-northwest-1", + "Region": "us-iso-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://elasticfilesystem.cn-north-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "cn-north-1", + "Region": "us-iso-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://elasticfilesystem.us-gov-east-1.amazonaws.com" + "url": "https://elasticfilesystem.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-iso-east-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.us-gov-east-1.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-gov-east-1", + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.us-gov-west-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-west-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem-fips.us-gov-west-1.amazonaws.com" + "url": "https://elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "Region": "us-gov-west-1", + "Region": "us-isob-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://elasticfilesystem.us-gov-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-gov-east-1", + "Region": "us-isob-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem.us-iso-east-1.c2s.ic.gov" + "url": "https://elasticfilesystem.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "Region": "us-iso-east-1", + "Region": "us-isob-east-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-iso-east-1", + "Region": "eu-isoe-west-1", "UseFIPS": true, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": 
"https://elasticfilesystem-fips.eu-isoe-west-1.cloud.adc-e.uk" + } }, "params": { - "Region": "us-iso-east-1", + "Region": "eu-isoe-west-1", "UseFIPS": true, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", "expect": { "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-iso-east-1", + "Region": "eu-isoe-west-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem.us-isob-east-1.sc2s.sgov.gov" + "url": "https://elasticfilesystem.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { - "Region": "us-isob-east-1", + "Region": "eu-isoe-west-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-isob-east-1", + "Region": "us-isof-south-1", "UseFIPS": true, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://elasticfilesystem-fips.us-isof-south-1.csp.hci.ic.gov" + } }, "params": { - "Region": "us-isob-east-1", + "Region": "us-isof-south-1", "UseFIPS": true, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", "expect": { "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-isob-east-1", + "Region": "us-isof-south-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" + "url": "https://elasticfilesystem.us-isof-south-1.csp.hci.ic.gov" } }, "params": { + "Region": "us-isof-south-1", "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", - "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false, - "Endpoint": 
"https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", - "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" + "UseDualStack": false } }, { diff --git a/services/efs/src/main/resources/codegen-resources/service-2.json b/services/efs/src/main/resources/codegen-resources/service-2.json index b59403799c80..554e5f11ff8a 100644 --- a/services/efs/src/main/resources/codegen-resources/service-2.json +++ b/services/efs/src/main/resources/codegen-resources/service-2.json @@ -31,7 +31,7 @@ {"shape":"AccessPointLimitExceeded"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Creates an EFS access point. An access point is an application-specific view into an EFS file system that applies an operating system user and group, and a file system path, to any file system request made through the access point. The operating system user and group override any identity information provided by the NFS client. The file system path is exposed as the access point's root directory. Applications using the access point can only access data in the application's own directory and any subdirectories. To learn more, see Mounting a file system using EFS access points.

    If multiple requests to create access points on the same file system are sent in quick succession, and the file system is near the limit of 1,000 access points, you may experience a throttling response for these requests. This is to ensure that the file system does not exceed the stated access point limit.

    This operation requires permissions for the elasticfilesystem:CreateAccessPoint action.

    Access points can be tagged on creation. If tags are specified in the creation action, IAM performs additional authorization on the elasticfilesystem:TagResource action to verify if users have permissions to create tags. Therefore, you must grant explicit permissions to use the elasticfilesystem:TagResource action. For more information, see Granting permissions to tag resources during creation.

    " + "documentation":"

    Creates an EFS access point. An access point is an application-specific view into an EFS file system that applies an operating system user and group, and a file system path, to any file system request made through the access point. The operating system user and group override any identity information provided by the NFS client. The file system path is exposed as the access point's root directory. Applications using the access point can only access data in the application's own directory and any subdirectories. A file system can have a maximum of 10,000 access points unless you request an increase. To learn more, see Mounting a file system using EFS access points.

    If multiple requests to create access points on the same file system are sent in quick succession, and the file system is near its access point limit, you may receive a throttling response for these requests. This ensures that the file system does not exceed the stated access point limit.

    This operation requires permissions for the elasticfilesystem:CreateAccessPoint action.

    Access points can be tagged on creation. If tags are specified in the creation action, IAM performs additional authorization on the elasticfilesystem:TagResource action to verify if users have permissions to create tags. Therefore, you must grant explicit permissions to use the elasticfilesystem:TagResource action. For more information, see Granting permissions to tag resources during creation.
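
As a hedged illustration of the create-and-tag flow described above (not part of this patch; the file system ID, POSIX IDs, path, and tag values are placeholders):

```java
import software.amazon.awssdk.services.efs.EfsClient;
import software.amazon.awssdk.services.efs.model.CreateAccessPointRequest;
import software.amazon.awssdk.services.efs.model.CreateAccessPointResponse;
import software.amazon.awssdk.services.efs.model.CreationInfo;
import software.amazon.awssdk.services.efs.model.PosixUser;
import software.amazon.awssdk.services.efs.model.RootDirectory;
import software.amazon.awssdk.services.efs.model.Tag;

public class CreateAccessPointExample {
    public static void main(String[] args) {
        try (EfsClient efs = EfsClient.create()) {
            CreateAccessPointResponse response = efs.createAccessPoint(
                CreateAccessPointRequest.builder()
                    .fileSystemId("fs-0123456789abcdef0")            // placeholder ID
                    // The enforced OS identity that overrides the NFS client's identity.
                    .posixUser(PosixUser.builder().uid(1001L).gid(1001L).build())
                    // The path exposed as the access point's root directory.
                    .rootDirectory(RootDirectory.builder()
                        .path("/app-data")
                        .creationInfo(CreationInfo.builder()
                            .ownerUid(1001L).ownerGid(1001L).permissions("750").build())
                        .build())
                    // Tag-on-create requires elasticfilesystem:TagResource, as noted above.
                    .tags(Tag.builder().key("Name").value("app-access-point").build())
                    .build());
            System.out.println("Created: " + response.accessPointArn());
        }
    }
}
```
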

    " }, "CreateFileSystem":{ "name":"CreateFileSystem", @@ -77,7 +77,7 @@ {"shape":"UnsupportedAvailabilityZone"}, {"shape":"AvailabilityZonesMismatch"} ], - "documentation":"

    Creates a mount target for a file system. You can then mount the file system on EC2 instances by using the mount target.

    You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system.

    You can create only one mount target for a One Zone file system. You must create that mount target in the same Availability Zone in which the file system is located. Use the AvailabilityZoneName and AvailabiltyZoneId properties in the DescribeFileSystems response object to get this information. Use the subnetId associated with the file system's Availability Zone when creating the mount target.

    For more information, see Amazon EFS: How it Works.

    To create a mount target for a file system, the file system's lifecycle state must be available. For more information, see DescribeFileSystems.

    In the request, provide the following:

    • The file system ID for which you are creating the mount target.

    • A subnet ID, which determines the following:

      • The VPC in which Amazon EFS creates the mount target

      • The Availability Zone in which Amazon EFS creates the mount target

      • The IP address range from which Amazon EFS selects the IP address of the mount target (if you don't specify an IP address in the request)

    After creating the mount target, Amazon EFS returns a response that includes, a MountTargetId and an IpAddress. You use this IP address when mounting the file system in an EC2 instance. You can also use the mount target's DNS name when mounting the file system. The EC2 instance on which you mount the file system by using the mount target can resolve the mount target's DNS name to its IP address. For more information, see How it Works: Implementation Overview.

    Note that you can create mount targets for a file system in only one VPC, and there can be only one mount target per Availability Zone. That is, if the file system already has one or more mount targets created for it, the subnet specified in the request to add another mount target must meet the following requirements:

    • Must belong to the same VPC as the subnets of the existing mount targets

    • Must not be in the same Availability Zone as any of the subnets of the existing mount targets

    If the request satisfies the requirements, Amazon EFS does the following:

    • Creates a new mount target in the specified subnet.

    • Also creates a new network interface in the subnet as follows:

      • If the request provides an IpAddress, Amazon EFS assigns that IP address to the network interface. Otherwise, Amazon EFS assigns a free address in the subnet (in the same way that the Amazon EC2 CreateNetworkInterface call does when a request does not specify a primary private IP address).

      • If the request provides SecurityGroups, this network interface is associated with those security groups. Otherwise, it belongs to the default security group for the subnet's VPC.

      • Assigns the description Mount target fsmt-id for file system fs-id where fsmt-id is the mount target ID, and fs-id is the FileSystemId.

      • Sets the requesterManaged property of the network interface to true, and the requesterId value to EFS.

      Each Amazon EFS mount target has one corresponding requester-managed EC2 network interface. After the network interface is created, Amazon EFS sets the NetworkInterfaceId field in the mount target's description to the network interface ID, and the IpAddress field to its address. If network interface creation fails, the entire CreateMountTarget operation fails.

    The CreateMountTarget call returns only after creating the network interface, but while the mount target state is still creating, you can check the mount target creation status by calling the DescribeMountTargets operation, which among other things returns the mount target state.

    We recommend that you create a mount target in each of the Availability Zones. There are cost considerations for using a file system in an Availability Zone through a mount target created in another Availability Zone. For more information, see Amazon EFS. In addition, by always using a mount target local to the instance's Availability Zone, you eliminate a partial failure scenario. If the Availability Zone in which your mount target is created goes down, then you can't access your file system through that mount target.

    This operation requires permissions for the following action on the file system:

    • elasticfilesystem:CreateMountTarget

    This operation also requires permissions for the following Amazon EC2 actions:

    • ec2:DescribeSubnets

    • ec2:DescribeNetworkInterfaces

    • ec2:CreateNetworkInterface

    " + "documentation":"

    Creates a mount target for a file system. You can then mount the file system on EC2 instances by using the mount target.

    You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system.

    You can create only one mount target for a One Zone file system. You must create that mount target in the same Availability Zone in which the file system is located. Use the AvailabilityZoneName and AvailabiltyZoneId properties in the DescribeFileSystems response object to get this information. Use the subnetId associated with the file system's Availability Zone when creating the mount target.

    For more information, see Amazon EFS: How it Works.

    To create a mount target for a file system, the file system's lifecycle state must be available. For more information, see DescribeFileSystems.

    In the request, provide the following:

    • The file system ID for which you are creating the mount target.

    • A subnet ID, which determines the following:

      • The VPC in which Amazon EFS creates the mount target

      • The Availability Zone in which Amazon EFS creates the mount target

      • The IP address range from which Amazon EFS selects the IP address of the mount target (if you don't specify an IP address in the request)

    After creating the mount target, Amazon EFS returns a response that includes a MountTargetId and an IpAddress. You use this IP address when mounting the file system in an EC2 instance. You can also use the mount target's DNS name when mounting the file system. The EC2 instance on which you mount the file system by using the mount target can resolve the mount target's DNS name to its IP address. For more information, see How it Works: Implementation Overview.

    Note that you can create mount targets for a file system in only one VPC, and there can be only one mount target per Availability Zone. That is, if the file system already has one or more mount targets created for it, the subnet specified in the request to add another mount target must meet the following requirements:

    • Must belong to the same VPC as the subnets of the existing mount targets

    • Must not be in the same Availability Zone as any of the subnets of the existing mount targets

    If the request satisfies the requirements, Amazon EFS does the following:

    • Creates a new mount target in the specified subnet.

    • Also creates a new network interface in the subnet as follows:

      • If the request provides an IpAddress, Amazon EFS assigns that IP address to the network interface. Otherwise, Amazon EFS assigns a free address in the subnet (in the same way that the Amazon EC2 CreateNetworkInterface call does when a request does not specify a primary private IP address).

      • If the request provides SecurityGroups, this network interface is associated with those security groups. Otherwise, it belongs to the default security group for the subnet's VPC.

      • Assigns the description Mount target fsmt-id for file system fs-id where fsmt-id is the mount target ID, and fs-id is the FileSystemId.

      • Sets the requesterManaged property of the network interface to true, and the requesterId value to EFS.

      Each Amazon EFS mount target has one corresponding requester-managed EC2 network interface. After the network interface is created, Amazon EFS sets the NetworkInterfaceId field in the mount target's description to the network interface ID, and the IpAddress field to its address. If network interface creation fails, the entire CreateMountTarget operation fails.

    The CreateMountTarget call returns only after creating the network interface, but while the mount target state is still creating, you can check the mount target creation status by calling the DescribeMountTargets operation, which among other things returns the mount target state.

    We recommend that you create a mount target in each of the Availability Zones. There are cost considerations for using a file system in an Availability Zone through a mount target created in another Availability Zone. For more information, see Amazon EFS pricing. In addition, by always using a mount target local to the instance's Availability Zone, you eliminate a partial failure scenario. If the Availability Zone in which your mount target is created goes down, then you can't access your file system through that mount target.

    This operation requires permissions for the following action on the file system:

    • elasticfilesystem:CreateMountTarget

    This operation also requires permissions for the following Amazon EC2 actions:

    • ec2:DescribeSubnets

    • ec2:DescribeNetworkInterfaces

    • ec2:CreateNetworkInterface
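
A minimal Java SDK v2 sketch of the request described above (illustrative only; the file system, subnet, and security group IDs are placeholders, and ipAddressType is the member this model update introduces):

```java
import software.amazon.awssdk.services.efs.EfsClient;
import software.amazon.awssdk.services.efs.model.CreateMountTargetRequest;
import software.amazon.awssdk.services.efs.model.CreateMountTargetResponse;
import software.amazon.awssdk.services.efs.model.IpAddressType;

public class CreateMountTargetExample {
    public static void main(String[] args) {
        try (EfsClient efs = EfsClient.create()) {
            CreateMountTargetResponse mt = efs.createMountTarget(
                CreateMountTargetRequest.builder()
                    .fileSystemId("fs-0123456789abcdef0")    // placeholder
                    .subnetId("subnet-0123456789abcdef0")    // placeholder; determines the AZ
                    .securityGroups("sg-0123456789abcdef0")  // defaults to the VPC's default SG if omitted
                    .ipAddressType(IpAddressType.DUAL_STACK) // new member added in this model update
                    .build());
            // The returned address (or the mount target's DNS name) is what instances mount.
            System.out.println(mt.mountTargetId() + " -> " + mt.ipAddress());
        }
    }
}
```
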

    " }, "CreateReplicationConfiguration":{ "name":"CreateReplicationConfiguration", @@ -312,7 +312,7 @@ {"shape":"BadRequest"}, {"shape":"FileSystemNotFound"} ], - "documentation":"

    Returns the current LifecycleConfiguration object for the specified Amazon EFS file system. Lifecycle management uses the LifecycleConfiguration object to identify when to move files between storage classes. For a file system without a LifecycleConfiguration object, the call returns an empty array in the response.

    This operation requires permissions for the elasticfilesystem:DescribeLifecycleConfiguration operation.

    " + "documentation":"

    Returns the current LifecycleConfiguration object for the specified EFS file system. Lifecycle management uses the LifecycleConfiguration object to identify when to move files between storage classes. For a file system without a LifecycleConfiguration object, the call returns an empty array in the response.

    This operation requires permissions for the elasticfilesystem:DescribeLifecycleConfiguration operation.

    " }, "DescribeMountTargetSecurityGroups":{ "name":"DescribeMountTargetSecurityGroups", @@ -789,7 +789,7 @@ }, "ProvisionedThroughputInMibps":{ "shape":"ProvisionedThroughputInMibps", - "documentation":"

    The throughput, measured in mebibytes per second (MiBps), that you want to provision for a file system that you're creating. Required if ThroughputMode is set to provisioned. Valid values are 1-3414 MiBps, with the upper limit depending on Region. To increase this limit, contact Amazon Web Services Support. For more information, see Amazon EFS quotas that you can increase in the Amazon EFS User Guide.

    " + "documentation":"

    The throughput, measured in mebibytes per second (MiBps), that you want to provision for a file system that you're creating. Required if ThroughputMode is set to provisioned. Valid values are 1-3414 MiBps, with the upper limit depending on Region. To increase this limit, contact Amazon Web Services Support. For more information, see Amazon EFS quotas that you can increase in the Amazon EFS User Guide.
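
A sketch of setting this parameter at creation time (not part of this patch; the creation token and the 128-MiBps figure are arbitrary placeholders within the documented 1-3414 range):

```java
import software.amazon.awssdk.services.efs.EfsClient;
import software.amazon.awssdk.services.efs.model.CreateFileSystemRequest;
import software.amazon.awssdk.services.efs.model.ThroughputMode;

public class ProvisionedThroughputExample {
    public static void main(String[] args) {
        try (EfsClient efs = EfsClient.create()) {
            // ProvisionedThroughputInMibps is required only when the
            // throughput mode is PROVISIONED.
            efs.createFileSystem(CreateFileSystemRequest.builder()
                    .creationToken("example-token")             // placeholder idempotency token
                    .throughputMode(ThroughputMode.PROVISIONED)
                    .provisionedThroughputInMibps(128.0)
                    .build());
        }
    }
}
```
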

    " }, "AvailabilityZoneName":{ "shape":"AvailabilityZoneName", @@ -822,11 +822,19 @@ }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

    Valid IPv4 address within the address range of the specified subnet.

    " + "documentation":"

    If the IP address type for the mount target is IPv4, then specify the IPv4 address within the address range of the specified subnet.

    " + }, + "Ipv6Address":{ + "shape":"Ipv6Address", + "documentation":"

    If the IP address type for the mount target is IPv6, then specify the IPv6 address within the address range of the specified subnet.

    " + }, + "IpAddressType":{ + "shape":"IpAddressType", + "documentation":"

    Specify the type of IP address of the mount target you are creating. Options are IPv4, dual stack, or IPv6. If you don’t specify an IpAddressType, then IPv4 is used.

    • IPV4_ONLY – Create the mount target in an IPv4-only or dual-stack subnet.

    • DUAL_STACK – Create the mount target in a dual-stack subnet.

    • IPV6_ONLY – Create the mount target in an IPv6-only subnet.

    Creating an IPv6-only mount target ENI in a dual-stack subnet is not supported.

    " }, "SecurityGroups":{ "shape":"SecurityGroups", - "documentation":"

    Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be for the same VPC as subnet specified.

    " + "documentation":"

    VPC security group IDs, of the form sg-xxxxxxxx. These must be for the same VPC as the subnet specified. The maximum number of security groups depends on account quota. For more information, see Amazon VPC Quotas in the Amazon VPC User Guide (see the Security Groups table).

    " } }, "documentation":"

    " @@ -1369,7 +1377,7 @@ }, "FileSystemId":{ "shape":"FileSystemId", - "documentation":"

    The ID or ARN of the file system to use for the destination. For cross-account replication, this must be an ARN. The file system's replication overwrite replication must be disabled. If no ID or ARN is specified, then a new file system is created.

    " + "documentation":"

    The ID or ARN of the file system to use for the destination. For cross-account replication, this must be an ARN. The file system's replication overwrite protection must be disabled. If no ID or ARN is specified, then a new file system is created.

    When you initially configure replication to an existing file system, Amazon EFS writes data to or removes existing data from the destination file system to match data in the source file system. If you don't want to change data in the destination file system, then you should replicate to a new file system instead. For more information, see https://docs.aws.amazon.com/efs/latest/ug/create-replication.html.
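
A sketch of replicating to an existing destination file system (illustrative only; both IDs are placeholders, and per the text above, replication overwrite protection on the destination must already be disabled):

```java
import software.amazon.awssdk.services.efs.EfsClient;
import software.amazon.awssdk.services.efs.model.CreateReplicationConfigurationRequest;
import software.amazon.awssdk.services.efs.model.DestinationToCreate;

public class ReplicateToExistingFileSystemExample {
    public static void main(String[] args) {
        try (EfsClient efs = EfsClient.create()) {
            efs.createReplicationConfiguration(
                CreateReplicationConfigurationRequest.builder()
                    .sourceFileSystemId("fs-0123456789abcdef0")   // placeholder source
                    .destinations(DestinationToCreate.builder()
                        .region("us-west-2")
                        // Supplying an ID targets an existing file system; its contents
                        // are overwritten to match the source. Omit it to create a new one.
                        .fileSystemId("fs-0fedcba9876543210")     // placeholder destination
                        .build())
                    .build());
        }
    }
}
```
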

    " }, "RoleArn":{ "shape":"RoleArn", @@ -1565,7 +1573,7 @@ "members":{ "ReplicationOverwriteProtection":{ "shape":"ReplicationOverwriteProtection", - "documentation":"

    The status of the file system's replication overwrite protection.

    • ENABLED – The file system cannot be used as the destination file system in a replication configuration. The file system is writeable. Replication overwrite protection is ENABLED by default.

    • DISABLED – The file system can be used as the destination file system in a replication configuration. The file system is read-only and can only be modified by EFS replication.

    • REPLICATING – The file system is being used as the destination file system in a replication configuration. The file system is read-only and is only modified only by EFS replication.

    If the replication configuration is deleted, the file system's replication overwrite protection is re-enabled, the file system becomes writeable.

    " + "documentation":"

    The status of the file system's replication overwrite protection.

    • ENABLED – The file system cannot be used as the destination file system in a replication configuration. The file system is writeable. Replication overwrite protection is ENABLED by default.

    • DISABLED – The file system can be used as the destination file system in a replication configuration. The file system is read-only and can only be modified by EFS replication.

    • REPLICATING – The file system is being used as the destination file system in a replication configuration. The file system is read-only and is modified only by EFS replication.

    If the replication configuration is deleted, the file system's replication overwrite protection is re-enabled, and the file system becomes writeable.
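
For illustration, toggling this protection via UpdateFileSystemProtection with the Java SDK v2 (a sketch, not part of this patch; the file system ID is a placeholder):

```java
import software.amazon.awssdk.services.efs.EfsClient;
import software.amazon.awssdk.services.efs.model.ReplicationOverwriteProtection;
import software.amazon.awssdk.services.efs.model.UpdateFileSystemProtectionRequest;

public class FileSystemProtectionExample {
    public static void main(String[] args) {
        try (EfsClient efs = EfsClient.create()) {
            // Disable replication overwrite protection so the file system can be
            // used as a replication destination (EFS replication then controls writes).
            efs.updateFileSystemProtection(UpdateFileSystemProtectionRequest.builder()
                    .fileSystemId("fs-0123456789abcdef0")   // placeholder
                    .replicationOverwriteProtection(ReplicationOverwriteProtection.DISABLED)
                    .build());
        }
    }
}
```
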

    " } }, "documentation":"

    Describes the protection on a file system.

    " @@ -1677,6 +1685,19 @@ "error":{"httpStatusCode":409}, "exception":true }, + "IpAddressType":{ + "type":"string", + "enum":[ + "IPV4_ONLY", + "IPV6_ONLY", + "DUAL_STACK" + ] + }, + "Ipv6Address":{ + "type":"string", + "max":39, + "min":3 + }, "KmsKeyId":{ "type":"string", "max":2048, @@ -1789,7 +1810,7 @@ }, "SecurityGroups":{ "shape":"SecurityGroups", - "documentation":"

    An array of up to five VPC security group IDs.

    " + "documentation":"

    An array of VPC security group IDs.

    " } }, "documentation":"

    " @@ -1842,6 +1863,10 @@ "shape":"IpAddress", "documentation":"

    Address at which the file system can be mounted by using the mount target.

    " }, + "Ipv6Address":{ + "shape":"Ipv6Address", + "documentation":"

    The IPv6 address for the mount target.

    " + }, "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", "documentation":"

    The ID of the network interface that Amazon EFS created when it created the mount target.

    " @@ -1891,7 +1916,7 @@ "ErrorCode":{"shape":"ErrorCode"}, "Message":{"shape":"ErrorMessage"} }, - "documentation":"

    The calling account has reached the limit for elastic network interfaces for the specific Amazon Web Services Region. Either delete some network interfaces or request that the account quota be raised. For more information, see Amazon VPC Quotas in the Amazon VPC User Guide (see the Network interfaces per Region entry in the Network interfaces table).

    ", + "documentation":"

    The calling account has reached the limit for elastic network interfaces for the specific Amazon Web Services Region. Either delete some network interfaces or request that the account quota be raised. For more information, see Amazon VPC Quotas in the Amazon VPC User Guide (see the Network interfaces per Region entry in the Network interfaces table).

    ", "error":{"httpStatusCode":409}, "exception":true }, @@ -1947,7 +1972,7 @@ "ErrorCode":{"shape":"ErrorCode"}, "Message":{"shape":"ErrorMessage"} }, - "documentation":"

    Returned if the default file system policy is in effect for the EFS file system specified.

    ", + "documentation":"

    Returned if no backup is specified for a One Zone EFS file system.

    ", "error":{"httpStatusCode":404}, "exception":true }, @@ -2226,7 +2251,7 @@ "ErrorCode":{"shape":"ErrorCode"}, "Message":{"shape":"ErrorMessage"} }, - "documentation":"

    Returned if the size of SecurityGroups specified in the request is greater than five.

    ", + "documentation":"

    Returned if the number of SecurityGroups specified in the request is greater than the limit, which is based on account quota. Either delete some security groups or request that the account quota be raised. For more information, see Amazon VPC Quotas in the Amazon VPC User Guide (see the Security Groups table).

    ", "error":{"httpStatusCode":400}, "exception":true }, @@ -2480,7 +2505,7 @@ }, "ProvisionedThroughputInMibps":{ "shape":"ProvisionedThroughputInMibps", - "documentation":"

    (Optional) The throughput, measured in mebibytes per second (MiBps), that you want to provision for a file system that you're creating. Required if ThroughputMode is set to provisioned. Valid values are 1-3414 MiBps, with the upper limit depending on Region. To increase this limit, contact Amazon Web Services Support. For more information, see Amazon EFS quotas that you can increase in the Amazon EFS User Guide.

    " + "documentation":"

    (Optional) The throughput, measured in mebibytes per second (MiBps), that you want to provision for a file system that you're creating. Required if ThroughputMode is set to provisioned. Valid values are 1-3414 MiBps, with the upper limit depending on Region. To increase this limit, contact Amazon Web Services Support. For more information, see Amazon EFS quotas that you can increase in the Amazon EFS User Guide.

    " } } }, diff --git a/services/eks/pom.xml b/services/eks/pom.xml index 576bd4eaefcf..c1572be99bb3 100644 --- a/services/eks/pom.xml +++ b/services/eks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT eks AWS Java SDK :: Services :: EKS diff --git a/services/eks/src/main/resources/codegen-resources/service-2.json b/services/eks/src/main/resources/codegen-resources/service-2.json index fba86b68b0d0..40b255882e95 100644 --- a/services/eks/src/main/resources/codegen-resources/service-2.json +++ b/services/eks/src/main/resources/codegen-resources/service-2.json @@ -122,7 +122,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"UnsupportedAvailabilityZoneException"} ], - "documentation":"

    Creates an Amazon EKS control plane.

    The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, such as etcd and the API server. The control plane runs in an account managed by Amazon Web Services, and the Kubernetes API is exposed by the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is single tenant and unique. It runs on its own set of Amazon EC2 instances.

    The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the nodes (for example, to support kubectl exec, logs, and proxy data flows).

    Amazon EKS nodes run in your Amazon Web Services account and connect to your cluster's control plane over the Kubernetes API server endpoint and a certificate file that is created for your cluster.

    You can use the endpointPublicAccess and endpointPrivateAccess parameters to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

    You can use the logging parameter to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User Guide .

    CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing.

    In most cases, it takes several minutes to create a cluster. After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch nodes into your cluster. For more information, see Allowing users to access your cluster and Launching Amazon EKS nodes in the Amazon EKS User Guide.

    " + "documentation":"

    Creates an Amazon EKS control plane.

    The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, such as etcd and the API server. The control plane runs in an account managed by Amazon Web Services, and the Kubernetes API is exposed by the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is single tenant and unique. It runs on its own set of Amazon EC2 instances.

    The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the nodes (for example, to support kubectl exec, logs, and proxy data flows).

    Amazon EKS nodes run in your Amazon Web Services account and connect to your cluster's control plane over the Kubernetes API server endpoint and a certificate file that is created for your cluster.

    You can use the endpointPublicAccess and endpointPrivateAccess parameters to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. The endpoint domain name and IP address family depend on the value of the ipFamily for the cluster. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide.

    You can use the logging parameter to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User Guide .

    CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing.

    In most cases, it takes several minutes to create a cluster. After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch nodes into your cluster. For more information, see Allowing users to access your cluster and Launching Amazon EKS nodes in the Amazon EKS User Guide.
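
A minimal Java SDK v2 sketch of the endpoint-access and logging settings discussed above (not part of this patch; the cluster name, role ARN, and subnet IDs are placeholders):

```java
import software.amazon.awssdk.services.eks.EksClient;
import software.amazon.awssdk.services.eks.model.CreateClusterRequest;
import software.amazon.awssdk.services.eks.model.LogSetup;
import software.amazon.awssdk.services.eks.model.LogType;
import software.amazon.awssdk.services.eks.model.Logging;
import software.amazon.awssdk.services.eks.model.VpcConfigRequest;

public class CreateEksClusterExample {
    public static void main(String[] args) {
        try (EksClient eks = EksClient.create()) {
            eks.createCluster(CreateClusterRequest.builder()
                    .name("example-cluster")                                    // placeholder
                    .roleArn("arn:aws:iam::111122223333:role/eks-cluster-role") // placeholder
                    .resourcesVpcConfig(VpcConfigRequest.builder()
                            .subnetIds("subnet-aaaa1111", "subnet-bbbb2222")    // placeholders
                            // Defaults are public enabled, private disabled; set both here.
                            .endpointPublicAccess(true)
                            .endpointPrivateAccess(true)
                            .build())
                    // Control plane logs are off by default; enable selected types.
                    .logging(Logging.builder()
                            .clusterLogging(LogSetup.builder()
                                    .types(LogType.API, LogType.AUDIT)
                                    .enabled(true)
                                    .build())
                            .build())
                    .build());
        }
    }
}
```
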

    " }, "CreateEksAnywhereSubscription":{ "name":"CreateEksAnywhereSubscription", @@ -194,7 +194,7 @@ {"shape":"ResourceLimitExceededException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

    Creates an EKS Pod Identity association between a service account in an Amazon EKS cluster and an IAM role with EKS Pod Identity. Use EKS Pod Identity to give temporary IAM credentials to pods and the credentials are rotated automatically.

    Amazon EKS Pod Identity associations provide the ability to manage credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances.

    If a pod uses a service account that has an association, Amazon EKS sets environment variables in the containers of the pod. The environment variables configure the Amazon Web Services SDKs, including the Command Line Interface, to use the EKS Pod Identity credentials.

    Pod Identity is a simpler method than IAM roles for service accounts, as this method doesn't use OIDC identity providers. Additionally, you can configure a role for Pod Identity once, and reuse it across clusters.

    " + "documentation":"

    Creates an EKS Pod Identity association between a service account in an Amazon EKS cluster and an IAM role with EKS Pod Identity. Use EKS Pod Identity to give temporary IAM credentials to Pods; the credentials are rotated automatically.

    Amazon EKS Pod Identity associations provide the ability to manage credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances.

    If a Pod uses a service account that has an association, Amazon EKS sets environment variables in the containers of the Pod. The environment variables configure the Amazon Web Services SDKs, including the Command Line Interface, to use the EKS Pod Identity credentials.

    EKS Pod Identity is a simpler method than IAM roles for service accounts, as this method doesn't use OIDC identity providers. Additionally, you can configure a role for EKS Pod Identity once, and reuse it across clusters.

    Similar to Amazon Web Services IAM behavior, EKS Pod Identity associations are eventually consistent, and may take several seconds to be effective after the initial API call returns successfully. You must design your applications to account for these potential delays. We recommend that you don’t include association create/updates in the critical, high-availability code paths of your application. Instead, make changes in a separate initialization or setup routine that you run less frequently.

    You can set a target IAM role in the same or a different account for advanced scenarios. With a target role, EKS Pod Identity automatically performs two role assumptions in sequence: first assuming the role in the association that is in this account, then using those credentials to assume the target IAM role. This process provides your Pod with temporary credentials that have the permissions defined in the target role, allowing secure access to resources in another Amazon Web Services account.
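
A sketch of the basic association (illustrative only; the cluster name, namespace, service account, and role ARN are placeholders):

```java
import software.amazon.awssdk.services.eks.EksClient;
import software.amazon.awssdk.services.eks.model.CreatePodIdentityAssociationRequest;
import software.amazon.awssdk.services.eks.model.CreatePodIdentityAssociationResponse;

public class PodIdentityAssociationExample {
    public static void main(String[] args) {
        try (EksClient eks = EksClient.create()) {
            CreatePodIdentityAssociationResponse response = eks.createPodIdentityAssociation(
                CreatePodIdentityAssociationRequest.builder()
                    .clusterName("example-cluster")                      // placeholder
                    .namespace("default")                                // Pods must live here
                    .serviceAccount("app-service-account")               // placeholder
                    .roleArn("arn:aws:iam::111122223333:role/app-role")  // placeholder
                    .build());
            // Associations are eventually consistent; as the documentation advises,
            // keep this call out of critical, high-availability code paths.
            System.out.println(response.association().associationArn());
        }
    }
}
```
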

    " }, "DeleteAccessEntry":{ "name":"DeleteAccessEntry", @@ -710,7 +710,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

    Returns a list of all insights checked for against the specified cluster. You can filter which insights are returned by category, associated Kubernetes version, and status.

    " + "documentation":"

    Returns a list of all insights checked against the specified cluster. You can filter which insights are returned by category, associated Kubernetes version, and status. The default filter lists all categories and every status.

    The following lists the available categories:

    • UPGRADE_READINESS: Amazon EKS identifies issues that could impact your ability to upgrade to new versions of Kubernetes. These are called upgrade insights.

    • MISCONFIGURATION: Amazon EKS identifies misconfiguration in your EKS Hybrid Nodes setup that could impair functionality of your cluster or workloads. These are called configuration insights.
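
A sketch of filtering by the MISCONFIGURATION category that this model update adds (not part of the patch; the cluster name is a placeholder, and omitting the filter returns all categories and statuses):

```java
import software.amazon.awssdk.services.eks.EksClient;
import software.amazon.awssdk.services.eks.model.Category;
import software.amazon.awssdk.services.eks.model.InsightsFilter;
import software.amazon.awssdk.services.eks.model.ListInsightsRequest;

public class ListInsightsExample {
    public static void main(String[] args) {
        try (EksClient eks = EksClient.create()) {
            eks.listInsights(ListInsightsRequest.builder()
                    .clusterName("example-cluster")              // placeholder
                    .filter(InsightsFilter.builder()
                            .categories(Category.MISCONFIGURATION)
                            .build())
                    .build())
               .insights()
               // Print each configuration insight with its category.
               .forEach(i -> System.out.println(i.name() + " [" + i.category() + "]"));
        }
    }
}
```
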

    " }, "ListNodegroups":{ "name":"ListNodegroups", @@ -874,7 +874,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Updates an Amazon EKS cluster configuration. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with DescribeUpdate.

    You can use this operation to do the following actions:

    • You can use this API operation to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster control plane logs in the Amazon EKS User Guide .

      CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing.

    • You can also use this API operation to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Amazon EKS cluster endpoint access control in the Amazon EKS User Guide .

    • You can also use this API operation to choose different subnets and security groups for the cluster. You must specify at least two subnets that are in different Availability Zones. You can't change which VPC the subnets are from, the subnets must be in the same VPC as the subnets that the cluster was created with. For more information about the VPC requirements, see https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html in the Amazon EKS User Guide .

    • You can also use this API operation to enable or disable ARC zonal shift. If zonal shift is enabled, Amazon Web Services configures zonal autoshift for the cluster.

    • You can also use this API operation to add, change, or remove the configuration in the cluster for EKS Hybrid Nodes. To remove the configuration, use the remoteNetworkConfig key with an object containing both subkeys with empty arrays for each. Here is an inline example: \"remoteNetworkConfig\": { \"remoteNodeNetworks\": [], \"remotePodNetworks\": [] }.

    Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING (this status transition is eventually consistent). When the update is complete (either Failed or Successful), the cluster status moves to Active.

    " + "documentation":"

    Updates an Amazon EKS cluster configuration. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with DescribeUpdate.

    You can use this operation to do the following actions:

    • You can use this API operation to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster control plane logs in the Amazon EKS User Guide .

      CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing.

    • You can also use this API operation to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Cluster API server endpoint in the Amazon EKS User Guide .

    • You can also use this API operation to choose different subnets and security groups for the cluster. You must specify at least two subnets that are in different Availability Zones. You can't change which VPC the subnets are from, the subnets must be in the same VPC as the subnets that the cluster was created with. For more information about the VPC requirements, see https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html in the Amazon EKS User Guide .

    • You can also use this API operation to enable or disable ARC zonal shift. If zonal shift is enabled, Amazon Web Services configures zonal autoshift for the cluster.

    • You can also use this API operation to add, change, or remove the configuration in the cluster for EKS Hybrid Nodes. To remove the configuration, use the remoteNetworkConfig key with an object containing both subkeys with empty arrays for each. Here is an inline example: \"remoteNetworkConfig\": { \"remoteNodeNetworks\": [], \"remotePodNetworks\": [] }.

    Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING (this status transition is eventually consistent). When the update is complete (either Failed or Successful), the cluster status moves to Active.
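
One of the actions listed above, sketched with the Java SDK v2 (illustrative only; the cluster name is a placeholder): restrict the API endpoint to private access and keep the update ID for DescribeUpdate polling.

```java
import software.amazon.awssdk.services.eks.EksClient;
import software.amazon.awssdk.services.eks.model.UpdateClusterConfigRequest;
import software.amazon.awssdk.services.eks.model.UpdateClusterConfigResponse;
import software.amazon.awssdk.services.eks.model.VpcConfigRequest;

public class UpdateClusterConfigExample {
    public static void main(String[] args) {
        try (EksClient eks = EksClient.create()) {
            UpdateClusterConfigResponse response = eks.updateClusterConfig(
                UpdateClusterConfigRequest.builder()
                    .name("example-cluster")                 // placeholder
                    .resourcesVpcConfig(VpcConfigRequest.builder()
                            .endpointPublicAccess(false)
                            .endpointPrivateAccess(true)
                            .build())
                    .build());
            // The update is asynchronous; track it with DescribeUpdate and this ID.
            System.out.println("Update ID: " + response.update().id());
        }
    }
}
```
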

    " }, "UpdateClusterVersion":{ "name":"UpdateClusterVersion", @@ -963,7 +963,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

    Updates a EKS Pod Identity association. Only the IAM role can be changed; an association can't be moved between clusters, namespaces, or service accounts. If you need to edit the namespace or service account, you need to delete the association and then create a new association with your desired settings.

    " + "documentation":"

    Updates an EKS Pod Identity association. In an update, you can change the IAM role, the target IAM role, or disableSessionTags. You must change at least one of these in an update. An association can't be moved between clusters, namespaces, or service accounts. If you need to edit the namespace or service account, you need to delete the association and then create a new association with your desired settings.

    Similar to Amazon Web Services IAM behavior, EKS Pod Identity associations are eventually consistent, and may take several seconds to be effective after the initial API call returns successfully. You must design your applications to account for these potential delays. We recommend that you don’t include association create/updates in the critical, high-availability code paths of your application. Instead, make changes in a separate initialization or setup routine that you run less frequently.

    You can set a target IAM role in the same or a different account for advanced scenarios. With a target role, EKS Pod Identity automatically performs two role assumptions in sequence: first assuming the role in the association that is in this account, then using those credentials to assume the target IAM role. This process provides your Pod with temporary credentials that have the permissions defined in the target role, allowing secure access to resources in another Amazon Web Services account.
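
A brief sketch of changing only the role on an existing association (illustrative only; the cluster name, association ID, and role ARN are placeholders):

```java
import software.amazon.awssdk.services.eks.EksClient;
import software.amazon.awssdk.services.eks.model.UpdatePodIdentityAssociationRequest;

public class UpdatePodIdentityExample {
    public static void main(String[] args) {
        try (EksClient eks = EksClient.create()) {
            // Only the role-related fields can change; moving the association to a
            // different namespace or service account requires delete + create.
            eks.updatePodIdentityAssociation(UpdatePodIdentityAssociationRequest.builder()
                    .clusterName("example-cluster")                          // placeholder
                    .associationId("a-0123456789abcdef0")                    // placeholder
                    .roleArn("arn:aws:iam::111122223333:role/new-app-role")  // placeholder
                    .build());
        }
    }
}
```
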

    " } }, "shapes":{ @@ -1164,7 +1164,7 @@ }, "podIdentityAssociations":{ "shape":"StringList", - "documentation":"

    An array of Pod Identity Assocations owned by the Addon. Each EKS Pod Identity association maps a role to a service account in a namespace in the cluster.

    For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide.

    " + "documentation":"

    An array of EKS Pod Identity associations owned by the add-on. Each association maps a role to a service account in a namespace in the cluster.

    For more information, see Attach an IAM Role to an Amazon EKS add-on using EKS Pod Identity in the Amazon EKS User Guide.

    " } }, "documentation":"

    An Amazon EKS add-on. For more information, see Amazon EKS add-ons in the Amazon EKS User Guide.

    " @@ -1280,7 +1280,7 @@ "documentation":"

    The ARN of an IAM Role.

    " } }, - "documentation":"

    A type of Pod Identity Association owned by an Amazon EKS Add-on.

    Each EKS Pod Identity Association maps a role to a service account in a namespace in the cluster.

    For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide.

    " + "documentation":"

    A type of EKS Pod Identity association owned by an Amazon EKS add-on.

    Each association maps a role to a service account in a namespace in the cluster.

    For more information, see Attach an IAM Role to an Amazon EKS add-on using EKS Pod Identity in the Amazon EKS User Guide.

    " }, "AddonPodIdentityAssociationsList":{ "type":"list", @@ -1291,14 +1291,14 @@ "members":{ "serviceAccount":{ "shape":"String", - "documentation":"

    The Kubernetes Service Account name used by the addon.

    " + "documentation":"

    The Kubernetes Service Account name used by the add-on.

    " }, "recommendedManagedPolicies":{ "shape":"StringList", - "documentation":"

    A suggested IAM Policy for the addon.

    " + "documentation":"

    A suggested IAM Policy for the add-on.

    " } }, - "documentation":"

    Information about how to configure IAM for an Addon.

    " + "documentation":"

    Information about how to configure IAM for an add-on.

    " }, "AddonPodIdentityConfigurationList":{ "type":"list", @@ -1330,7 +1330,7 @@ }, "computeTypes":{ "shape":"StringList", - "documentation":"

    Indicates the compute type of the addon version.

    " + "documentation":"

    Indicates the compute type of the add-on version.

    " }, "compatibilities":{ "shape":"Compatibilities", @@ -1342,7 +1342,7 @@ }, "requiresIamPermissions":{ "shape":"Boolean", - "documentation":"

    Indicates if the Addon requires IAM Permissions to operate, such as networking permissions.

    " + "documentation":"

    Indicates if the add-on requires IAM Permissions to operate, such as networking permissions.

    " } }, "documentation":"

    Information about an add-on version.

    " @@ -1565,7 +1565,10 @@ }, "Category":{ "type":"string", - "enum":["UPGRADE_READINESS"] + "enum":[ + "UPGRADE_READINESS", + "MISCONFIGURATION" + ] }, "CategoryList":{ "type":"list", @@ -2112,7 +2115,7 @@ }, "podIdentityAssociations":{ "shape":"AddonPodIdentityAssociationsList", - "documentation":"

    An array of Pod Identity Assocations to be created. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role.

    For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide.

    " + "documentation":"

    An array of EKS Pod Identity associations to be created. Each association maps a Kubernetes service account to an IAM role.

    For more information, see Attach an IAM Role to an Amazon EKS add-on using EKS Pod Identity in the Amazon EKS User Guide.

    " } } }, @@ -2177,7 +2180,7 @@ }, "bootstrapSelfManagedAddons":{ "shape":"BoxedBoolean", - "documentation":"

    If you set this value to False when creating a cluster, the default networking add-ons will not be installed.

    The default networking addons include vpc-cni, coredns, and kube-proxy.

    Use this option when you plan to install third-party alternative add-ons or self-manage the default networking add-ons.

    " + "documentation":"

    If you set this value to False when creating a cluster, the default networking add-ons will not be installed.

    The default networking add-ons include vpc-cni, coredns, and kube-proxy.

    Use this option when you plan to install third-party alternative add-ons or self-manage the default networking add-ons.

    " }, "upgradePolicy":{ "shape":"UpgradePolicyRequest", @@ -2417,13 +2420,13 @@ "members":{ "clusterName":{ "shape":"String", - "documentation":"

    The name of the cluster to create the association in.

    ", + "documentation":"

    The name of the cluster to create the EKS Pod Identity association in.

    ", "location":"uri", "locationName":"name" }, "namespace":{ "shape":"String", - "documentation":"

    The name of the Kubernetes namespace inside the cluster to create the association in. The service account and the pods that use the service account must be in this namespace.

    " + "documentation":"

    The name of the Kubernetes namespace inside the cluster to create the EKS Pod Identity association in. The service account and the Pods that use the service account must be in this namespace.

    " }, "serviceAccount":{ "shape":"String", @@ -2431,7 +2434,7 @@ }, "roleArn":{ "shape":"String", - "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to associate with the service account. The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the pods that use this service account.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to associate with the service account. The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the Pods that use this service account.

    " }, "clientRequestToken":{ "shape":"String", @@ -2441,6 +2444,14 @@ "tags":{ "shape":"TagMap", "documentation":"

    Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or Amazon Web Services resources.

    The following basic restrictions apply to tags:

    • Maximum number of tags per resource – 50

    • For each resource, each tag key must be unique, and each tag key can have only one value.

    • Maximum key length – 128 Unicode characters in UTF-8

    • Maximum value length – 256 Unicode characters in UTF-8

    • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

    • Tag keys and values are case-sensitive.

    • Do not use aws:, AWS:, or any upper or lowercase combination of these as a prefix for either keys or values, as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

    " + }, + "disableSessionTags":{ + "shape":"BoxedBoolean", + "documentation":"

    Disable the automatic session tags that are appended by EKS Pod Identity.

    EKS Pod Identity adds a pre-defined set of session tags when it assumes the role. You can use these tags to author a single role that can work across resources by allowing access to Amazon Web Services resources based on matching tags. By default, EKS Pod Identity attaches six tags, including tags for cluster name, namespace, and service account name. For the list of tags added by EKS Pod Identity, see List of session tags added by EKS Pod Identity in the Amazon EKS User Guide.

    Amazon Web Services compresses inline session policies, managed policy ARNs, and session tags into a packed binary format that has a separate limit. If you receive a PackedPolicyTooLarge error indicating the packed binary format has exceeded the size limit, you can attempt to reduce the size by disabling the session tags added by EKS Pod Identity.

    " + }, + "targetRoleArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the target IAM role to associate with the service account. This role is assumed by using the EKS Pod Identity association role, and then the credentials for this role are injected into the Pod.

    When you run applications on Amazon EKS, your application might need to access Amazon Web Services resources from a different role that exists in the same or different Amazon Web Services account. For example, your application running in “Account A” might need to access resources, such as Amazon S3 buckets in “Account B” or within “Account A” itself. You can create an association to access Amazon Web Services resources in “Account B” by creating two IAM roles: a role in “Account A” and a role in “Account B” (which can be the same or different account), each with the necessary trust and permission policies. After you provide these roles in the IAM role and Target IAM role fields, EKS will perform role chaining to ensure your application gets the required permissions. This means Role A will assume Role B, allowing your Pods to securely access resources like S3 buckets in the target account.
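
    A minimal sketch of creating an association that exercises both new fields, assuming the AWS SDK for Java v2 client and builders generated from this model (all identifiers are placeholders):

    import software.amazon.awssdk.services.eks.EksClient;
    import software.amazon.awssdk.services.eks.model.CreatePodIdentityAssociationRequest;
    import software.amazon.awssdk.services.eks.model.CreatePodIdentityAssociationResponse;

    public class CreateAssociationSketch {
        public static void main(String[] args) {
            try (EksClient eks = EksClient.create()) {
                CreatePodIdentityAssociationResponse response =
                    eks.createPodIdentityAssociation(CreatePodIdentityAssociationRequest.builder()
                        .clusterName("my-cluster")
                        .namespace("default")
                        .serviceAccount("my-service-account")
                        .roleArn("arn:aws:iam::111122223333:role/pod-identity-role")
                        .disableSessionTags(true)   // mitigate PackedPolicyTooLarge errors
                        .targetRoleArn("arn:aws:iam::444455556666:role/target-role") // role chaining
                        .build());
                System.out.println(response.association().associationId());
            }
        }
    }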

    " } } }, @@ -2476,8 +2487,7 @@ }, "DeleteAccessEntryResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteAddonRequest":{ "type":"structure", @@ -2760,7 +2770,7 @@ }, "podIdentityConfiguration":{ "shape":"AddonPodIdentityConfigurationList", - "documentation":"

    The Kubernetes service account name used by the addon, and any suggested IAM policies. Use this information to create an IAM Role for the Addon.

    " + "documentation":"

    The Kubernetes service account name used by the add-on, and any suggested IAM policies. Use this information to create an IAM Role for the add-on.

    " } } }, @@ -3195,8 +3205,7 @@ }, "DisassociateAccessPolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DisassociateIdentityProviderConfigRequest":{ "type":"structure", @@ -3742,7 +3751,7 @@ "members":{ "categories":{ "shape":"CategoryList", - "documentation":"

    The categories to use to filter insights.

    " + "documentation":"

    The categories to use to filter insights. The following lists the available categories:

    • UPGRADE_READINESS: Amazon EKS identifies issues that could impact your ability to upgrade to new versions of Kubernetes. These are called upgrade insights.

    • MISCONFIGURATION: Amazon EKS identifies misconfiguration in your EKS Hybrid Nodes setup that could impair functionality of your cluster or workloads. These are called configuration insights.
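
    A minimal sketch of filtering for the new category with the AWS SDK for Java v2, assuming the generated EksClient, InsightsFilter, and Category types (the cluster name is a placeholder):

    import software.amazon.awssdk.services.eks.EksClient;
    import software.amazon.awssdk.services.eks.model.Category;
    import software.amazon.awssdk.services.eks.model.InsightsFilter;
    import software.amazon.awssdk.services.eks.model.ListInsightsRequest;

    public class ListMisconfigurationInsightsSketch {
        public static void main(String[] args) {
            try (EksClient eks = EksClient.create()) {
                eks.listInsights(ListInsightsRequest.builder()
                        .clusterName("my-cluster")
                        .filter(InsightsFilter.builder()
                            .categories(Category.MISCONFIGURATION)  // new enum value in this update
                            .build())
                        .build())
                    .insights()
                    .forEach(insight -> System.out.println(insight.name()));
            }
        }
    }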

    " }, "kubernetesVersions":{ "shape":"StringList", @@ -4974,7 +4983,7 @@ }, "namespace":{ "shape":"String", - "documentation":"

    The name of the Kubernetes namespace inside the cluster to create the association in. The service account and the pods that use the service account must be in this namespace.

    " + "documentation":"

    The name of the Kubernetes namespace inside the cluster to create the association in. The service account and the Pods that use the service account must be in this namespace.

    " }, "serviceAccount":{ "shape":"String", @@ -4982,7 +4991,7 @@ }, "roleArn":{ "shape":"String", - "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to associate with the service account. The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the pods that use this service account.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to associate with the service account. The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the Pods that use this service account.

    " }, "associationArn":{ "shape":"String", @@ -5002,11 +5011,23 @@ }, "modifiedAt":{ "shape":"Timestamp", - "documentation":"

    The most recent timestamp that the association was modified at

    " + "documentation":"

    The most recent timestamp that the association was modified at.

    " }, "ownerArn":{ "shape":"String", - "documentation":"

    If defined, the Pod Identity Association is owned by an Amazon EKS Addon.

    " + "documentation":"

    If defined, the EKS Pod Identity association is owned by an Amazon EKS add-on.

    " + }, + "disableSessionTags":{ + "shape":"BoxedBoolean", + "documentation":"

    The state of the automatic session tags. The value of true disables these tags.

    EKS Pod Identity adds a pre-defined set of session tags when it assumes the role. You can use these tags to author a single role that can work across resources by allowing access to Amazon Web Services resources based on matching tags. By default, EKS Pod Identity attaches six tags, including tags for cluster name, namespace, and service account name. For the list of tags added by EKS Pod Identity, see List of session tags added by EKS Pod Identity in the Amazon EKS User Guide.

    " + }, + "targetRoleArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the target IAM role to associate with the service account. This role is assumed by using the EKS Pod Identity association role, and then the credentials for this role are injected into the Pod.

    " + }, + "externalId":{ + "shape":"String", + "documentation":"

    The unique identifier for this EKS Pod Identity association for a target IAM role. You put this value in the trust policy of the target role, in a Condition to match the sts.ExternalId. This ensures that the target role can only be assumed by this association. This prevents the confused deputy problem. For more information about the confused deputy problem, see The confused deputy problem in the IAM User Guide.

    If you want to use the same target role with multiple associations or other roles, use independent statements in the trust policy to allow sts:AssumeRole access from each role.
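
    A minimal sketch of reading the external ID so it can be wired into the target role's trust policy, assuming the generated DescribePodIdentityAssociation API (identifiers are placeholders):

    import software.amazon.awssdk.services.eks.EksClient;
    import software.amazon.awssdk.services.eks.model.DescribePodIdentityAssociationRequest;

    public class ExternalIdSketch {
        public static void main(String[] args) {
            try (EksClient eks = EksClient.create()) {
                String externalId = eks.describePodIdentityAssociation(
                        DescribePodIdentityAssociationRequest.builder()
                            .clusterName("my-cluster")
                            .associationId("a-0123456789abcdef0")
                            .build())
                    .association()
                    .externalId();
                // Reference this value in the target role's trust policy, e.g.:
                // "Condition": { "StringEquals": { "sts:ExternalId": "<externalId>" } }
                System.out.println(externalId);
            }
        }
    }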

    " } }, "documentation":"

    Amazon EKS Pod Identity associations provide the ability to manage credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances.

    " @@ -5024,7 +5045,7 @@ }, "namespace":{ "shape":"String", - "documentation":"

    The name of the Kubernetes namespace inside the cluster to create the association in. The service account and the pods that use the service account must be in this namespace.

    " + "documentation":"

    The name of the Kubernetes namespace inside the cluster to create the association in. The service account and the Pods that use the service account must be in this namespace.

    " }, "serviceAccount":{ "shape":"String", @@ -5040,7 +5061,7 @@ }, "ownerArn":{ "shape":"String", - "documentation":"

    If defined, the Pod Identity Association is owned by an Amazon EKS Addon.

    " + "documentation":"

    If defined, the association is owned by an Amazon EKS add-on.

    " } }, "documentation":"

    The summarized description of the association.

    Each summary is simplified by removing these fields compared to the full PodIdentityAssociation:

    • The IAM role: roleArn

    • The timestamp that the association was created at: createdAt

    • The most recent timestamp that the association was modified at: modifiedAt

    • The tags on the association: tags

    " @@ -5106,11 +5127,11 @@ "members":{ "remoteNodeNetworks":{ "shape":"RemoteNodeNetworkList", - "documentation":"

    The list of network CIDRs that can contain hybrid nodes.

    These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    • Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including Transit Gateway, Site-to-Site VPN, or Direct Connect.

    • Each host must allow outbound connection to the EKS cluster control plane on TCP ports 443 and 10250.

    • Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations.

    • Each host must allow TCP and UDP network connectivity to and from other hosts that are running CoreDNS on UDP port 53 for service and pod DNS names.

    " + "documentation":"

    The list of network CIDRs that can contain hybrid nodes.

    These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    • Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including Transit Gateway, Site-to-Site VPN, or Direct Connect.

    • Each host must allow outbound connection to the EKS cluster control plane on TCP ports 443 and 10250.

    • Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations.

    • Each host must allow TCP and UDP network connectivity to and from other hosts that are running CoreDNS on UDP port 53 for service and pod DNS names.

    " }, "remotePodNetworks":{ "shape":"RemotePodNetworkList", - "documentation":"

    The list of network CIDRs that can contain pods that run Kubernetes webhooks on hybrid nodes.

    These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    " + "documentation":"

    The list of network CIDRs that can contain pods that run Kubernetes webhooks on hybrid nodes.

    These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    " } }, "documentation":"

    The configuration in the cluster for EKS Hybrid Nodes. You can add, change, or remove this configuration after the cluster is created.
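
    A minimal sketch of building this configuration with the AWS SDK for Java v2, assuming the RemoteNetworkConfigRequest, RemoteNodeNetwork, and RemotePodNetwork types generated from this model (the CIDRs are placeholders that satisfy the RFC-1918 requirement above):

    import software.amazon.awssdk.services.eks.model.RemoteNetworkConfigRequest;
    import software.amazon.awssdk.services.eks.model.RemoteNodeNetwork;
    import software.amazon.awssdk.services.eks.model.RemotePodNetwork;

    public class RemoteNetworkConfigSketch {
        public static void main(String[] args) {
            RemoteNetworkConfigRequest remoteConfig = RemoteNetworkConfigRequest.builder()
                .remoteNodeNetworks(RemoteNodeNetwork.builder()
                    .cidrs("10.2.0.0/16")   // RFC-1918 range for on-premises hybrid nodes
                    .build())
                .remotePodNetworks(RemotePodNetwork.builder()
                    .cidrs("10.3.0.0/16")   // CNI-managed range for pods running webhooks
                    .build())
                .build();
            System.out.println(remoteConfig);
        }
    }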

    " @@ -5134,10 +5155,10 @@ "members":{ "cidrs":{ "shape":"StringList", - "documentation":"

    A network CIDR that can contain hybrid nodes.

    These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    • Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including Transit Gateway, Site-to-Site VPN, or Direct Connect.

    • Each host must allow outbound connection to the EKS cluster control plane on TCP ports 443 and 10250.

    • Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations.

    • Each host must allow TCP and UDP network connectivity to and from other hosts that are running CoreDNS on UDP port 53 for service and pod DNS names.

    " + "documentation":"

    A network CIDR that can contain hybrid nodes.

    These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    • Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including Transit Gateway, Site-to-Site VPN, or Direct Connect.

    • Each host must allow outbound connection to the EKS cluster control plane on TCP ports 443 and 10250.

    • Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations.

    • Each host must allow TCP and UDP network connectivity to and from other hosts that are running CoreDNS on UDP port 53 for service and pod DNS names.

    " } }, - "documentation":"

    A network CIDR that can contain hybrid nodes.

    These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    • Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including Transit Gateway, Site-to-Site VPN, or Direct Connect.

    • Each host must allow outbound connection to the EKS cluster control plane on TCP ports 443 and 10250.

    • Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations.

    • Each host must allow TCP and UDP network connectivity to and from other hosts that are running CoreDNS on UDP port 53 for service and pod DNS names.

    " + "documentation":"

    A network CIDR that can contain hybrid nodes.

    These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    • Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including Transit Gateway, Site-to-Site VPN, or Direct Connect.

    • Each host must allow outbound connection to the EKS cluster control plane on TCP ports 443 and 10250.

    • Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations.

    • Each host must allow TCP and UDP network connectivity to and from other hosts that are running CoreDNS on UDP port 53 for service and pod DNS names.

    " }, "RemoteNodeNetworkList":{ "type":"list", @@ -5149,10 +5170,10 @@ "members":{ "cidrs":{ "shape":"StringList", - "documentation":"

    A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.

    These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    " + "documentation":"

    A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.

    These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    " } }, - "documentation":"

    A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.

    These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    " + "documentation":"

    A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.

    These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    " }, "RemotePodNetworkList":{ "type":"list", @@ -5379,8 +5400,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -5474,8 +5494,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "Update":{ "type":"structure", @@ -5602,7 +5621,7 @@ }, "podIdentityAssociations":{ "shape":"AddonPodIdentityAssociationsList", - "documentation":"

    An array of Pod Identity Assocations to be updated. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role. If this value is left blank, no change. If an empty array is provided, existing Pod Identity Assocations owned by the Addon are deleted.

    For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide.

    " + "documentation":"

    An array of EKS Pod Identity associations to be updated. Each association maps a Kubernetes service account to an IAM role. If this value is left blank, no change is made. If an empty array is provided, existing associations owned by the add-on are deleted.

    For more information, see Attach an IAM Role to an Amazon EKS add-on using EKS Pod Identity in the Amazon EKS User Guide.

    " } } }, @@ -5926,12 +5945,20 @@ }, "roleArn":{ "shape":"String", - "documentation":"

    The new IAM role to change the

    " + "documentation":"

    The new IAM role to change in the association.

    " }, "clientRequestToken":{ "shape":"String", "documentation":"

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

    ", "idempotencyToken":true + }, + "disableSessionTags":{ + "shape":"BoxedBoolean", + "documentation":"

    Disable the automatic session tags that are appended by EKS Pod Identity.

    EKS Pod Identity adds a pre-defined set of session tags when it assumes the role. You can use these tags to author a single role that can work across resources by allowing access to Amazon Web Services resources based on matching tags. By default, EKS Pod Identity attaches six tags, including tags for cluster name, namespace, and service account name. For the list of tags added by EKS Pod Identity, see List of session tags added by EKS Pod Identity in the Amazon EKS User Guide.

    Amazon Web Services compresses inline session policies, managed policy ARNs, and session tags into a packed binary format that has a separate limit. If you receive a PackedPolicyTooLarge error indicating the packed binary format has exceeded the size limit, you can attempt to reduce the size by disabling the session tags added by EKS Pod Identity.

    " + }, + "targetRoleArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the target IAM role to associate with the service account. This role is assumed by using the EKS Pod Identity association role, and then the credentials for this role are injected into the Pod.

    When you run applications on Amazon EKS, your application might need to access Amazon Web Services resources from a different role that exists in the same or different Amazon Web Services account. For example, your application running in “Account A” might need to access resources, such as buckets in “Account B” or within “Account A” itself. You can create an association to access Amazon Web Services resources in “Account B” by creating two IAM roles: a role in “Account A” and a role in “Account B” (which can be the same or different account), each with the necessary trust and permission policies. After you provide these roles in the IAM role and Target IAM role fields, EKS will perform role chaining to ensure your application gets the required permissions. This means Role A will assume Role B, allowing your Pods to securely access resources like S3 buckets in the target account.

    " } } }, @@ -5940,7 +5967,7 @@ "members":{ "association":{ "shape":"PodIdentityAssociation", - "documentation":"

    The full description of the EKS Pod Identity association that was updated.

    " + "documentation":"

    The full description of the association that was updated.

    " } } }, @@ -6027,15 +6054,15 @@ }, "endpointPublicAccess":{ "shape":"BoxedBoolean", - "documentation":"

    Set this value to false to disable public access to your cluster's Kubernetes API server endpoint. If you disable public access, your cluster's Kubernetes API server can only receive requests from within the cluster VPC. The default value for this parameter is true, which enables public access for your Kubernetes API server. For more information, see Amazon EKS cluster endpoint access control in the Amazon EKS User Guide .

    " + "documentation":"

    Set this value to false to disable public access to your cluster's Kubernetes API server endpoint. If you disable public access, your cluster's Kubernetes API server can only receive requests from within the cluster VPC. The default value for this parameter is true, which enables public access for your Kubernetes API server. The endpoint domain name and IP address family depend on the value of the ipFamily for the cluster. For more information, see Cluster API server endpoint in the Amazon EKS User Guide.

    " }, "endpointPrivateAccess":{ "shape":"BoxedBoolean", - "documentation":"

    Set this value to true to enable private access for your cluster's Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster's VPC use the private VPC endpoint. The default value for this parameter is false, which disables private access for your Kubernetes API server. If you disable private access and you have nodes or Fargate pods in the cluster, then ensure that publicAccessCidrs includes the necessary CIDR blocks for communication with the nodes or Fargate pods. For more information, see Amazon EKS cluster endpoint access control in the Amazon EKS User Guide .

    " + "documentation":"

    Set this value to true to enable private access for your cluster's Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster's VPC use the private VPC endpoint. The default value for this parameter is false, which disables private access for your Kubernetes API server. If you disable private access and you have nodes or Fargate pods in the cluster, then ensure that publicAccessCidrs includes the necessary CIDR blocks for communication with the nodes or Fargate pods. For more information, see Cluster API server endpoint in the Amazon EKS User Guide.

    " }, "publicAccessCidrs":{ "shape":"StringList", - "documentation":"

    The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint. Communication to the endpoint from addresses outside of the CIDR blocks that you specify is denied. The default value is 0.0.0.0/0. If you've disabled private endpoint access, make sure that you specify the necessary CIDR blocks for every node and Fargate Pod in the cluster. For more information, see Amazon EKS cluster endpoint access control in the Amazon EKS User Guide .

    " + "documentation":"

    The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint. Communication to the endpoint from addresses outside of the CIDR blocks that you specify is denied. The default value is 0.0.0.0/0 and additionally ::/0 for dual-stack IPv6 clusters. If you've disabled private endpoint access, make sure that you specify the necessary CIDR blocks for every node and Fargate Pod in the cluster. For more information, see Cluster API server endpoint in the Amazon EKS User Guide.

    Note that the public endpoints are dual-stack only for IPv6 clusters created after October 2024. You can't add IPv6 CIDR blocks to IPv4 clusters or to IPv6 clusters created before October 2024.
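
    A minimal sketch of restricting the public endpoint with the AWS SDK for Java v2, assuming the generated UpdateClusterConfig API (the cluster name and CIDR are placeholders):

    import software.amazon.awssdk.services.eks.EksClient;
    import software.amazon.awssdk.services.eks.model.UpdateClusterConfigRequest;
    import software.amazon.awssdk.services.eks.model.VpcConfigRequest;

    public class EndpointAccessSketch {
        public static void main(String[] args) {
            try (EksClient eks = EksClient.create()) {
                eks.updateClusterConfig(UpdateClusterConfigRequest.builder()
                    .name("my-cluster")
                    .resourcesVpcConfig(VpcConfigRequest.builder()
                        .endpointPublicAccess(true)
                        .endpointPrivateAccess(true)
                        .publicAccessCidrs("203.0.113.0/24")   // restrict the public endpoint
                        .build())
                    .build());
            }
        }
    }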

    " } }, "documentation":"

    An object representing the VPC configuration to use for an Amazon EKS cluster.

    " @@ -6065,11 +6092,11 @@ }, "endpointPrivateAccess":{ "shape":"Boolean", - "documentation":"

    This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate from within your cluster's VPC use the private VPC endpoint instead of traversing the internet. If this value is disabled and you have nodes or Fargate pods in the cluster, then ensure that publicAccessCidrs includes the necessary CIDR blocks for communication with the nodes or Fargate pods. For more information, see Amazon EKS cluster endpoint access control in the Amazon EKS User Guide .

    " + "documentation":"

    This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate from within your cluster's VPC use the private VPC endpoint instead of traversing the internet. If this value is disabled and you have nodes or Fargate pods in the cluster, then ensure that publicAccessCidrs includes the necessary CIDR blocks for communication with the nodes or Fargate pods. For more information, see Cluster API server endpoint in the Amazon EKS User Guide.

    " }, "publicAccessCidrs":{ "shape":"StringList", - "documentation":"

    The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint.

    " + "documentation":"

    The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint. Communication to the endpoint from addresses outside of the CIDR blocks that you specify is denied. The default value is 0.0.0.0/0 and additionally ::/0 for dual-stack IPv6 clusters. If you've disabled private endpoint access, make sure that you specify the necessary CIDR blocks for every node and Fargate Pod in the cluster. For more information, see Cluster API server endpoint in the Amazon EKS User Guide.

    Note that the public endpoints are dual-stack only for IPv6 clusters created after October 2024. You can't add IPv6 CIDR blocks to IPv4 clusters or to IPv6 clusters created before October 2024.

    " } }, "documentation":"

    An object representing an Amazon EKS cluster VPC configuration response.

    " diff --git a/services/eksauth/pom.xml b/services/eksauth/pom.xml index 6cdea7d98c3e..851c5d58dc48 100644 --- a/services/eksauth/pom.xml +++ b/services/eksauth/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT eksauth AWS Java SDK :: Services :: EKS Auth diff --git a/services/elasticache/pom.xml b/services/elasticache/pom.xml index bc4705a142be..9e830e2779b8 100644 --- a/services/elasticache/pom.xml +++ b/services/elasticache/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT elasticache AWS Java SDK :: Services :: Amazon ElastiCache diff --git a/services/elasticbeanstalk/pom.xml b/services/elasticbeanstalk/pom.xml index 1ab98c044051..d5cecc043f81 100644 --- a/services/elasticbeanstalk/pom.xml +++ b/services/elasticbeanstalk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT elasticbeanstalk AWS Java SDK :: Services :: AWS Elastic Beanstalk diff --git a/services/elasticloadbalancing/pom.xml b/services/elasticloadbalancing/pom.xml index 82b8248ed212..3a9fe0e72b13 100644 --- a/services/elasticloadbalancing/pom.xml +++ b/services/elasticloadbalancing/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT elasticloadbalancing AWS Java SDK :: Services :: Elastic Load Balancing diff --git a/services/elasticloadbalancingv2/pom.xml b/services/elasticloadbalancingv2/pom.xml index b4d62ddb61bc..dc0314297fda 100644 --- a/services/elasticloadbalancingv2/pom.xml +++ b/services/elasticloadbalancingv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT elasticloadbalancingv2 AWS Java SDK :: Services :: Elastic Load Balancing V2 diff --git a/services/elasticsearch/pom.xml b/services/elasticsearch/pom.xml index efcfdd207f8b..fe87ad9c0f76 100644 --- a/services/elasticsearch/pom.xml +++ b/services/elasticsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT elasticsearch AWS Java SDK :: Services :: Amazon Elasticsearch Service diff --git a/services/elastictranscoder/pom.xml b/services/elastictranscoder/pom.xml index a2cc63b9bc27..ebe6889449a1 100644 --- a/services/elastictranscoder/pom.xml +++ b/services/elastictranscoder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT elastictranscoder AWS Java SDK :: Services :: Amazon Elastic Transcoder diff --git a/services/emr/pom.xml b/services/emr/pom.xml index 172fffaa4e1c..3088d08f2895 100644 --- a/services/emr/pom.xml +++ b/services/emr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT emr AWS Java SDK :: Services :: Amazon EMR diff --git a/services/emrcontainers/pom.xml b/services/emrcontainers/pom.xml index 0fd70126ce65..db4463992d84 100644 --- a/services/emrcontainers/pom.xml +++ b/services/emrcontainers/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT emrcontainers AWS Java SDK :: Services :: EMR Containers diff --git a/services/emrserverless/pom.xml b/services/emrserverless/pom.xml index ee2116bbd411..ae41567b9d17 100644 --- a/services/emrserverless/pom.xml +++ b/services/emrserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT emrserverless AWS Java SDK :: Services :: EMR Serverless diff --git a/services/emrserverless/src/main/resources/codegen-resources/service-2.json 
b/services/emrserverless/src/main/resources/codegen-resources/service-2.json index d32f0d655d96..7fb0de82b959 100644 --- a/services/emrserverless/src/main/resources/codegen-resources/service-2.json +++ b/services/emrserverless/src/main/resources/codegen-resources/service-2.json @@ -24,8 +24,8 @@ "output":{"shape":"CancelJobRunResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Cancels a job run.

    ", "idempotent":true @@ -41,8 +41,8 @@ "output":{"shape":"CreateApplicationResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"} ], "documentation":"

    Creates an application.

    ", @@ -59,8 +59,8 @@ "output":{"shape":"DeleteApplicationResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Deletes an application. An application has to be in a stopped or created state in order to be deleted.

    ", "idempotent":true @@ -76,8 +76,8 @@ "output":{"shape":"GetApplicationResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Displays detailed information about a specified application.

    " }, @@ -108,8 +108,8 @@ "output":{"shape":"GetJobRunResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Displays detailed information about a job run.

    " }, @@ -139,8 +139,8 @@ "output":{"shape":"ListJobRunAttemptsResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Lists all attempts of a job run.

    " }, @@ -170,8 +170,8 @@ "output":{"shape":"ListTagsForResourceResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Lists the tags assigned to the resources.

    " }, @@ -186,8 +186,8 @@ "output":{"shape":"StartApplicationResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ServiceQuotaExceededException"} ], "documentation":"

    Starts a specified application and initializes initial capacity if configured.

    ", @@ -222,8 +222,8 @@ "output":{"shape":"StopApplicationResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Stops a specified application and releases initial capacity if configured. All scheduled and running jobs must be completed or cancelled before stopping an application.

    ", "idempotent":true @@ -239,8 +239,8 @@ "output":{"shape":"TagResourceResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Assigns tags to resources. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value, both of which you define. Tags enable you to categorize your Amazon Web Services resources by attributes such as purpose, owner, or environment. When you have many resources of the same type, you can quickly identify a specific resource based on the tags you've assigned to it.

    " }, @@ -255,8 +255,8 @@ "output":{"shape":"UntagResourceResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Removes tags from resources.

    ", "idempotent":true @@ -272,8 +272,8 @@ "output":{"shape":"UpdateApplicationResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Updates a specified application. An application has to be in a stopped or created state in order to be updated.

    " } @@ -481,6 +481,12 @@ "X86_64" ] }, + "Arn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"([ -~… -퟿-�က0-ჿFF]+)" + }, "AttemptNumber":{ "type":"integer", "box":true, @@ -538,6 +544,12 @@ "documentation":"

    The ID of the job run to cancel.

    ", "location":"uri", "locationName":"jobRunId" + }, + "shutdownGracePeriodInSeconds":{ + "shape":"ShutdownGracePeriodInSeconds", + "documentation":"

    The duration in seconds to wait before forcefully terminating the job after cancellation is requested.
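
    A minimal sketch of the new query-string parameter with the AWS SDK for Java v2, assuming the CancelJobRunRequest builder generated from this model (the IDs are placeholders):

    import software.amazon.awssdk.services.emrserverless.EmrServerlessClient;
    import software.amazon.awssdk.services.emrserverless.model.CancelJobRunRequest;

    public class CancelJobRunSketch {
        public static void main(String[] args) {
            try (EmrServerlessClient emr = EmrServerlessClient.create()) {
                emr.cancelJobRun(CancelJobRunRequest.builder()
                    .applicationId("00abcdef12345678")
                    .jobRunId("11abcdef12345678")
                    .shutdownGracePeriodInSeconds(120)  // wait up to 2 minutes before force-terminating
                    .build());
            }
        }
    }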

    ", + "location":"querystring", + "locationName":"shutdownGracePeriodInSeconds" } } }, @@ -811,14 +823,15 @@ }, "EntryPointArgument":{ "type":"string", - "max":10280, "min":1, "pattern":".*\\S.*", "sensitive":true }, "EntryPointArguments":{ "type":"list", - "member":{"shape":"EntryPointArgument"} + "member":{"shape":"EntryPointArgument"}, + "max":1024, + "min":0 }, "EntryPointPath":{ "type":"string", @@ -1122,6 +1135,7 @@ "shape":"IAMRoleArn", "documentation":"

    The execution role ARN of the job run.

    " }, + "executionIamPolicy":{"shape":"JobRunExecutionIamPolicy"}, "state":{ "shape":"JobRunState", "documentation":"

    The state of the job run.

    " @@ -1282,6 +1296,20 @@ "type":"list", "member":{"shape":"JobRunAttemptSummary"} }, + "JobRunExecutionIamPolicy":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"PolicyDocument", + "documentation":"

    An IAM inline policy to use as an execution IAM policy.

    " + }, + "policyArns":{ + "shape":"PolicyArnList", + "documentation":"

    A list of Amazon Resource Names (ARNs) to use as an execution IAM policy.

    " + } + }, + "documentation":"

    Optional IAM policy. The resulting job IAM role permissions will be an intersection of the policies passed and the policy associated with your job execution role.

    " + }, "JobRunId":{ "type":"string", "max":64, @@ -1707,6 +1735,18 @@ "min":1, "pattern":"[A-Za-z0-9_=-]+" }, + "PolicyArnList":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":10, + "min":0 + }, + "PolicyDocument":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"([ -ÿ]+)" + }, "PrometheusMonitoringConfiguration":{ "type":"structure", "members":{ @@ -1859,6 +1899,10 @@ }, "exception":true }, + "ShutdownGracePeriodInSeconds":{ + "type":"integer", + "box":true + }, "SparkSubmit":{ "type":"structure", "required":["entryPoint"], @@ -1925,6 +1969,10 @@ "shape":"IAMRoleArn", "documentation":"

    The execution role ARN for the job run.

    " }, + "executionIamPolicy":{ + "shape":"JobRunExecutionIamPolicy", + "documentation":"

    You can pass an optional IAM policy. The resulting job IAM role permissions will be an intersection of this policy and the policy associated with your job execution role.
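
    A minimal sketch of scoping down a job run with the AWS SDK for Java v2, assuming the StartJobRunRequest and JobRunExecutionIamPolicy builders generated from this model (the ARNs are placeholders):

    import software.amazon.awssdk.services.emrserverless.EmrServerlessClient;
    import software.amazon.awssdk.services.emrserverless.model.JobRunExecutionIamPolicy;
    import software.amazon.awssdk.services.emrserverless.model.StartJobRunRequest;

    public class ScopedJobRunSketch {
        public static void main(String[] args) {
            try (EmrServerlessClient emr = EmrServerlessClient.create()) {
                emr.startJobRun(StartJobRunRequest.builder()
                    .applicationId("00abcdef12345678")
                    .executionRoleArn("arn:aws:iam::111122223333:role/job-execution-role")
                    .executionIamPolicy(JobRunExecutionIamPolicy.builder()
                        // Effective permissions = intersection of this policy and the execution role.
                        .policyArns("arn:aws:iam::111122223333:policy/read-only-data-access")
                        .build())
                    .build());
            }
        }
    }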

    " + }, "jobDriver":{ "shape":"JobDriver", "documentation":"

    The job driver for the job run.

    " diff --git a/services/entityresolution/pom.xml b/services/entityresolution/pom.xml index 5c585fc7c6d2..15daec3a95da 100644 --- a/services/entityresolution/pom.xml +++ b/services/entityresolution/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT entityresolution AWS Java SDK :: Services :: Entity Resolution diff --git a/services/entityresolution/src/main/resources/codegen-resources/service-2.json b/services/entityresolution/src/main/resources/codegen-resources/service-2.json index c26c75e970f2..33a3398c060b 100644 --- a/services/entityresolution/src/main/resources/codegen-resources/service-2.json +++ b/services/entityresolution/src/main/resources/codegen-resources/service-2.json @@ -222,6 +222,24 @@ "documentation":"

    Deletes the SchemaMapping with a given name. This operation will succeed even if a schema with the given name does not exist. This operation will fail if there is a MatchingWorkflow object that references the SchemaMapping in the workflow's InputSourceConfig.

    ", "idempotent":true }, + "GenerateMatchId":{ + "name":"GenerateMatchId", + "http":{ + "method":"POST", + "requestUri":"/matchingworkflows/{workflowName}/generateMatches", + "responseCode":200 + }, + "input":{"shape":"GenerateMatchIdInput"}, + "output":{"shape":"GenerateMatchIdOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Generates or retrieves Match IDs for records using a rule-based matching workflow. When you call this operation, it processes your records against the workflow's matching rules to identify potential matches. For existing records, it retrieves their Match IDs and associated rules. For records without matches, it generates new Match IDs. The operation saves results to Amazon S3.

    The processing type (processingType) you choose affects both the accuracy and response time of the operation. Additional charges apply for each API call, whether made through the Entity Resolution console or directly via the API. The rule-based matching workflow must exist and be active before calling this operation.

    " + }, "GetIdMappingJob":{ "name":"GetIdMappingJob", "http":{ @@ -238,7 +256,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

    Gets the status, metrics, and errors (if there are any) that are associated with a job.

    " + "documentation":"

    Returns the status, metrics, and errors (if there are any) that are associated with a job.

    " }, "GetIdMappingWorkflow":{ "name":"GetIdMappingWorkflow", @@ -310,7 +328,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

    Gets the status, metrics, and errors (if there are any) that are associated with a job.

    " + "documentation":"

    Returns the status, metrics, and errors (if there are any) that are associated with a job.

    " }, "GetMatchingWorkflow":{ "name":"GetMatchingWorkflow", @@ -1412,6 +1430,83 @@ }, "exception":true }, + "FailedRecord":{ + "type":"structure", + "required":[ + "inputSourceARN", + "uniqueId", + "errorMessage" + ], + "members":{ + "inputSourceARN":{ + "shape":"FailedRecordInputSourceARNString", + "documentation":"

    The input source ARN of the record that didn't generate a Match ID.

    " + }, + "uniqueId":{ + "shape":"String", + "documentation":"

    The unique ID of the record that didn't generate a Match ID.

    " + }, + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    The error message for the record that didn't generate a Match ID.

    " + } + }, + "documentation":"

    The record that didn't generate a Match ID.

    " + }, + "FailedRecordInputSourceARNString":{ + "type":"string", + "pattern":"arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(idnamespace/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(matchingworkflow/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):glue:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(table/[a-zA-Z_0-9-]{1,255}/[a-zA-Z_0-9-]{1,255})" + }, + "FailedRecordsList":{ + "type":"list", + "member":{"shape":"FailedRecord"} + }, + "GenerateMatchIdInput":{ + "type":"structure", + "required":[ + "workflowName", + "records" + ], + "members":{ + "workflowName":{ + "shape":"EntityName", + "documentation":"

    The name of the rule-based matching workflow.

    ", + "location":"uri", + "locationName":"workflowName" + }, + "records":{ + "shape":"GenerateMatchIdInputRecordsList", + "documentation":"

    The records to match.

    " + }, + "processingType":{ + "shape":"ProcessingType", + "documentation":"

    The processing mode that determines how Match IDs are generated and results are saved. Each mode provides different levels of accuracy, response time, and completeness of results.

    If not specified, defaults to CONSISTENT.

    CONSISTENT: Performs immediate lookup and matching against all existing records, with results saved synchronously. Provides highest accuracy but slower response time.

    EVENTUAL (shown as Background in the console): Performs initial match ID lookup or generation immediately, with record updates processed asynchronously in the background. Offers faster initial response time, with complete matching results available later in S3.

    EVENTUAL_NO_LOOKUP (shown as Quick ID generation in the console): Generates new match IDs without checking existing matches, with updates processed asynchronously. Provides fastest response time but should only be used for records known to be unique.

    " + } + } + }, + "GenerateMatchIdInputRecordsList":{ + "type":"list", + "member":{"shape":"Record"}, + "max":1, + "min":1 + }, + "GenerateMatchIdOutput":{ + "type":"structure", + "required":[ + "matchGroups", + "failedRecords" + ], + "members":{ + "matchGroups":{ + "shape":"MatchGroupsList", + "documentation":"

    The match groups from the generated match ID.

    " + }, + "failedRecords":{ + "shape":"FailedRecordsList", + "documentation":"

    The records that didn't receive a generated Match ID.

    " + } + } + }, "GetIdMappingJobInput":{ "type":"structure", "required":[ @@ -1664,7 +1759,7 @@ "members":{ "jobId":{ "shape":"JobId", - "documentation":"

    The ID of the job.

    " + "documentation":"

    The unique identifier of the matching job.

    " }, "status":{ "shape":"JobStatus", @@ -1921,7 +2016,7 @@ }, "mappedInputFields":{ "shape":"SchemaInputAttributes", - "documentation":"

    A list of MappedInputFields. Each MappedInputField corresponds to a column the source data table, and contains column name plus additional information Venice uses for matching.

    " + "documentation":"

    A list of MappedInputFields. Each MappedInputField corresponds to a column in the source data table, and contains the column name plus additional information that Entity Resolution uses for matching.

    " }, "createdAt":{ "shape":"Timestamp", @@ -2754,6 +2849,33 @@ } } }, + "MatchGroup":{ + "type":"structure", + "required":[ + "records", + "matchId", + "matchRule" + ], + "members":{ + "records":{ + "shape":"MatchedRecordsList", + "documentation":"

    The matched records.

    " + }, + "matchId":{ + "shape":"String", + "documentation":"

    The match ID.

    " + }, + "matchRule":{ + "shape":"String", + "documentation":"

    The match rule of the match group.

    " + } + }, + "documentation":"

    The match group.

    " + }, + "MatchGroupsList":{ + "type":"list", + "member":{"shape":"MatchGroup"} + }, "MatchPurpose":{ "type":"string", "enum":[ @@ -2761,6 +2883,32 @@ "INDEXING" ] }, + "MatchedRecord":{ + "type":"structure", + "required":[ + "inputSourceARN", + "recordId" + ], + "members":{ + "inputSourceARN":{ + "shape":"MatchedRecordInputSourceARNString", + "documentation":"

    The input source ARN of the matched record.

    " + }, + "recordId":{ + "shape":"String", + "documentation":"

    The record ID of the matched record.

    " + } + }, + "documentation":"

    The matched record.

    " + }, + "MatchedRecordInputSourceARNString":{ + "type":"string", + "pattern":"arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(idnamespace/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(matchingworkflow/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):glue:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(table/[a-zA-Z_0-9-]{1,255}/[a-zA-Z_0-9-]{1,255})" + }, + "MatchedRecordsList":{ + "type":"list", + "member":{"shape":"MatchedRecord"} + }, "MatchingWorkflowArn":{ "type":"string", "pattern":"arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(matchingworkflow/[a-zA-Z_0-9-]{1,255})" @@ -2915,6 +3063,14 @@ "min":36, "pattern":"[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}" }, + "ProcessingType":{ + "type":"string", + "enum":[ + "CONSISTENT", + "EVENTUAL", + "EVENTUAL_NO_LOOKUP" + ] + }, "ProviderComponentSchema":{ "type":"structure", "members":{ @@ -3141,6 +3297,29 @@ } } }, + "Record":{ + "type":"structure", + "required":[ + "inputSourceARN", + "uniqueId", + "recordAttributeMap" + ], + "members":{ + "inputSourceARN":{ + "shape":"RecordInputSourceARNString", + "documentation":"

    The input source ARN of the record.

    " + }, + "uniqueId":{ + "shape":"UniqueId", + "documentation":"

    The unique ID of the record.

    " + }, + "recordAttributeMap":{ + "shape":"RecordAttributeMapString255", + "documentation":"

    The record's attribute map.

    " + } + }, + "documentation":"

    The record.

    " + }, "RecordAttributeMap":{ "type":"map", "key":{"shape":"RecordAttributeMapKeyString"}, @@ -3153,12 +3332,32 @@ "min":0, "pattern":"[a-zA-Z_0-9- \\t]*" }, + "RecordAttributeMapString255":{ + "type":"map", + "key":{"shape":"RecordAttributeMapString255KeyString"}, + "value":{"shape":"RecordAttributeMapString255ValueString"}, + "sensitive":true + }, + "RecordAttributeMapString255KeyString":{ + "type":"string", + "max":255, + "min":0 + }, + "RecordAttributeMapString255ValueString":{ + "type":"string", + "max":255, + "min":0 + }, "RecordAttributeMapValueString":{ "type":"string", "max":255, "min":0, "pattern":"[a-zA-Z_0-9-./@ ()+\\t]*" }, + "RecordInputSourceARNString":{ + "type":"string", + "pattern":"arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(idnamespace/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(matchingworkflow/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):glue:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(table/[a-zA-Z_0-9-]{1,255}/[a-zA-Z_0-9-]{1,255})" + }, "RecordMatchingModel":{ "type":"string", "enum":[ @@ -3561,6 +3760,12 @@ "retryable":{"throttling":true} }, "Timestamp":{"type":"timestamp"}, + "UniqueId":{ + "type":"string", + "max":38, + "min":1, + "pattern":"[a-zA-Z0-9_-]*" + }, "UniqueIdList":{ "type":"list", "member":{"shape":"HeaderSafeUniqueId"} @@ -3812,7 +4017,7 @@ }, "resolutionTechniques":{ "shape":"ResolutionTechniques", - "documentation":"

    An object which defines the resolutionType and the ruleBasedProperties

    " + "documentation":"

    An object which defines the resolutionType and the ruleBasedProperties.

    " }, "incrementalRunConfig":{ "shape":"IncrementalRunConfig", diff --git a/services/eventbridge/pom.xml b/services/eventbridge/pom.xml index 67dc1c723b94..50a71e222685 100644 --- a/services/eventbridge/pom.xml +++ b/services/eventbridge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT eventbridge AWS Java SDK :: Services :: EventBridge diff --git a/services/eventbridge/src/main/resources/codegen-resources/paginators-1.json b/services/eventbridge/src/main/resources/codegen-resources/paginators-1.json index 5677bd8e4a2d..ea142457a6a7 100644 --- a/services/eventbridge/src/main/resources/codegen-resources/paginators-1.json +++ b/services/eventbridge/src/main/resources/codegen-resources/paginators-1.json @@ -1,4 +1,3 @@ { - "pagination": { - } + "pagination": {} } diff --git a/services/eventbridge/src/main/resources/codegen-resources/service-2.json b/services/eventbridge/src/main/resources/codegen-resources/service-2.json index e25c3b98f4b3..0b6e47877f20 100644 --- a/services/eventbridge/src/main/resources/codegen-resources/service-2.json +++ b/services/eventbridge/src/main/resources/codegen-resources/service-2.json @@ -877,8 +877,7 @@ "shapes":{ "AccessDeniedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You do not have the necessary permissions for this action.

    ", "exception":true }, @@ -1234,8 +1233,7 @@ }, "ConcurrentModificationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    There is concurrent modification on a rule, target, archive, or replay.

    ", "exception":true }, @@ -2040,8 +2038,7 @@ }, "DeleteApiDestinationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteArchiveRequest":{ "type":"structure", @@ -2055,8 +2052,7 @@ }, "DeleteArchiveResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteConnectionRequest":{ "type":"structure", @@ -2105,8 +2101,7 @@ }, "DeleteEndpointResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteEventBusRequest":{ "type":"structure", @@ -2938,7 +2933,7 @@ "type":"string", "max":1600, "min":1, - "pattern":"(arn:aws[\\w-]*:events:[a-z]{2}-[a-z]+-[\\w-]+:[0-9]{12}:event-bus\\/)?[/\\.\\-_A-Za-z0-9]+" + "pattern":"(arn:aws[\\w-]*:events:[a-z]+-[a-z]+-[\\w-]+:[0-9]{12}:event-bus\\/)?[/\\.\\-_A-Za-z0-9]+" }, "EventId":{ "type":"string", @@ -3100,8 +3095,7 @@ }, "IllegalStatusException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    An error occurred because a replay can be canceled only when the state is Running or Starting.

    ", "exception":true }, @@ -3129,23 +3123,20 @@ "Integer":{"type":"integer"}, "InternalException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception occurs due to unexpected causes.

    ", "exception":true, "fault":true }, "InvalidEventPatternException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The event pattern is not valid.

    ", "exception":true }, "InvalidStateException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified state is not a valid state for an event source.

    ", "exception":true }, @@ -3175,8 +3166,7 @@ }, "LimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The request failed because it attempted to create a resource beyond the allowed service quota.

    ", "exception":true }, @@ -3620,8 +3610,7 @@ }, "ManagedRuleException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This rule was created by an Amazon Web Services service on behalf of your account. It is managed by that service. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You cannot modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

    ", "exception":true }, @@ -3658,7 +3647,7 @@ "type":"string", "max":512, "min":1, - "pattern":"^arn:aws[a-z-]*:events:[a-z]{2}-[a-z-]+-\\d+:\\d{12}:event-bus/[\\w.-]+$" + "pattern":"^arn:aws[a-z-]*:events:[a-z]+-[a-z-]+-\\d+:\\d{12}:event-bus/[\\w.-]+$" }, "NonPartnerEventBusName":{ "type":"string", @@ -3670,12 +3659,11 @@ "type":"string", "max":1600, "min":1, - "pattern":"(arn:aws[\\w-]*:events:[a-z]{2}-[a-z]+-[\\w-]+:[0-9]{12}:event-bus\\/)?[\\.\\-_A-Za-z0-9]+" + "pattern":"(arn:aws[\\w-]*:events:[a-z]+-[a-z]+-[\\w-]+:[0-9]{12}:event-bus\\/)?[\\.\\-_A-Za-z0-9]+" }, "OperationDisabledException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The operation you are attempting is not available in this region.

    ", "exception":true }, @@ -3800,8 +3788,7 @@ }, "PolicyLengthExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The event bus policy is too long. For more information, see the limits.

    ", "exception":true }, @@ -4386,8 +4373,7 @@ }, "ResourceAlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The resource you are trying to create already exists.

    ", "exception":true }, @@ -4410,8 +4396,7 @@ }, "ResourceNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    An entity that you specified does not exist.

    ", "exception":true }, @@ -4798,8 +4783,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -4947,8 +4931,7 @@ }, "ThrottlingException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This request cannot be completed due to throttling issues.

    ", "exception":true }, @@ -4988,8 +4971,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateApiDestinationRequest":{ "type":"structure", diff --git a/services/evidently/pom.xml b/services/evidently/pom.xml index 1acd69548cde..15451d6e4ff0 100644 --- a/services/evidently/pom.xml +++ b/services/evidently/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT evidently AWS Java SDK :: Services :: Evidently diff --git a/services/evs/pom.xml b/services/evs/pom.xml new file mode 100644 index 000000000000..c238e532c6b1 --- /dev/null +++ b/services/evs/pom.xml @@ -0,0 +1,60 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.31.65-SNAPSHOT + + evs + AWS Java SDK :: Services :: Evs + The AWS Java SDK for Evs module holds the client classes that are used for + communicating with Evs. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.evs + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + software.amazon.awssdk + http-auth-aws + ${awsjavasdk.version} + + + diff --git a/services/evs/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/evs/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 000000000000..a5d16a60c9dd --- /dev/null +++ b/services/evs/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://evs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://evs-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://evs.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://evs.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/services/evs/src/main/resources/codegen-resources/endpoint-tests.json b/services/evs/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 000000000000..7555a890f574 --- /dev/null +++ b/services/evs/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,314 @@ +{ + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://evs-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://evs.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://evs-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://evs.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://evs-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true 
+ } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://evs.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": 
{ + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/evs/src/main/resources/codegen-resources/paginators-1.json b/services/evs/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..c5e0850aedbf --- /dev/null +++ b/services/evs/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,22 @@ +{ + "pagination": { + "ListEnvironmentHosts": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "environmentHosts" + }, + "ListEnvironmentVlans": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "environmentVlans" + }, + "ListEnvironments": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "environmentSummaries" + } + } +} diff --git a/services/evs/src/main/resources/codegen-resources/service-2.json b/services/evs/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..78f3105f0a4f --- /dev/null +++ b/services/evs/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1356 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2023-07-27", + "auth":["aws.auth#sigv4"], + "endpointPrefix":"evs", + "jsonVersion":"1.0", + "protocol":"json", + "protocols":["json"], + "serviceAbbreviation":"EVS", + "serviceFullName":"Amazon Elastic VMware Service", + "serviceId":"evs", + "signatureVersion":"v4", + "signingName":"evs", + "targetPrefix":"AmazonElasticVMwareService", + "uid":"evs-2023-07-27" + }, + "operations":{ + "CreateEnvironment":{ + "name":"CreateEnvironment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEnvironmentRequest"}, + "output":{"shape":"CreateEnvironmentResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

    Creates an Amazon EVS environment that runs VCF software, such as SDDC Manager, NSX Manager, and vCenter Server.

    During environment creation, Amazon EVS performs validations on DNS settings, provisions VLAN subnets and hosts, and deploys the supplied version of VCF.

    It can take several hours to create an environment. After the deployment completes, you can configure VCF according to your unique requirements.

    You cannot use the dedicatedHostId and placementGroupId parameters together in the same CreateEnvironment action. This results in a ValidationException response.

    EC2 instances created through Amazon EVS do not support associating an IAM instance profile.

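As a rough illustration of how the required members of CreateEnvironmentRequest fit together, here is a sketch under the usual codegen assumptions (a generated EvsClient and a builder per shape); the license keys, IDs, and site ID are hypothetical, the VCF version string is assumed, and the InitialVlans, hosts (exactly four at creation), and VcfHostnames values are passed in prebuilt. One way to build the VLANs is shown in the InitialVlans sketch further down.

    import java.util.List;
    import software.amazon.awssdk.services.evs.EvsClient;
    import software.amazon.awssdk.services.evs.model.ConnectivityInfo;
    import software.amazon.awssdk.services.evs.model.CreateEnvironmentResponse;
    import software.amazon.awssdk.services.evs.model.Environment;
    import software.amazon.awssdk.services.evs.model.HostInfoForCreate;
    import software.amazon.awssdk.services.evs.model.InitialVlans;
    import software.amazon.awssdk.services.evs.model.LicenseInfo;
    import software.amazon.awssdk.services.evs.model.VcfHostnames;

    public class CreateEnvironmentExample {
        static Environment create(EvsClient evs, InitialVlans vlans,
                                  List<HostInfoForCreate> hosts, VcfHostnames hostnames) {
            LicenseInfo licenses = LicenseInfo.builder()
                    .solutionKey("AAAAA-BBBBB-CCCCC-DDDDD-EEEEE")       // hypothetical VCF solution key
                    .vsanKey("FFFFF-GGGGG-HHHHH-IIIII-JJJJJ")           // hypothetical vSAN license key
                    .build();

            ConnectivityInfo connectivity = ConnectivityInfo.builder()
                    .privateRouteServerPeerings("rsp-1111", "rsp-2222") // hypothetical route server peer IDs
                    .build();

            CreateEnvironmentResponse response = evs.createEnvironment(r -> r
                    .environmentName("my-evs-environment")
                    .vpcId("vpc-0123456789abcdef0")                     // hypothetical VPC ID
                    .serviceAccessSubnetId("subnet-0123456789abcdef0")  // hypothetical subnet ID
                    .vcfVersion("VCF-5.2.1")                            // assumed version string; 5.2.1 is the only supported version
                    .termsAccepted(true)                                // confirms sufficient VCF licensing
                    .licenseInfo(licenses)                              // the list holds exactly one entry
                    .initialVlans(vlans)
                    .hosts(hosts)                                       // exactly four hosts at creation time
                    .connectivityInfo(connectivity)
                    .vcfHostnames(hostnames)
                    .siteId("my-broadcom-site-id"));                    // hypothetical Broadcom Site ID
            return response.environment();
        }
    }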
    ", + "idempotent":true + }, + "CreateEnvironmentHost":{ + "name":"CreateEnvironmentHost", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEnvironmentHostRequest"}, + "output":{"shape":"CreateEnvironmentHostResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Creates an ESXi host and adds it to an Amazon EVS environment. Amazon EVS supports 4-16 hosts per environment.

    This action can only be used after the Amazon EVS environment is deployed. All Amazon EVS hosts are created with the latest AMI release version for the respective VCF version of the environment.

    You can use the dedicatedHostId parameter to specify an Amazon EC2 Dedicated Host for ESXi host creation.

    You can use the placementGroupId parameter to specify a cluster or partition placement group to launch EC2 instances into.

    You cannot use the dedicatedHostId and placementGroupId parameters together in the same CreateEnvironmentHost action. This results in a ValidationException response.

    EC2 instances created through Amazon EVS do not support associating an IAM instance profile.

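A minimal sketch of adding a fifth host, assuming the generated EvsClient and HostInfoForCreate builder; the hostname, key pair name, environment ID, and placement group ID are hypothetical. Per the constraint above, placementGroupId and dedicatedHostId are mutually exclusive.

    import software.amazon.awssdk.services.evs.EvsClient;
    import software.amazon.awssdk.services.evs.model.CreateEnvironmentHostResponse;
    import software.amazon.awssdk.services.evs.model.HostInfoForCreate;

    public class AddHostExample {
        public static void main(String[] args) {
            EvsClient evs = EvsClient.create();

            // Set either placementGroupId or dedicatedHostId, never both;
            // combining them yields a ValidationException.
            HostInfoForCreate host = HostInfoForCreate.builder()
                    .hostName("esxi-host-05")                  // hypothetical hostname
                    .keyName("my-ec2-ssh-key")                 // hypothetical EC2 key pair name
                    .instanceType("i4i.metal")                 // the only instance type in this model
                    .placementGroupId("pg-0123456789abcdef0")  // hypothetical placement group
                    .build();

            CreateEnvironmentHostResponse response = evs.createEnvironmentHost(r -> r
                    .environmentId("env-abcdefghij")           // hypothetical environment ID
                    .host(host));

            System.out.println("host state: " + response.host().hostState());
        }
    }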
    ", + "idempotent":true + }, + "DeleteEnvironment":{ + "name":"DeleteEnvironment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEnvironmentRequest"}, + "output":{"shape":"DeleteEnvironmentResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Deletes an Amazon EVS environment.

    Amazon EVS environments will only be enabled for deletion once the hosts are deleted. You can delete hosts using the DeleteEnvironmentHost action.

    Environment deletion also deletes the associated Amazon EVS VLAN subnets. Other associated Amazon Web Services resources are not deleted. These resources may continue to incur costs.

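The deletion ordering described above (decommission in SDDC Manager, delete hosts, then delete the environment) might look like this in practice; a sketch with hypothetical IDs, again assuming the generated EvsClient.

    import software.amazon.awssdk.services.evs.EvsClient;

    public class DeleteEnvironmentExample {
        public static void main(String[] args) {
            EvsClient evs = EvsClient.create();
            String environmentId = "env-abcdefghij"; // hypothetical environment ID

            // Hosts must first be unassigned and decommissioned in SDDC Manager,
            // then deleted here; in practice you would poll each host's state
            // until it reaches DELETED before proceeding.
            evs.deleteEnvironmentHost(r -> r
                    .environmentId(environmentId)
                    .hostName("esxi-host-05"));      // hypothetical hostname

            // Once all hosts are gone the environment can be deleted. The VLAN
            // subnets go with it; other associated AWS resources remain.
            evs.deleteEnvironment(r -> r.environmentId(environmentId));
        }
    }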
    ", + "idempotent":true + }, + "DeleteEnvironmentHost":{ + "name":"DeleteEnvironmentHost", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEnvironmentHostRequest"}, + "output":{"shape":"DeleteEnvironmentHostResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Deletes a host from an Amazon EVS environment.

    Before deleting a host, you must unassign and decommission the host from within the SDDC Manager user interface. Not doing so could impact the availability of your virtual machines or result in data loss.

    ", + "idempotent":true + }, + "GetEnvironment":{ + "name":"GetEnvironment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetEnvironmentRequest"}, + "output":{"shape":"GetEnvironmentResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Returns a description of the specified environment.

    " + }, + "ListEnvironmentHosts":{ + "name":"ListEnvironmentHosts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEnvironmentHostsRequest"}, + "output":{"shape":"ListEnvironmentHostsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    List the hosts within an environment.

    " + }, + "ListEnvironmentVlans":{ + "name":"ListEnvironmentVlans", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEnvironmentVlansRequest"}, + "output":{"shape":"ListEnvironmentVlansResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Lists environment VLANs that are associated with the specified environment.

    " + }, + "ListEnvironments":{ + "name":"ListEnvironments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEnvironmentsRequest"}, + "output":{"shape":"ListEnvironmentsResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

    Lists the Amazon EVS environments in your Amazon Web Services account in the specified Amazon Web Services Region.

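Since paginators-1.json above declares pagination for ListEnvironments over environmentSummaries, the generator would be expected to emit a listEnvironmentsPaginator method; a minimal sketch under that assumption (the no-argument overload assumes the request has no required members).

    import software.amazon.awssdk.services.evs.EvsClient;

    public class ListEnvironmentsExample {
        public static void main(String[] args) {
            EvsClient evs = EvsClient.create();

            // The paginator handles the nextToken bookkeeping; environmentSummaries()
            // flattens the result_key declared in paginators-1.json across pages.
            evs.listEnvironmentsPaginator()
               .environmentSummaries()
               .forEach(summary -> System.out.println(
                       summary.environmentName() + " -> " + summary.environmentState()));
        }
    }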
    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Lists the tags for an Amazon EVS resource.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyTagsException"}, + {"shape":"TagPolicyException"} + ], + "documentation":"

    Associates the specified tags to an Amazon EVS resource with the specified resourceArn. If existing tags on a resource are not specified in the request parameters, they aren't changed. When a resource is deleted, the tags associated with that resource are also deleted. Tags that you create for Amazon EVS resources don't propagate to any other resources associated with the environment. For example, if you tag an environment with this operation, that tag doesn't automatically propagate to the VLAN subnets and hosts associated with the environment.

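A short tagging sketch, assuming the generated EvsClient; the ARN is hypothetical, and the untagResource tagKeys member name is assumed since that request shape is not shown in this excerpt. As noted above, tags do not propagate to the environment's VLAN subnets or hosts.

    import java.util.Map;
    import software.amazon.awssdk.services.evs.EvsClient;

    public class TagExample {
        public static void main(String[] args) {
            EvsClient evs = EvsClient.create();
            String arn = "arn:aws:evs:us-east-1:111122223333:environment/env-abcdefghij"; // hypothetical ARN

            // Tags apply only to the tagged resource itself.
            evs.tagResource(r -> r.resourceArn(arn)
                                  .tags(Map.of("team", "virtualization", "stage", "dev")));

            evs.untagResource(r -> r.resourceArn(arn).tagKeys("stage")); // tagKeys name assumed
        }
    }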
    ", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"TagPolicyException"} + ], + "documentation":"

    Deletes specified tags from an Amazon EVS resource.

    ", + "idempotent":true + } + }, + "shapes":{ + "Arn":{ + "type":"string", + "max":1011, + "min":1, + "pattern":"arn:aws:evs:[a-z]{2}-[a-z]+-[0-9]:[0-9]{12}:environment/[a-zA-Z0-9_-]+" + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "Check":{ + "type":"structure", + "members":{ + "type":{ + "shape":"CheckType", + "documentation":"

    The check type. Amazon EVS performs the following checks.

    • KEY_REUSE: checks that the VCF license key is not used by another Amazon EVS environment. This check fails if a used license is added to the environment.

    • KEY_COVERAGE: checks that your VCF license key allocates sufficient vCPU cores for all deployed hosts. The check fails when any assigned hosts in the EVS environment are not covered by license keys, or when any unassigned hosts cannot be covered by available vCPU cores in keys.

    • REACHABILITY: checks that the Amazon EVS control plane has a persistent connection to SDDC Manager. If Amazon EVS cannot reach the environment, this check fails.

    • HOST_COUNT: Checks that your environment has a minimum of 4 hosts, which is a requirement for VCF 5.2.1.

      If this check fails, you will need to add hosts so that your environment meets this minimum requirement. Amazon EVS only supports environments with 4-16 hosts.

    " + }, + "result":{ + "shape":"CheckResult", + "documentation":"

    The check result.

    " + }, + "impairedSince":{ + "shape":"Timestamp", + "documentation":"

    The time when environment health began to be impaired.

    " + } + }, + "documentation":"

    A check on the environment to identify environment health and validate VMware VCF licensing compliance.

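Reading these checks back from a deployed environment might look like the following sketch, assuming the generated EvsClient and model enums; the environment ID is hypothetical.

    import software.amazon.awssdk.services.evs.EvsClient;
    import software.amazon.awssdk.services.evs.model.CheckResult;
    import software.amazon.awssdk.services.evs.model.Environment;

    public class EnvironmentChecksExample {
        public static void main(String[] args) {
            EvsClient evs = EvsClient.create();

            Environment env = evs.getEnvironment(r -> r.environmentId("env-abcdefghij")) // hypothetical ID
                                 .environment();

            // Each Check pairs a type (KEY_REUSE, KEY_COVERAGE, REACHABILITY,
            // HOST_COUNT) with a PASSED/FAILED/UNKNOWN result.
            env.checks().stream()
               .filter(check -> check.result() == CheckResult.FAILED)
               .forEach(check -> System.out.println(
                       check.type() + " failing since " + check.impairedSince()));
        }
    }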
    " + }, + "CheckResult":{ + "type":"string", + "enum":[ + "PASSED", + "FAILED", + "UNKNOWN" + ] + }, + "CheckType":{ + "type":"string", + "enum":[ + "KEY_REUSE", + "KEY_COVERAGE", + "REACHABILITY", + "HOST_COUNT" + ] + }, + "ChecksList":{ + "type":"list", + "member":{"shape":"Check"} + }, + "Cidr":{ + "type":"string", + "pattern":"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/(3[0-2]|[1-2][0-9]|[0-9])" + }, + "ClientToken":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[!-~]+" + }, + "ConnectivityInfo":{ + "type":"structure", + "required":["privateRouteServerPeerings"], + "members":{ + "privateRouteServerPeerings":{ + "shape":"RouteServerPeeringList", + "documentation":"

    The unique IDs for private route server peers.

    " + } + }, + "documentation":"

    The connectivity configuration for the environment. Amazon EVS requires that you specify two route server peer IDs. During environment creation, the route server endpoints peer with the NSX uplink VLAN for connectivity to the NSX overlay network.

    " + }, + "CreateEnvironmentHostRequest":{ + "type":"structure", + "required":[ + "environmentId", + "host" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    This parameter is not used in Amazon EVS currently. If you supply input for this parameter, it will have no effect.

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the host creation request. If you do not specify a client token, a randomly generated token is used for the request to ensure idempotency.

    ", + "idempotencyToken":true + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

    A unique ID for the environment that the host is added to.

    " + }, + "host":{ + "shape":"HostInfoForCreate", + "documentation":"

    The host that is created and added to the environment.

    " + } + } + }, + "CreateEnvironmentHostResponse":{ + "type":"structure", + "members":{ + "environmentSummary":{ + "shape":"EnvironmentSummary", + "documentation":"

    A summary of the environment that the host is created in.

    " + }, + "host":{ + "shape":"Host", + "documentation":"

    A description of the created host.

    " + } + } + }, + "CreateEnvironmentRequest":{ + "type":"structure", + "required":[ + "vpcId", + "serviceAccessSubnetId", + "vcfVersion", + "termsAccepted", + "licenseInfo", + "initialVlans", + "hosts", + "connectivityInfo", + "vcfHostnames", + "siteId" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    This parameter is not used in Amazon EVS currently. If you supply input for this parameter, it will have no effect.

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the environment creation request. If you do not specify a client token, a randomly generated token is used for the request to ensure idempotency.

    ", + "idempotencyToken":true + }, + "environmentName":{ + "shape":"EnvironmentName", + "documentation":"

    The name to give to your environment. The name can contain only alphanumeric characters (case-sensitive), hyphens, and underscores. It must start with an alphanumeric character, and can't be longer than 100 characters. The name must be unique within the Amazon Web Services Region and Amazon Web Services account that you're creating the environment in.

    " + }, + "kmsKeyId":{ + "shape":"String", + "documentation":"

    A unique ID for the customer-managed KMS key that is used to encrypt the VCF credential pairs for SDDC Manager, NSX Manager, and vCenter appliances. These credentials are stored in Amazon Web Services Secrets Manager.

    " + }, + "tags":{ + "shape":"RequestTagMap", + "documentation":"

    Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or Amazon Web Services resources.

    " + }, + "serviceAccessSecurityGroups":{ + "shape":"ServiceAccessSecurityGroups", + "documentation":"

    The security group that controls communication between the Amazon EVS control plane and VPC. The default security group is used if a custom security group isn't specified.

    The security group should allow access to the following.

    • TCP/UDP access to the DNS servers

    • HTTPS/SSH access to the host management VLAN subnet

    • HTTPS/SSH access to the Management VM VLAN subnet

    You should avoid modifying the security group rules after deployment, as this can break the persistent connection between the Amazon EVS control plane and VPC. This can cause future environment actions like adding or removing hosts to fail.

    " + }, + "vpcId":{ + "shape":"VpcId", + "documentation":"

    A unique ID for the VPC that connects to the environment control plane for service access.

    Amazon EVS requires that all VPC subnets exist in a single Availability Zone in a Region where the service is available.

    The VPC that you select must have a valid DHCP option set with domain name, at least two DNS servers, and an NTP server. These settings are used to configure your VCF appliances and hosts.

    If you plan to use HCX over the internet, choose a VPC that has a primary CIDR block and a /28 secondary CIDR block from an IPAM pool. Make sure that your VPC also has an attached internet gateway.

    Amazon EVS does not support the following Amazon Web Services networking options for NSX overlay connectivity: cross-Region VPC peering, Amazon S3 gateway endpoints, or Amazon Web Services Direct Connect virtual private gateway associations.

    " + }, + "serviceAccessSubnetId":{ + "shape":"SubnetId", + "documentation":"

    The subnet that is used to establish connectivity between the Amazon EVS control plane and VPC. Amazon EVS uses this subnet to validate mandatory DNS records for your VCF appliances and hosts, and to create the environment.

    " + }, + "vcfVersion":{ + "shape":"VcfVersion", + "documentation":"

    The VCF version to use for the environment. Amazon EVS only supports VCF version 5.2.1 at this time.

    " + }, + "termsAccepted":{ + "shape":"Boolean", + "documentation":"

    Customer confirmation that the customer has purchased and maintains sufficient VCF software licenses to cover all physical processor cores in the environment, in compliance with VMware's licensing requirements and terms of use.

    " + }, + "licenseInfo":{ + "shape":"LicenseInfoList", + "documentation":"

    The license information that Amazon EVS requires to create an environment. Amazon EVS requires two license keys: a VCF solution key and a vSAN license key. VCF licenses must have sufficient core entitlements to cover vCPU core and vSAN storage capacity needs.

    VCF licenses can be used for only one Amazon EVS environment. Amazon EVS does not support reuse of VCF licenses for multiple environments.

    VCF license information can be retrieved from the Broadcom portal.

    " + }, + "initialVlans":{ + "shape":"InitialVlans", + "documentation":"

    The initial VLAN subnets for the environment. You must specify a non-overlapping CIDR block for each VLAN subnet.

    " + }, + "hosts":{ + "shape":"HostInfoForCreateList", + "documentation":"

    The ESXi hosts to add to the environment. Amazon EVS requires that you provide details for a minimum of 4 hosts during environment creation.

    For each host, you must provide the desired hostname, EC2 SSH key, and EC2 instance type. Optionally, you can also provide a partition or cluster placement group to use, or use Amazon EC2 Dedicated Hosts.

    " + }, + "connectivityInfo":{ + "shape":"ConnectivityInfo", + "documentation":"

    The connectivity configuration for the environment. Amazon EVS requires that you specify two route server peer IDs. During environment creation, the route server endpoints peer with the NSX edges over the NSX uplink VLAN, providing BGP dynamic routing for overlay networks.

    " + }, + "vcfHostnames":{ + "shape":"VcfHostnames", + "documentation":"

    The DNS hostnames for the virtual machines that host the VCF management appliances. Amazon EVS requires that you provide DNS hostnames for the following appliances: vCenter, NSX Manager, SDDC Manager, and Cloud Builder.

    " + }, + "siteId":{ + "shape":"String", + "documentation":"

    The Broadcom Site ID that is allocated to you as part of your electronic software delivery. This ID allows customer access to the Broadcom portal, and is provided to you by Broadcom at the close of your software contract or contract renewal. Amazon EVS uses the Broadcom Site ID that you provide to meet Broadcom VCF license usage reporting requirements for Amazon EVS.

    " + } + } + }, + "CreateEnvironmentResponse":{ + "type":"structure", + "members":{ + "environment":{ + "shape":"Environment", + "documentation":"

    A description of the created environment.

    " + } + } + }, + "DedicatedHostId":{ + "type":"string", + "max":25, + "min":1, + "pattern":"h-[a-f0-9]{8}([a-f0-9]{9})?" + }, + "DeleteEnvironmentHostRequest":{ + "type":"structure", + "required":[ + "environmentId", + "hostName" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    This parameter is not used in Amazon EVS currently. If you supply input for this parameter, it will have no effect.

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the host deletion request. If you do not specify a client token, a randomly generated token is used for the request to ensure idempotency.

    ", + "idempotencyToken":true + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

    A unique ID for the host's environment.

    " + }, + "hostName":{ + "shape":"HostName", + "documentation":"

    The DNS hostname associated with the host to be deleted.

    " + } + } + }, + "DeleteEnvironmentHostResponse":{ + "type":"structure", + "members":{ + "environmentSummary":{ + "shape":"EnvironmentSummary", + "documentation":"

    A summary of the environment that the host was deleted from.

    " + }, + "host":{ + "shape":"Host", + "documentation":"

    A description of the deleted host.

    " + } + } + }, + "DeleteEnvironmentRequest":{ + "type":"structure", + "required":["environmentId"], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    This parameter is not used in Amazon EVS currently. If you supply input for this parameter, it will have no effect.

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the environment deletion request. If you do not specify a client token, a randomly generated token is used for the request to ensure idempotency.

    ", + "idempotencyToken":true + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

    A unique ID associated with the environment to be deleted.

    " + } + } + }, + "DeleteEnvironmentResponse":{ + "type":"structure", + "members":{ + "environment":{ + "shape":"Environment", + "documentation":"

    A description of the deleted environment.

    " + } + } + }, + "Environment":{ + "type":"structure", + "members":{ + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

    The unique ID for the environment.

    " + }, + "environmentState":{ + "shape":"EnvironmentState", + "documentation":"

    The state of an environment.

    " + }, + "stateDetails":{ + "shape":"StateDetails", + "documentation":"

    A detailed description of the environmentState of an environment.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the environment was created.

    " + }, + "modifiedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the environment was modified.

    " + }, + "environmentArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) that is associated with the environment.

    " + }, + "environmentName":{ + "shape":"EnvironmentName", + "documentation":"

    The name of the environment.

    " + }, + "vpcId":{ + "shape":"VpcId", + "documentation":"

    The VPC associated with the environment.

    " + }, + "serviceAccessSubnetId":{ + "shape":"SubnetId", + "documentation":"

    The subnet that is used to establish connectivity between the Amazon EVS control plane and VPC. Amazon EVS uses this subnet to perform validations and create the environment.

    " + }, + "vcfVersion":{ + "shape":"VcfVersion", + "documentation":"

    The VCF version of the environment.

    " + }, + "termsAccepted":{ + "shape":"Boolean", + "documentation":"

    Customer confirmation that the customer has purchased and maintains sufficient VCF software licenses to cover all physical processor cores in the environment, in compliance with VMware's licensing requirements and terms of use.

    " + }, + "licenseInfo":{ + "shape":"LicenseInfoList", + "documentation":"

    The license information that Amazon EVS requires to create an environment. Amazon EVS requires two license keys: a VCF solution key and a vSAN license key.

    " + }, + "siteId":{ + "shape":"String", + "documentation":"

    The Broadcom Site ID that is associated with your Amazon EVS environment. Amazon EVS uses the Broadcom Site ID that you provide to meet Broadcom VCF license usage reporting requirements for Amazon EVS.

    " + }, + "environmentStatus":{ + "shape":"CheckResult", + "documentation":"

    Reports impaired functionality that stems from issues internal to the environment, such as impaired reachability.

    " + }, + "checks":{ + "shape":"ChecksList", + "documentation":"

    A check on the environment to identify instance health and VMware VCF licensing issues.

    " + }, + "connectivityInfo":{ + "shape":"ConnectivityInfo", + "documentation":"

    The connectivity configuration for the environment. Amazon EVS requires that you specify two route server peer IDs. During environment creation, the route server endpoints peer with the NSX uplink VLAN for connectivity to the NSX overlay network.

    " + }, + "vcfHostnames":{ + "shape":"VcfHostnames", + "documentation":"

    The DNS hostnames to be used by the VCF management appliances in your environment.

    For environment creation to be successful, each hostname entry must resolve to a domain name that you've registered in your DNS service of choice and configured in the DHCP option set of your VPC. DNS hostnames cannot be changed after environment creation has started.

    " + }, + "kmsKeyId":{ + "shape":"String", + "documentation":"

    The Amazon Web Services KMS key ID that Amazon Web Services Secrets Manager uses to encrypt secrets that are associated with the environment. These secrets contain the VCF credentials that are needed to install vCenter Server, NSX, and SDDC Manager.

    By default, Amazon EVS uses the Amazon Web Services Secrets Manager managed key aws/secretsmanager. You can also specify a customer managed key.

    " + }, + "serviceAccessSecurityGroups":{ + "shape":"ServiceAccessSecurityGroups", + "documentation":"

    The security groups that allow traffic between the Amazon EVS control plane and your VPC for service access. If a security group is not specified, Amazon EVS uses the default security group in your account for service access.

    " + }, + "credentials":{ + "shape":"SecretList", + "documentation":"

    The VCF credentials that are stored as Amazon EVS managed secrets in Amazon Web Services Secrets Manager.

    Amazon EVS stores credentials that are needed to install vCenter Server, NSX, and SDDC Manager.

    " + } + }, + "documentation":"

    An object that represents an Amazon EVS environment.

    " + }, + "EnvironmentId":{ + "type":"string", + "pattern":"(env-[a-zA-Z0-9]{10})" + }, + "EnvironmentName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "EnvironmentState":{ + "type":"string", + "enum":[ + "CREATING", + "CREATED", + "DELETING", + "DELETED", + "CREATE_FAILED" + ] + }, + "EnvironmentStateList":{ + "type":"list", + "member":{"shape":"EnvironmentState"} + }, + "EnvironmentSummary":{ + "type":"structure", + "members":{ + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

    A unique ID for the environment.

    " + }, + "environmentName":{ + "shape":"EnvironmentName", + "documentation":"

    The name of the environment.

    " + }, + "vcfVersion":{ + "shape":"VcfVersion", + "documentation":"

    The VCF version of the environment.

    " + }, + "environmentStatus":{ + "shape":"CheckResult", + "documentation":"

    Reports impaired functionality that stems from issues internal to the environment, such as impaired reachability.

    " + }, + "environmentState":{ + "shape":"EnvironmentState", + "documentation":"

    The state of an environment.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the environment was created.

    " + }, + "modifiedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the environment was modified.

    " + }, + "environmentArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) that is associated with the environment.

    " + } + }, + "documentation":"

    A structure that contains summarized details for an environment.

    " + }, + "EnvironmentSummaryList":{ + "type":"list", + "member":{"shape":"EnvironmentSummary"} + }, + "GetEnvironmentRequest":{ + "type":"structure", + "required":["environmentId"], + "members":{ + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

    A unique ID for the environment.

    " + } + } + }, + "GetEnvironmentResponse":{ + "type":"structure", + "members":{ + "environment":{ + "shape":"Environment", + "documentation":"

    A description of the requested environment.

    " + } + } + }, + "Host":{ + "type":"structure", + "members":{ + "hostName":{ + "shape":"HostName", + "documentation":"

    The DNS hostname of the host. DNS hostnames for hosts must be unique across Amazon EVS environments and within VCF.

    " + }, + "ipAddress":{ + "shape":"IpAddress", + "documentation":"

    The IP address of the host.

    " + }, + "keyName":{ + "shape":"KeyName", + "documentation":"

    The name of the SSH key that is used to access the host.

    " + }, + "instanceType":{ + "shape":"InstanceType", + "documentation":"

    The EC2 instance type of the host.

    EC2 instances created through Amazon EVS do not support associating an IAM instance profile.

    " + }, + "placementGroupId":{ + "shape":"PlacementGroupId", + "documentation":"

    The unique ID of the placement group where the host is placed.

    " + }, + "dedicatedHostId":{ + "shape":"DedicatedHostId", + "documentation":"

    The unique ID of the Amazon EC2 Dedicated Host.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the host was created.

    " + }, + "modifiedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the host was modified.

    " + }, + "hostState":{ + "shape":"HostState", + "documentation":"

    The state of the host.

    " + }, + "stateDetails":{ + "shape":"StateDetails", + "documentation":"

    A detailed description of the hostState of a host.

    " + }, + "ec2InstanceId":{ + "shape":"String", + "documentation":"

    The unique ID of the EC2 instance that represents the host.

    " + }, + "networkInterfaces":{ + "shape":"NetworkInterfaceList", + "documentation":"

    The elastic network interfaces that are attached to the host.

    " + } + }, + "documentation":"

    An ESXi host that runs on an Amazon EC2 bare metal instance. Four hosts are created in an Amazon EVS environment during environment creation. You can add hosts to an environment using the CreateEnvironmentHost operation. Amazon EVS supports 4-16 hosts per environment.

    " + }, + "HostInfoForCreate":{ + "type":"structure", + "required":[ + "hostName", + "keyName", + "instanceType" + ], + "members":{ + "hostName":{ + "shape":"HostName", + "documentation":"

    The DNS hostname of the host. DNS hostnames for hosts must be unique across Amazon EVS environments and within VCF.

    " + }, + "keyName":{ + "shape":"KeyName", + "documentation":"

    The name of the SSH key that is used to access the host.

    " + }, + "instanceType":{ + "shape":"InstanceType", + "documentation":"

    The EC2 instance type that represents the host.

    " + }, + "placementGroupId":{ + "shape":"PlacementGroupId", + "documentation":"

    The unique ID of the placement group where the host is placed.

    " + }, + "dedicatedHostId":{ + "shape":"DedicatedHostId", + "documentation":"

    The unique ID of the Amazon EC2 Dedicated Host.

    " + } + }, + "documentation":"

    An object that represents a host.

    You cannot use dedicatedHostId and placementGroupId together in the same HostInfoForCreate object. This results in a ValidationException response.

    " + }, + "HostInfoForCreateList":{ + "type":"list", + "member":{"shape":"HostInfoForCreate"}, + "max":4, + "min":4 + }, + "HostList":{ + "type":"list", + "member":{"shape":"Host"} + }, + "HostName":{ + "type":"string", + "pattern":"([a-zA-Z0-9\\-]*)" + }, + "HostState":{ + "type":"string", + "enum":[ + "CREATING", + "CREATED", + "UPDATING", + "DELETING", + "DELETED", + "CREATE_FAILED", + "UPDATE_FAILED" + ] + }, + "InitialVlanInfo":{ + "type":"structure", + "required":["cidr"], + "members":{ + "cidr":{ + "shape":"Cidr", + "documentation":"

    The CIDR block that you provide to create a VLAN subnet. VLAN CIDR blocks must not overlap with other subnets in the VPC.

    " + } + }, + "documentation":"

    An object that represents an initial VLAN subnet for the environment. Amazon EVS creates initial VLAN subnets when you first create the environment. You must specify a non-overlapping CIDR block for each VLAN subnet. Amazon EVS creates the following 10 VLAN subnets: host management VLAN, vMotion VLAN, vSAN VLAN, VTEP VLAN, Edge VTEP VLAN, Management VM VLAN, HCX uplink VLAN, NSX uplink VLAN, expansion VLAN 1, expansion VLAN 2.

    " + }, + "InitialVlans":{ + "type":"structure", + "required":[ + "vmkManagement", + "vmManagement", + "vMotion", + "vSan", + "vTep", + "edgeVTep", + "nsxUplink", + "hcx", + "expansionVlan1", + "expansionVlan2" + ], + "members":{ + "vmkManagement":{ + "shape":"InitialVlanInfo", + "documentation":"

    The VMkernel management VLAN subnet. This VLAN subnet carries traffic for managing ESXi hosts and communicating with VMware vCenter Server.

    " + }, + "vmManagement":{ + "shape":"InitialVlanInfo", + "documentation":"

    The VM management VLAN subnet. This VLAN subnet carries traffic for vSphere virtual machines.

    " + }, + "vMotion":{ + "shape":"InitialVlanInfo", + "documentation":"

    The vMotion VLAN subnet. This VLAN subnet carries traffic for vSphere vMotion.

    " + }, + "vSan":{ + "shape":"InitialVlanInfo", + "documentation":"

    The vSAN VLAN subnet. This VLAN subnet carries the communication between ESXi hosts to implement a vSAN shared storage pool.

    " + }, + "vTep":{ + "shape":"InitialVlanInfo", + "documentation":"

    The VTEP VLAN subnet. This VLAN subnet handles internal network traffic between virtual machines within a VCF instance.

    " + }, + "edgeVTep":{ + "shape":"InitialVlanInfo", + "documentation":"

    The edge VTEP VLAN subnet. This VLAN subnet manages traffic flowing between the internal network and external networks, including internet access and other site connections.

    " + }, + "nsxUplink":{ + "shape":"InitialVlanInfo", + "documentation":"

    The NSX uplink VLAN subnet. This VLAN subnet allows connectivity to the NSX overlay network.

    " + }, + "hcx":{ + "shape":"InitialVlanInfo", + "documentation":"

    The HCX VLAN subnet. This VLAN subnet allows the HCX Interconnect (IX) and HCX Network Extension (NE) to reach their peers and enable HCX Service Mesh creation.

    " + }, + "expansionVlan1":{ + "shape":"InitialVlanInfo", + "documentation":"

    An additional VLAN subnet that can be used to extend VCF capabilities once configured. For example, you can configure an expansion VLAN subnet to use NSX Federation for centralized management and synchronization of multiple NSX deployments across different locations.

    " + }, + "expansionVlan2":{ + "shape":"InitialVlanInfo", + "documentation":"

    An additional VLAN subnet that can be used to extend VCF capabilities once configured. For example, you can configure an expansion VLAN subnet to use NSX Federation for centralized management and synchronization of multiple NSX deployments across different locations.

    " + } + }, + "documentation":"

    The initial VLAN subnets for the environment. You must specify a non-overlapping CIDR block for each VLAN subnet.
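
    For illustration, a minimal sketch of assembling this structure with non-overlapping CIDR
    blocks. All class and method names are assumed from SDK codegen conventions, and the
    10.0.0.0/16 addressing plan is hypothetical.

    import software.amazon.awssdk.services.evs.model.InitialVlanInfo;
    import software.amazon.awssdk.services.evs.model.InitialVlans;

    public class InitialVlansExample {
        // Helper: build one VLAN subnet definition from a CIDR block.
        private static InitialVlanInfo vlan(String cidr) {
            return InitialVlanInfo.builder().cidr(cidr).build();
        }

        public static void main(String[] args) {
            // Each of the ten required VLAN subnets gets its own /24,
            // so no two CIDR blocks overlap.
            InitialVlans vlans = InitialVlans.builder()
                    .vmkManagement(vlan("10.0.0.0/24"))
                    .vmManagement(vlan("10.0.1.0/24"))
                    .vMotion(vlan("10.0.2.0/24"))
                    .vSan(vlan("10.0.3.0/24"))
                    .vTep(vlan("10.0.4.0/24"))
                    .edgeVTep(vlan("10.0.5.0/24"))
                    .nsxUplink(vlan("10.0.6.0/24"))
                    .hcx(vlan("10.0.7.0/24"))
                    .expansionVlan1(vlan("10.0.8.0/24"))
                    .expansionVlan2(vlan("10.0.9.0/24"))
                    .build();
            System.out.println(vlans);
        }
    }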

    " + }, + "InstanceType":{ + "type":"string", + "enum":["i4i.metal"] + }, + "Integer":{ + "type":"integer", + "box":true + }, + "IpAddress":{ + "type":"string", + "pattern":"(\\d{1,3}\\.){3}\\d{1,3}" + }, + "KeyName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "LicenseInfo":{ + "type":"structure", + "required":[ + "solutionKey", + "vsanKey" + ], + "members":{ + "solutionKey":{ + "shape":"SolutionKey", + "documentation":"

    The VCF solution key. This license unlocks VMware VCF product features, including vSphere, NSX, SDDC Manager, and vCenter Server.

    " + }, + "vsanKey":{ + "shape":"VSanLicenseKey", + "documentation":"

    The vSAN license key. This license unlocks vSAN features.

    " + } + }, + "documentation":"

    The license information that Amazon EVS requires to create an environment. Amazon EVS requires two license keys: a VCF solution key and a vSAN license key.
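
    A small illustrative sketch, assuming standard generated builder names; both key values are
    placeholders shaped like the documented five-by-five pattern, not real license keys.

    import software.amazon.awssdk.services.evs.model.LicenseInfo;

    public class LicenseInfoExample {
        public static void main(String[] args) {
            LicenseInfo license = LicenseInfo.builder()
                    .solutionKey("AAAAA-BBBBB-CCCCC-DDDDD-EEEEE") // placeholder VCF solution key
                    .vsanKey("11111-22222-33333-44444-55555")     // placeholder vSAN key
                    .build();
            System.out.println(license);
        }
    }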

    " + }, + "LicenseInfoList":{ + "type":"list", + "member":{"shape":"LicenseInfo"}, + "max":1, + "min":1 + }, + "ListEnvironmentHostsRequest":{ + "type":"structure", + "required":["environmentId"], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A unique pagination token for each page. If nextToken is returned, there are more results available. Make the call again using the returned token with all other arguments unchanged to retrieve the next page. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.
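
    A hedged sketch of the resulting pagination loop, assuming the generated client follows the
    usual SDK for Java v2 naming; EvsClient, listEnvironmentHosts, and the example environment ID
    are unverified assumptions based on codegen conventions.

    import software.amazon.awssdk.services.evs.EvsClient;
    import software.amazon.awssdk.services.evs.model.ListEnvironmentHostsRequest;
    import software.amazon.awssdk.services.evs.model.ListEnvironmentHostsResponse;

    public class ListHostsExample {
        public static void main(String[] args) {
            try (EvsClient evs = EvsClient.create()) {
                String token = null;
                do {
                    // Re-issue the call with all other arguments unchanged,
                    // passing the token returned by the previous page (null on the first call).
                    ListEnvironmentHostsRequest request = ListEnvironmentHostsRequest.builder()
                            .environmentId("env-1234567890") // hypothetical environment ID
                            .nextToken(token)
                            .build();
                    ListEnvironmentHostsResponse response = evs.listEnvironmentHosts(request);
                    response.environmentHosts().forEach(System.out::println);
                    token = response.nextToken();            // non-null while more pages remain
                } while (token != null);
            }
        }
    }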

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return. If you specify MaxResults in the request, the response includes information up to the limit specified.

    " + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

    A unique ID for the environment.

    " + } + } + }, + "ListEnvironmentHostsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A unique pagination token for the next page of results. Make the call again using this token to retrieve the next page.

    " + }, + "environmentHosts":{ + "shape":"HostList", + "documentation":"

    A list of hosts in the environment.

    " + } + } + }, + "ListEnvironmentVlansRequest":{ + "type":"structure", + "required":["environmentId"], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A unique pagination token for each page. If nextToken is returned, there are more results available. Make the call again using the returned token with all other arguments unchanged to retrieve the next page. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return. If you specify MaxResults in the request, the response includes information up to the limit specified.

    " + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

    A unique ID for the environment.

    " + } + } + }, + "ListEnvironmentVlansResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A unique pagination token for the next page of results. Make the call again using this token to retrieve the next page.

    " + }, + "environmentVlans":{ + "shape":"VlanList", + "documentation":"

    A list of VLANs that are associated with the specified environment.

    " + } + } + }, + "ListEnvironmentsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A unique pagination token for each page. If nextToken is returned, there are more results available. Make the call again using the returned token with all other arguments unchanged to retrieve the next page. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return. If you specify MaxResults in the request, the response includes information up to the limit specified.

    " + }, + "state":{ + "shape":"EnvironmentStateList", + "documentation":"

    The state of an environment. Used to filter response results to return only environments with the specified environmentState.

    " + } + } + }, + "ListEnvironmentsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A unique pagination token for the next page of results. Make the call again using this token to retrieve the next page.

    " + }, + "environmentSummaries":{ + "shape":"EnvironmentSummaryList", + "documentation":"

    A list of environments with summarized environment details.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) that identifies the resource to list tags for.

    " + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"ResponseTagMap", + "documentation":"

    The tags for the resource.

    " + } + } + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "NetworkInterface":{ + "type":"structure", + "members":{ + "networkInterfaceId":{ + "shape":"NetworkInterfaceId", + "documentation":"

    The unique ID of the elastic network interface.

    " + } + }, + "documentation":"

    An elastic network interface (ENI) that connects hosts to the VLAN subnets. Amazon EVS provisions two identically configured ENIs in the VMkernel management subnet during host creation. One ENI is active, and the other is in standby mode for automatic switchover during a failure scenario.

    " + }, + "NetworkInterfaceId":{ + "type":"string", + "max":100, + "min":1 + }, + "NetworkInterfaceList":{ + "type":"list", + "member":{"shape":"NetworkInterface"}, + "max":2, + "min":0 + }, + "PaginationToken":{"type":"string"}, + "PlacementGroupId":{ + "type":"string", + "max":25, + "min":1, + "pattern":"pg-[a-f0-9]{8}([a-f0-9]{9})?" + }, + "RequestTagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":1 + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Describes the error encountered.

    " + }, + "resourceId":{ + "shape":"String", + "documentation":"

    The ID of the resource that could not be found.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    The type of the resource that is associated with the error.

    " + } + }, + "documentation":"

    A service resource associated with the request could not be found. The resource might not be specified correctly, or it may have a state of DELETED.

    ", + "exception":true + }, + "ResponseTagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"} + }, + "RouteServerPeering":{ + "type":"string", + "max":21, + "min":3 + }, + "RouteServerPeeringList":{ + "type":"list", + "member":{"shape":"RouteServerPeering"}, + "max":2, + "min":2 + }, + "Secret":{ + "type":"structure", + "members":{ + "secretArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the secret.

    " + } + }, + "documentation":"

    A managed secret that contains the credentials for installing vCenter Server, NSX, and SDDC Manager. During environment creation, the Amazon EVS control plane uses Amazon Web Services Secrets Manager to create, encrypt, validate, and store secrets. If you choose to delete your environment, Amazon EVS also deletes the secrets that are associated with your environment. Amazon EVS does not provide managed rotation of secrets. We recommend that you rotate secrets regularly to ensure that secrets are not long-lived.

    " + }, + "SecretList":{ + "type":"list", + "member":{"shape":"Secret"} + }, + "SecurityGroupId":{ + "type":"string", + "max":25, + "min":3, + "pattern":"sg-[0-9a-zA-Z]*" + }, + "SecurityGroups":{ + "type":"list", + "member":{"shape":"SecurityGroupId"}, + "max":2, + "min":0 + }, + "ServiceAccessSecurityGroups":{ + "type":"structure", + "members":{ + "securityGroups":{ + "shape":"SecurityGroups", + "documentation":"

    The security groups that allow service access.

    " + } + }, + "documentation":"

    The security groups that allow traffic between the Amazon EVS control plane and your VPC for Amazon EVS service access. If a security group is not specified, Amazon EVS uses the default security group in your account for service access.

    " + }, + "SolutionKey":{ + "type":"string", + "pattern":"[a-zA-Z0-9]{5}-[a-zA-Z0-9]{5}-[a-zA-Z0-9]{5}-[a-zA-Z0-9]{5}-[a-zA-Z0-9]{5}" + }, + "StateDetails":{"type":"string"}, + "String":{"type":"string"}, + "SubnetId":{ + "type":"string", + "max":24, + "min":15, + "pattern":"subnet-[a-f0-9]{8}([a-f0-9]{9})?" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w.:/=+-@]+" + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":1 + }, + "TagPolicyException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Describes the error encountered.

    " + } + }, + "documentation":"

    The request doesn't comply with IAM tag policy. Correct your request and then retry it.

    ", + "exception":true + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource to add tags to.

    " + }, + "tags":{ + "shape":"RequestTagMap", + "documentation":"

    Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other environment or Amazon Web Services resources.
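
    A brief sketch of attaching tags, assuming conventional generated names (EvsClient,
    tagResource); the resource ARN is hypothetical.

    import java.util.Collections;
    import software.amazon.awssdk.services.evs.EvsClient;
    import software.amazon.awssdk.services.evs.model.TagResourceRequest;

    public class TagExample {
        public static void main(String[] args) {
            try (EvsClient evs = EvsClient.create()) {
                // Tag keys and values must match the documented TagKey/TagValue patterns.
                evs.tagResource(TagResourceRequest.builder()
                        .resourceArn("arn:aws:evs:us-east-1:111122223333:environment/env-1234567890")
                        .tags(Collections.singletonMap("team", "virtualization"))
                        .build());
            }
        }
    }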

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[\\w.:/=+-@]+|" + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Describes the error encountered.

    " + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

    The number of seconds to wait before retrying the request.

    " + } + }, + "documentation":"

    The CreateEnvironmentHost operation couldn't be performed because the service is throttling requests. This exception is thrown when the rate of CreateEnvironmentHost requests exceeds 1 transaction per second (TPS).
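
    One way to honor the backoff hint, sketched under the assumption that the generated exception
    exposes a retryAfterSeconds() accessor per the model above; request fields are omitted for
    brevity, and all names follow assumed codegen conventions.

    import software.amazon.awssdk.services.evs.EvsClient;
    import software.amazon.awssdk.services.evs.model.CreateEnvironmentHostRequest;
    import software.amazon.awssdk.services.evs.model.ThrottlingException;

    public class ThrottleRetryExample {
        public static void main(String[] args) throws InterruptedException {
            try (EvsClient evs = EvsClient.create()) {
                CreateEnvironmentHostRequest request =
                        CreateEnvironmentHostRequest.builder().build(); // fields omitted for brevity
                try {
                    evs.createEnvironmentHost(request);
                } catch (ThrottlingException e) {
                    // Wait for the server-suggested interval before retrying once.
                    Integer wait = e.retryAfterSeconds();
                    Thread.sleep((wait != null ? wait : 1) * 1000L);
                    evs.createEnvironmentHost(request);
                }
            }
        }
    }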

    ", + "exception":true, + "retryable":{"throttling":false} + }, + "Timestamp":{"type":"timestamp"}, + "TooManyTagsException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Describes the error encountered.

    " + } + }, + "documentation":"

    A service resource associated with the request has more than 200 tags.

    ", + "exception":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource to delete tags from.

    " + }, + "tagKeys":{ + "shape":"TagKeys", + "documentation":"

    The keys of the tags to delete.

    " + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "VSanLicenseKey":{ + "type":"string", + "pattern":"[a-zA-Z0-9]{5}-[a-zA-Z0-9]{5}-[a-zA-Z0-9]{5}-[a-zA-Z0-9]{5}-[a-zA-Z0-9]{5}" + }, + "ValidationException":{ + "type":"structure", + "required":[ + "message", + "reason" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Describes the error encountered.

    " + }, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

    The reason for the exception.

    " + }, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

    A list of fields that didn't validate.

    " + } + }, + "documentation":"

    The input fails to satisfy the specified constraints. You will see this exception if invalid inputs are provided for any of the Amazon EVS environment operations, or if a list operation is performed on an environment resource that is still initializing.
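
    A hedged sketch of inspecting the field-level details. The accessor names (reasonAsString,
    fieldList) and the builder usage follow SDK v2 conventions for enum-valued and list members
    and are assumptions, not verified generated code; the field values are made up.

    import software.amazon.awssdk.services.evs.model.ValidationException;
    import software.amazon.awssdk.services.evs.model.ValidationExceptionField;

    public class ValidationErrorExample {
        public static void main(String[] args) {
            // Construct a sample exception the way the service might populate it.
            ValidationException e = ValidationException.builder()
                    .reason("fieldValidationFailed")
                    .fieldList(ValidationExceptionField.builder()
                            .name("environmentName")                     // hypothetical field
                            .message("must match the documented pattern") // hypothetical message
                            .build())
                    .build();
            System.out.println("Reason: " + e.reasonAsString());
            e.fieldList().forEach(f -> System.out.println("  " + f.name() + ": " + f.message()));
        }
    }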

    ", + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "name", + "message" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

    The field name.

    " + }, + "message":{ + "shape":"String", + "documentation":"

    A message describing why the field failed validation.

    " + } + }, + "documentation":"

    Stores information about a field passed inside a request that resulted in an exception.

    " + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "unknownOperation", + "cannotParse", + "fieldValidationFailed", + "other" + ] + }, + "VcfHostnames":{ + "type":"structure", + "required":[ + "vCenter", + "nsx", + "nsxManager1", + "nsxManager2", + "nsxManager3", + "nsxEdge1", + "nsxEdge2", + "sddcManager", + "cloudBuilder" + ], + "members":{ + "vCenter":{ + "shape":"HostName", + "documentation":"

    The VMware vCenter hostname.

    " + }, + "nsx":{ + "shape":"HostName", + "documentation":"

    The VMware NSX hostname.

    " + }, + "nsxManager1":{ + "shape":"HostName", + "documentation":"

    The hostname for the first VMware NSX Manager virtual machine (VM).

    " + }, + "nsxManager2":{ + "shape":"HostName", + "documentation":"

    The hostname for the second VMware NSX Manager virtual machine (VM).

    " + }, + "nsxManager3":{ + "shape":"HostName", + "documentation":"

    The hostname for the third VMware NSX Manager virtual machine (VM).

    " + }, + "nsxEdge1":{ + "shape":"HostName", + "documentation":"

    The hostname for the first NSX Edge node.

    " + }, + "nsxEdge2":{ + "shape":"HostName", + "documentation":"

    The hostname for the second NSX Edge node.

    " + }, + "sddcManager":{ + "shape":"HostName", + "documentation":"

    The hostname for SDDC Manager.

    " + }, + "cloudBuilder":{ + "shape":"HostName", + "documentation":"

    The hostname for VMware Cloud Builder.

    " + } + }, + "documentation":"

    The DNS hostnames that Amazon EVS uses to install VMware vCenter Server, NSX, SDDC Manager, and Cloud Builder. Each hostname must be unique, and resolve to a domain name that you've registered in your DNS service of choice. Hostnames cannot be changed.

    VMware VCF requires the deployment of two NSX Edge nodes and three NSX Manager virtual machines.

    " + }, + "VcfVersion":{ + "type":"string", + "enum":["VCF-5.2.1"] + }, + "Vlan":{ + "type":"structure", + "members":{ + "vlanId":{ + "shape":"VlanId", + "documentation":"

    The unique ID of the VLAN.

    " + }, + "cidr":{ + "shape":"Cidr", + "documentation":"

    The CIDR block of the VLAN.

    " + }, + "availabilityZone":{ + "shape":"String", + "documentation":"

    The availability zone of the VLAN.

    " + }, + "functionName":{ + "shape":"String", + "documentation":"

    The VMware VCF traffic type that is carried over the VLAN. For example, a VLAN with a functionName of hcx is being used to carry VMware HCX traffic.

    " + }, + "subnetId":{ + "shape":"SubnetId", + "documentation":"

    The unique ID of the VLAN subnet.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the VLAN was created.

    " + }, + "modifiedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the VLAN was modified.

    " + }, + "vlanState":{ + "shape":"VlanState", + "documentation":"

    The state of the VLAN.

    " + }, + "stateDetails":{ + "shape":"StateDetails", + "documentation":"

    The state details of the VLAN.

    " + } + }, + "documentation":"

    The VLANs that Amazon EVS creates during environment creation.

    " + }, + "VlanId":{ + "type":"integer", + "box":true + }, + "VlanList":{ + "type":"list", + "member":{"shape":"Vlan"} + }, + "VlanState":{ + "type":"string", + "enum":[ + "CREATING", + "CREATED", + "DELETING", + "DELETED", + "CREATE_FAILED" + ] + }, + "VpcId":{ + "type":"string", + "max":21, + "min":12, + "pattern":"vpc-[a-f0-9]{8}([a-f0-9]{9})?" + } + }, + "documentation":"

    Amazon Elastic VMware Service (Amazon EVS) is a service that you can use to deploy a VMware Cloud Foundation (VCF) software environment directly on EC2 bare metal instances within an Amazon Virtual Private Cloud (VPC).

    Workloads running on Amazon EVS are fully compatible with workloads running on any standard VMware vSphere environment. This means that you can migrate any VMware-based workload to Amazon EVS without workload modification.

    " +} diff --git a/services/evs/src/main/resources/codegen-resources/waiters-2.json b/services/evs/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..13f60ee66be6 --- /dev/null +++ b/services/evs/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/services/finspace/pom.xml b/services/finspace/pom.xml index 2d361b361fa4..0ca882df720d 100644 --- a/services/finspace/pom.xml +++ b/services/finspace/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT finspace AWS Java SDK :: Services :: Finspace diff --git a/services/finspacedata/pom.xml b/services/finspacedata/pom.xml index 24f6edd989de..44c881607439 100644 --- a/services/finspacedata/pom.xml +++ b/services/finspacedata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT finspacedata AWS Java SDK :: Services :: Finspace Data diff --git a/services/firehose/pom.xml b/services/firehose/pom.xml index 0a936ed81713..3f30cf0fd7d4 100644 --- a/services/firehose/pom.xml +++ b/services/firehose/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT firehose AWS Java SDK :: Services :: Amazon Kinesis Firehose diff --git a/services/fis/pom.xml b/services/fis/pom.xml index ca808ce96e3b..5caebe553497 100644 --- a/services/fis/pom.xml +++ b/services/fis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT fis AWS Java SDK :: Services :: Fis diff --git a/services/fms/pom.xml b/services/fms/pom.xml index c7533ec5df3e..2a512f29853a 100644 --- a/services/fms/pom.xml +++ b/services/fms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT fms AWS Java SDK :: Services :: FMS diff --git a/services/forecast/pom.xml b/services/forecast/pom.xml index 896bdcd57ea2..307a65995434 100644 --- a/services/forecast/pom.xml +++ b/services/forecast/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT forecast AWS Java SDK :: Services :: Forecast diff --git a/services/forecastquery/pom.xml b/services/forecastquery/pom.xml index 592768d6c632..f9f914371304 100644 --- a/services/forecastquery/pom.xml +++ b/services/forecastquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT forecastquery AWS Java SDK :: Services :: Forecastquery diff --git a/services/frauddetector/pom.xml b/services/frauddetector/pom.xml index 420a55aa9571..a1c3ec0b0a75 100644 --- a/services/frauddetector/pom.xml +++ b/services/frauddetector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT frauddetector AWS Java SDK :: Services :: FraudDetector diff --git a/services/freetier/pom.xml b/services/freetier/pom.xml index 214dc6d60c82..8391d845ca7e 100644 --- a/services/freetier/pom.xml +++ b/services/freetier/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT freetier AWS Java SDK :: Services :: Free Tier diff --git a/services/fsx/pom.xml b/services/fsx/pom.xml index ad6746ef1b7f..37c751605b84 100644 --- a/services/fsx/pom.xml +++ b/services/fsx/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT fsx AWS Java SDK :: Services :: FSx diff --git a/services/fsx/src/main/resources/codegen-resources/service-2.json 
b/services/fsx/src/main/resources/codegen-resources/service-2.json index 911d72794566..f9fadae9320a 100644 --- a/services/fsx/src/main/resources/codegen-resources/service-2.json +++ b/services/fsx/src/main/resources/codegen-resources/service-2.json @@ -740,7 +740,7 @@ {"shape":"MissingFileSystemConfiguration"}, {"shape":"ServiceLimitExceeded"} ], - "documentation":"

    Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

    For FSx for Windows File Server file systems, you can update the following properties:

    • AuditLogConfiguration

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • SelfManagedActiveDirectoryConfiguration

    • StorageCapacity

    • StorageType

    • ThroughputCapacity

    • DiskIopsConfiguration

    • WeeklyMaintenanceStartTime

    For FSx for Lustre file systems, you can update the following properties:

    • AutoImportPolicy

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • DataCompressionType

    • FileSystemTypeVersion

    • LogConfiguration

    • LustreRootSquashConfiguration

    • MetadataConfiguration

    • PerUnitStorageThroughput

    • StorageCapacity

    • WeeklyMaintenanceStartTime

    For FSx for ONTAP file systems, you can update the following properties:

    • AddRouteTableIds

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • DiskIopsConfiguration

    • FsxAdminPassword

    • HAPairs

    • RemoveRouteTableIds

    • StorageCapacity

    • ThroughputCapacity

    • ThroughputCapacityPerHAPair

    • WeeklyMaintenanceStartTime

    For FSx for OpenZFS file systems, you can update the following properties:

    • AddRouteTableIds

    • AutomaticBackupRetentionDays

    • CopyTagsToBackups

    • CopyTagsToVolumes

    • DailyAutomaticBackupStartTime

    • DiskIopsConfiguration

    • ReadCacheConfiguration

    • RemoveRouteTableIds

    • StorageCapacity

    • ThroughputCapacity

    • WeeklyMaintenanceStartTime

    " + "documentation":"

    Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

    For FSx for Windows File Server file systems, you can update the following properties:

    • AuditLogConfiguration

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • DiskIopsConfiguration

    • SelfManagedActiveDirectoryConfiguration

    • StorageCapacity

    • StorageType

    • ThroughputCapacity

    • WeeklyMaintenanceStartTime

    For FSx for Lustre file systems, you can update the following properties:

    • AutoImportPolicy

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • DataCompressionType

    • FileSystemTypeVersion

    • LogConfiguration

    • LustreReadCacheConfiguration

    • LustreRootSquashConfiguration

    • MetadataConfiguration

    • PerUnitStorageThroughput

    • StorageCapacity

    • ThroughputCapacity

    • WeeklyMaintenanceStartTime

    For FSx for ONTAP file systems, you can update the following properties:

    • AddRouteTableIds

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • DiskIopsConfiguration

    • FsxAdminPassword

    • HAPairs

    • RemoveRouteTableIds

    • StorageCapacity

    • ThroughputCapacity

    • ThroughputCapacityPerHAPair

    • WeeklyMaintenanceStartTime

    For FSx for OpenZFS file systems, you can update the following properties:

    • AddRouteTableIds

    • AutomaticBackupRetentionDays

    • CopyTagsToBackups

    • CopyTagsToVolumes

    • DailyAutomaticBackupStartTime

    • DiskIopsConfiguration

    • ReadCacheConfiguration

    • RemoveRouteTableIds

    • StorageCapacity

    • ThroughputCapacity

    • WeeklyMaintenanceStartTime

    " }, "UpdateSharedVpcConfiguration":{ "name":"UpdateSharedVpcConfiguration", @@ -1676,7 +1676,7 @@ "LustreConfiguration":{"shape":"CreateFileSystemLustreConfiguration"}, "StorageType":{ "shape":"StorageType", - "documentation":"

    Sets the storage type for the Windows or OpenZFS file system that you're creating from a backup. Valid values are SSD and HDD.

    • Set to SSD to use solid state drive storage. SSD is supported on all Windows and OpenZFS deployment types.

    • Set to HDD to use hard disk drive storage. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 FSx for Windows File Server file system deployment types.

    The default value is SSD.

    HDD and SSD storage types have different minimum storage capacity requirements. A restored file system's storage capacity is tied to the file system that was backed up. You can create a file system that uses HDD storage from a backup of a file system that used SSD storage if the original SSD file system had a storage capacity of at least 2000 GiB.

    " + "documentation":"

    Sets the storage type for the Windows, OpenZFS, or Lustre file system that you're creating from a backup. Valid values are SSD, HDD, and INTELLIGENT_TIERING.

    • Set to SSD to use solid state drive storage. SSD is supported on all Windows and OpenZFS deployment types.

    • Set to HDD to use hard disk drive storage. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 FSx for Windows File Server file system deployment types.

    • Set to INTELLIGENT_TIERING to use fully elastic, intelligently-tiered storage. Intelligent-Tiering is only available for OpenZFS file systems with the Multi-AZ deployment type and for Lustre file systems with the PERSISTENT_2 deployment type.

    The default value is SSD.

    HDD and SSD storage types have different minimum storage capacity requirements. A restored file system's storage capacity is tied to the file system that was backed up. You can create a file system that uses HDD storage from a backup of a file system that used SSD storage if the original SSD file system had a storage capacity of at least 2000 GiB.

    " }, "KmsKeyId":{"shape":"KmsKeyId"}, "FileSystemTypeVersion":{ @@ -1725,7 +1725,7 @@ }, "DeploymentType":{ "shape":"LustreDeploymentType", - "documentation":"

    (Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

    Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in all Amazon Web Services Regions in which FSx for Lustre is available.

    Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require the highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB). You can optionally specify a metadata configuration mode for PERSISTENT_2 which supports increasing metadata performance. PERSISTENT_2 is available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of Amazon Web Services Regions in which PERSISTENT_2 is available, see File system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.

    If you choose PERSISTENT_2, and you set FileSystemTypeVersion to 2.10, the CreateFileSystem operation fails.

    Encryption of data in transit is automatically turned on when you access SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 file systems from Amazon EC2 instances that support automatic encryption in the Amazon Web Services Regions where they are available. For more information about encryption in transit for FSx for Lustre file systems, see Encrypting data in transit in the Amazon FSx for Lustre User Guide.

    (Default = SCRATCH_1)

    " + "documentation":"

    (Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

    Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in all Amazon Web Services Regions in which FSx for Lustre is available.

    Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require the highest levels of IOPS/throughput. PERSISTENT_2 supports the SSD and Intelligent-Tiering storage classes. You can optionally specify a metadata configuration mode for PERSISTENT_2 which supports increasing metadata performance. PERSISTENT_2 is available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of Amazon Web Services Regions in which PERSISTENT_2 is available, see Deployment and storage class options for FSx for Lustre file systems in the Amazon FSx for Lustre User Guide.

    If you choose PERSISTENT_2, and you set FileSystemTypeVersion to 2.10, the CreateFileSystem operation fails.

    Encryption of data in transit is automatically turned on when you access SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 file systems from Amazon EC2 instances that support automatic encryption in the Amazon Web Services Regions where they are available. For more information about encryption in transit for FSx for Lustre file systems, see Encrypting data in transit in the Amazon FSx for Lustre User Guide.

    (Default = SCRATCH_1)

    " }, "AutoImportPolicy":{ "shape":"AutoImportPolicyType", @@ -1733,7 +1733,7 @@ }, "PerUnitStorageThroughput":{ "shape":"PerUnitStorageThroughput", - "documentation":"

    Required with PERSISTENT_1 and PERSISTENT_2 deployment types, provisions the amount of read and write throughput for each 1 tebibyte (TiB) of file system storage capacity, in MB/s/TiB. File system throughput capacity is calculated by multiplying file system storage capacity (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4-TiB file system, provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields 120 MB/s of file system throughput. You pay for the amount of throughput that you provision.

    Valid values:

    • For PERSISTENT_1 SSD storage: 50, 100, 200 MB/s/TiB.

    • For PERSISTENT_1 HDD storage: 12, 40 MB/s/TiB.

    • For PERSISTENT_2 SSD storage: 125, 250, 500, 1000 MB/s/TiB.

    " + "documentation":"

    Required with PERSISTENT_1 and PERSISTENT_2 deployment types using an SSD or HDD storage class, provisions the amount of read and write throughput for each 1 tebibyte (TiB) of file system storage capacity, in MB/s/TiB. File system throughput capacity is calculated by multiplying file system storage capacity (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4-TiB file system, provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields 120 MB/s of file system throughput. You pay for the amount of throughput that you provision.

    Valid values:

    • For PERSISTENT_1 SSD storage: 50, 100, 200 MB/s/TiB.

    • For PERSISTENT_1 HDD storage: 12, 40 MB/s/TiB.

    • For PERSISTENT_2 SSD storage: 125, 250, 500, 1000 MB/s/TiB.

    " }, "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "AutomaticBackupRetentionDays":{ @@ -1767,6 +1767,14 @@ "MetadataConfiguration":{ "shape":"CreateFileSystemLustreMetadataConfiguration", "documentation":"

    The Lustre metadata performance configuration for the creation of an FSx for Lustre file system using a PERSISTENT_2 deployment type.

    " + }, + "ThroughputCapacity":{ + "shape":"ThroughputCapacityMbps", + "documentation":"

    Specifies the throughput of an FSx for Lustre file system using the Intelligent-Tiering storage class, measured in megabytes per second (MBps). Valid values are 4000 MBps or multiples of 4000 MBps. You pay for the amount of throughput that you provision.

    " + }, + "DataReadCacheConfiguration":{ + "shape":"LustreReadCacheConfiguration", + "documentation":"

    Specifies the optional provisioned SSD read cache on FSx for Lustre file systems that use the Intelligent-Tiering storage class. Required when StorageType is set to INTELLIGENT_TIERING.

    " } }, "documentation":"

    The Lustre configuration for the file system being created.

    The following parameters are not supported for file systems with a data repository association created with .

    • AutoImportPolicy

    • ExportPath

    • ImportedFileChunkSize

    • ImportPath

    " @@ -1777,11 +1785,11 @@ "members":{ "Iops":{ "shape":"MetadataIops", - "documentation":"

    (USER_PROVISIONED mode only) Specifies the number of Metadata IOPS to provision for the file system. This parameter sets the maximum rate of metadata disk IOPS supported by the file system. Valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000.

    Iops doesn’t have a default value. If you're using USER_PROVISIONED mode, you can choose to specify a valid value. If you're using AUTOMATIC mode, you cannot specify a value because FSx for Lustre automatically sets the value based on your file system storage capacity.

    " + "documentation":"

    (USER_PROVISIONED mode only) Specifies the number of Metadata IOPS to provision for the file system. This parameter sets the maximum rate of metadata disk IOPS supported by the file system.

    • For SSD file systems, valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000.

    • For Intelligent-Tiering file systems, valid values are 6000 and 12000.

    Iops doesn’t have a default value. If you're using USER_PROVISIONED mode, you can choose to specify a valid value. If you're using AUTOMATIC mode, you cannot specify a value because FSx for Lustre automatically sets the value based on your file system storage capacity.

    " }, "Mode":{ "shape":"MetadataConfigurationMode", - "documentation":"

    The metadata configuration mode for provisioning Metadata IOPS for an FSx for Lustre file system using a PERSISTENT_2 deployment type.

    • In AUTOMATIC mode, FSx for Lustre automatically provisions and scales the number of Metadata IOPS for your file system based on your file system storage capacity.

    • In USER_PROVISIONED mode, you specify the number of Metadata IOPS to provision for your file system.

    " + "documentation":"

    The metadata configuration mode for provisioning Metadata IOPS for an FSx for Lustre file system using a PERSISTENT_2 deployment type.

    • In AUTOMATIC mode (supported only on SSD file systems), FSx for Lustre automatically provisions and scales the number of Metadata IOPS for your file system based on your file system storage capacity.

    • In USER_PROVISIONED mode, you specify the number of Metadata IOPS to provision for your file system.

    " } }, "documentation":"

    The Lustre metadata performance configuration for the creation of an Amazon FSx for Lustre file system using a PERSISTENT_2 deployment type. The configuration uses a Metadata IOPS value to set the maximum rate of metadata disk IOPS supported by the file system.

    After creation, the file system supports increasing metadata performance. For more information on Metadata IOPS, see Lustre metadata performance configuration in the Amazon FSx for Lustre User Guide.

    " @@ -1855,7 +1863,7 @@ }, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", - "documentation":"

    Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MBps). Valid values depend on the DeploymentType you choose, as follows:

    • For MULTI_AZ_1 and SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MBps.

    • For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MBps.

    You pay for additional throughput capacity that you provision.

    " + "documentation":"

    Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MBps). Valid values depend on the DeploymentType that you choose, as follows:

    • For MULTI_AZ_1 and SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MBps.

    • For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MBps.

    You pay for additional throughput capacity that you provision.

    " }, "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, "DiskIopsConfiguration":{"shape":"DiskIopsConfiguration"}, @@ -1869,7 +1877,7 @@ }, "EndpointIpAddressRange":{ "shape":"IpAddressRange", - "documentation":"

    (Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API and Amazon FSx console, Amazon FSx selects an available /28 IP address range for you from one of the VPC's CIDR ranges. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables.

    " + "documentation":"

    (Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API and Amazon FSx console, Amazon FSx selects an available /28 IP address range for you from one of the VPC's CIDR ranges. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables, as long as they don't overlap with any subnet.

    " }, "RouteTableIds":{ "shape":"RouteTableIds", @@ -1904,7 +1912,7 @@ }, "StorageType":{ "shape":"StorageType", - "documentation":"

    Sets the storage class for the file system that you're creating. Valid values are SSD, HDD, and INTELLIGENT_TIERING.

    • Set to SSD to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types.

    • Set to HDD to use hard disk drive storage. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types, and on PERSISTENT_1 Lustre file system deployment types.

    • Set to INTELLIGENT_TIERING to use fully elastic, intelligently-tiered storage. Intelligent-Tiering is only available for OpenZFS file systems with the Multi-AZ deployment type.

    Default value is SSD. For more information, see Storage type options in the FSx for Windows File Server User Guide, Multiple storage options in the FSx for Lustre User Guide, and Working with Intelligent-Tiering in the Amazon FSx for OpenZFS User Guide.

    " + "documentation":"

    Sets the storage class for the file system that you're creating. Valid values are SSD, HDD, and INTELLIGENT_TIERING.

    • Set to SSD to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types.

    • Set to HDD to use hard disk drive storage, which is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types, and on PERSISTENT_1 Lustre file system deployment types.

    • Set to INTELLIGENT_TIERING to use fully elastic, intelligently-tiered storage. Intelligent-Tiering is only available for OpenZFS file systems with the Multi-AZ deployment type and for Lustre file systems with the PERSISTENT_2 deployment type.

    Default value is SSD. For more information, see Storage type options in the FSx for Windows File Server User Guide, FSx for Lustre storage classes in the FSx for Lustre User Guide, and Working with Intelligent-Tiering in the Amazon FSx for OpenZFS User Guide.

    " }, "SubnetIds":{ "shape":"SubnetIds", @@ -2097,7 +2105,7 @@ }, "CopyTagsToSnapshots":{ "shape":"Flag", - "documentation":"

    A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to false. If it's set to true, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value.

    " + "documentation":"

    A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to false. If this value is set to true, and you do not specify any tags, all tags for the original volume are copied over to snapshots. If this value is set to true, and you do specify one or more tags, only the specified tags for the original volume are copied over to snapshots. If you specify one or more tags when creating a new snapshot, no tags are copied over from the original volume, regardless of this value.

    " }, "OriginSnapshot":{ "shape":"CreateOpenZFSOriginSnapshotConfiguration", @@ -2381,7 +2389,7 @@ "documentation":"

    The configuration for an NFS data repository linked to an Amazon File Cache resource with a data repository association.

    " } }, - "documentation":"

    The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:

    • CreateDataRepositoryAssociation

    • UpdateDataRepositoryAssociation

    • DescribeDataRepositoryAssociations

    Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and 2.15 file systems, excluding scratch_1 deployment type.

    " + "documentation":"

    The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:

    • CreateDataRepositoryAssociation

    • UpdateDataRepositoryAssociation

    • DescribeDataRepositoryAssociations

    Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and 2.15 file systems, excluding Intelligent-Tiering and scratch_1 file systems.

    " }, "DataRepositoryAssociationId":{ "type":"string", @@ -3187,8 +3195,7 @@ }, "DescribeSharedVpcConfigurationRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeSharedVpcConfigurationResponse":{ "type":"structure", @@ -3668,7 +3675,7 @@ }, "StorageType":{ "shape":"StorageType", - "documentation":"

    The type of storage the file system is using. If set to SSD, the file system uses solid state drive storage. If set to HDD, the file system uses hard disk drive storage.

    " + "documentation":"

    The type of storage the file system is using.

    • If set to SSD, the file system uses solid state drive storage.

    • If set to HDD, the file system uses hard disk drive storage.

    • If set to INTELLIGENT_TIERING, the file system uses fully elastic, intelligently-tiered storage.

    " }, "VpcId":{ "shape":"VpcId", @@ -3795,11 +3802,11 @@ "members":{ "Iops":{ "shape":"MetadataIops", - "documentation":"

    The number of Metadata IOPS provisioned for the file system. Valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000.

    " + "documentation":"

    The number of Metadata IOPS provisioned for the file system.

    • For SSD file systems, valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000.

    • For Intelligent-Tiering file systems, valid values are 6000 and 12000.

    " }, "Mode":{ "shape":"MetadataConfigurationMode", - "documentation":"

    The metadata configuration mode for provisioning Metadata IOPS for the file system.

    • In AUTOMATIC mode, FSx for Lustre automatically provisions and scales the number of Metadata IOPS on your file system based on your file system storage capacity.

    • In USER_PROVISIONED mode, you can choose to specify the number of Metadata IOPS to provision for your file system.

    " + "documentation":"

    The metadata configuration mode for provisioning Metadata IOPS for the file system.

    • In AUTOMATIC mode (supported only on SSD file systems), FSx for Lustre automatically provisions and scales the number of Metadata IOPS on your file system based on your file system storage capacity.

    • In USER_PROVISIONED mode, you can choose to specify the number of Metadata IOPS to provision for your file system.

    " } }, "documentation":"

    The Lustre metadata performance configuration of an Amazon FSx for Lustre file system using a PERSISTENT_2 deployment type. The configuration enables the file system to support increasing metadata performance.

    " @@ -3828,7 +3835,7 @@ }, "FileSystemType":{ "type":"string", - "documentation":"

    The type of file system.

    ", + "documentation":"

    The type of Amazon FSx file system.

    ", "enum":[ "WINDOWS", "LUSTRE", @@ -4150,7 +4157,7 @@ "DataRepositoryConfiguration":{"shape":"DataRepositoryConfiguration"}, "DeploymentType":{ "shape":"LustreDeploymentType", - "documentation":"

    The deployment type of the FSx for Lustre file system. Scratch deployment type is designed for temporary storage and shorter-term processing of data.

    SCRATCH_1 and SCRATCH_2 deployment types are best suited for when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

    The PERSISTENT_1 and PERSISTENT_2 deployment type is used for longer-term storage and workloads and encryption of data in transit. PERSISTENT_2 offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB) along with a lower minimum storage capacity requirement (600 GiB). To learn more about FSx for Lustre deployment types, see FSx for Lustre deployment options.

    The default is SCRATCH_1.

    " + "documentation":"

    The deployment type of the FSx for Lustre file system. Scratch deployment type is designed for temporary storage and shorter-term processing of data.

    SCRATCH_1 and SCRATCH_2 deployment types are best suited for when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

    The PERSISTENT_1 and PERSISTENT_2 deployment types are used for longer-term storage and workloads, and support encryption of data in transit. PERSISTENT_2 offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB) along with a lower minimum storage capacity requirement (600 GiB). To learn more about FSx for Lustre deployment types, see Deployment and storage class options for FSx for Lustre file systems.

    The default is SCRATCH_1.

    " }, "PerUnitStorageThroughput":{ "shape":"PerUnitStorageThroughput", @@ -4189,6 +4196,14 @@ "EfaEnabled":{ "shape":"Flag", "documentation":"

    Specifies whether Elastic Fabric Adapter (EFA) and GPUDirect Storage (GDS) support is enabled for the Amazon FSx for Lustre file system.

    " + }, + "ThroughputCapacity":{ + "shape":"ThroughputCapacityMbps", + "documentation":"

    The throughput of an Amazon FSx for Lustre file system using the Intelligent-Tiering storage class, measured in megabytes per second (MBps).

    " + }, + "DataReadCacheConfiguration":{ + "shape":"LustreReadCacheConfiguration", + "documentation":"

    Required when StorageType is set to INTELLIGENT_TIERING. Specifies the optional provisioned SSD read cache.

    " } }, "documentation":"

    The configuration for the Amazon FSx for Lustre file system.

    " @@ -4240,6 +4255,28 @@ "member":{"shape":"LustreNoSquashNid"}, "max":64 }, + "LustreReadCacheConfiguration":{ + "type":"structure", + "members":{ + "SizingMode":{ + "shape":"LustreReadCacheSizingMode", + "documentation":"

    Specifies how the provisioned SSD read cache is sized, as follows:

    • Set to NO_CACHE if you do not want to use an SSD read cache with your Intelligent-Tiering file system.

    • Set to USER_PROVISIONED to specify the exact size of your SSD read cache.

    • Set to PROPORTIONAL_TO_THROUGHPUT_CAPACITY to have your SSD read cache automatically sized based on your throughput capacity.

    " + }, + "SizeGiB":{ + "shape":"StorageCapacity", + "documentation":"

    Required if SizingMode is set to USER_PROVISIONED. Specifies the size of the file system's SSD read cache, in gibibytes (GiB).

    The SSD read cache size is distributed across provisioned file servers in your file system. Intelligent-Tiering file systems support a minimum of 32 GiB and a maximum of 131072 GiB of SSD read cache for every 4,000 MB/s of provisioned throughput capacity.

    " + } + }, + "documentation":"

    The configuration for the optional provisioned SSD read cache on Amazon FSx for Lustre file systems that use the Intelligent-Tiering storage class.
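
    For illustration, the two common sizing choices sketched with assumed generated class names
    (LustreReadCacheConfiguration and LustreReadCacheSizingMode per this model, following standard
    SDK v2 codegen conventions); the 64 GiB size is an arbitrary example.

    import software.amazon.awssdk.services.fsx.model.LustreReadCacheConfiguration;
    import software.amazon.awssdk.services.fsx.model.LustreReadCacheSizingMode;

    public class ReadCacheExample {
        public static void main(String[] args) {
            // An explicitly sized cache: USER_PROVISIONED requires SizeGiB.
            LustreReadCacheConfiguration userSized = LustreReadCacheConfiguration.builder()
                    .sizingMode(LustreReadCacheSizingMode.USER_PROVISIONED)
                    .sizeGiB(64)
                    .build();

            // Alternatively, let FSx size the cache from the provisioned throughput capacity.
            LustreReadCacheConfiguration proportional = LustreReadCacheConfiguration.builder()
                    .sizingMode(LustreReadCacheSizingMode.PROPORTIONAL_TO_THROUGHPUT_CAPACITY)
                    .build();

            System.out.println(userSized + " " + proportional);
        }
    }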

    " + }, + "LustreReadCacheSizingMode":{ + "type":"string", + "enum":[ + "NO_CACHE", + "USER_PROVISIONED", + "PROPORTIONAL_TO_THROUGHPUT_CAPACITY" + ] + }, "LustreRootSquash":{ "type":"string", "max":21, @@ -4715,7 +4752,7 @@ "documentation":"

    Required if SizingMode is set to USER_PROVISIONED. Specifies the size of the file system's SSD read cache, in gibibytes (GiB).

    " } }, - "documentation":"

    The configuration for the optional provisioned SSD read cache on file systems that use the Intelligent-Tiering storage class.

    " + "documentation":"

    The configuration for the optional provisioned SSD read cache on Amazon FSx for OpenZFS file systems that use the Intelligent-Tiering storage class.

    " }, "OpenZFSReadCacheSizingMode":{ "type":"string", @@ -5178,7 +5215,7 @@ }, "ServiceLimit":{ "type":"string", - "documentation":"

    The types of limits on your service utilization. Limits include file system count, total throughput capacity, total storage, and total user-initiated backups. These limits apply for a specific account in a specific Amazon Web Services Region. You can increase some of them by contacting Amazon Web Services Support.

    ", + "documentation":"

    The types of limits on your service utilization. Limits include file system count, total throughput capacity, total storage, and total user-initiated backups. These limits apply for a specific account in a specific Amazon Web Services Region. You can increase some of them by contacting Amazon Web Services Support.

    ", "enum":[ "FILE_SYSTEM_COUNT", "TOTAL_THROUGHPUT_CAPACITY", @@ -5202,7 +5239,7 @@ }, "Message":{"shape":"ErrorMessage"} }, - "documentation":"

    An error indicating that a particular service limit was exceeded. You can increase some service limits by contacting Amazon Web Services Support.

    ", + "documentation":"

    An error indicating that a particular service limit was exceeded. You can increase some service limits by contacting Amazon Web Services Support.

    ", "exception":true }, "SizeInBytes":{ @@ -5692,8 +5729,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The response object for the TagResource operation.

    " }, "TagValue":{ @@ -5721,6 +5757,11 @@ "member":{"shape":"TaskId"}, "max":50 }, + "ThroughputCapacityMbps":{ + "type":"integer", + "max":2000000, + "min":4000 + }, "ThroughputCapacityPerHAPair":{ "type":"integer", "max":6144, @@ -5796,8 +5837,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The response object for UntagResource action.

    " }, "UpdateDataRepositoryAssociationRequest":{ @@ -5900,6 +5940,14 @@ "MetadataConfiguration":{ "shape":"UpdateFileSystemLustreMetadataConfiguration", "documentation":"

    The Lustre metadata performance configuration for an Amazon FSx for Lustre file system using a PERSISTENT_2 deployment type. When this configuration is enabled, the file system supports increasing metadata performance.

    " + }, + "ThroughputCapacity":{ + "shape":"ThroughputCapacityMbps", + "documentation":"

    The throughput of an Amazon FSx for Lustre file system using the Intelligent-Tiering storage class, measured in megabytes per second (MBps). You can only increase your file system's throughput. Valid values are 4000 MBps or multiples of 4000 MBps.
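
    A sketch of requesting such an increase, assuming the existing FSxClient plus the generated
    configuration class added by this model; the file system ID is hypothetical.

    import software.amazon.awssdk.services.fsx.FSxClient;
    import software.amazon.awssdk.services.fsx.model.UpdateFileSystemLustreConfiguration;
    import software.amazon.awssdk.services.fsx.model.UpdateFileSystemRequest;

    public class UpdateThroughputExample {
        public static void main(String[] args) {
            try (FSxClient fsx = FSxClient.create()) {
                // Step the file system up to the next multiple of 4000 MBps.
                fsx.updateFileSystem(UpdateFileSystemRequest.builder()
                        .fileSystemId("fs-0123456789abcdef0") // hypothetical file system ID
                        .lustreConfiguration(UpdateFileSystemLustreConfiguration.builder()
                                .throughputCapacity(8000)
                                .build())
                        .build());
            }
        }
    }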

    " + }, + "DataReadCacheConfiguration":{ + "shape":"LustreReadCacheConfiguration", + "documentation":"

    Specifies the optional provisioned SSD read cache on Amazon FSx for Lustre file systems that use the Intelligent-Tiering storage class.

    " } }, "documentation":"

    The configuration object for Amazon FSx for Lustre file systems used in the UpdateFileSystem operation.

    " @@ -5909,11 +5957,11 @@ "members":{ "Iops":{ "shape":"MetadataIops", - "documentation":"

    (USER_PROVISIONED mode only) Specifies the number of Metadata IOPS to provision for your file system. Valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000.

    The value you provide must be greater than or equal to the current number of Metadata IOPS provisioned for the file system.

    " + "documentation":"

    (USER_PROVISIONED mode only) Specifies the number of Metadata IOPS to provision for your file system.

    • For SSD file systems, valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000.

    • For Intelligent-Tiering file systems, valid values are 6000 and 12000.

    The value you provide must be greater than or equal to the current number of Metadata IOPS provisioned for the file system.

    " }, "Mode":{ "shape":"MetadataConfigurationMode", - "documentation":"

    The metadata configuration mode for provisioning Metadata IOPS for an FSx for Lustre file system using a PERSISTENT_2 deployment type.

    • To increase the Metadata IOPS or to switch from AUTOMATIC mode, specify USER_PROVISIONED as the value for this parameter. Then use the Iops parameter to provide a Metadata IOPS value that is greater than or equal to the current number of Metadata IOPS provisioned for the file system.

    • To switch from USER_PROVISIONED mode, specify AUTOMATIC as the value for this parameter, but do not input a value for Iops.

      If you request to switch from USER_PROVISIONED to AUTOMATIC mode and the current Metadata IOPS value is greater than the automated default, FSx for Lustre rejects the request because downscaling Metadata IOPS is not supported.

    " + "documentation":"

    The metadata configuration mode for provisioning Metadata IOPS for an FSx for Lustre file system using a PERSISTENT_2 deployment type.

    • To increase the Metadata IOPS or to switch an SSD file system from AUTOMATIC, specify USER_PROVISIONED as the value for this parameter. Then use the Iops parameter to provide a Metadata IOPS value that is greater than or equal to the current number of Metadata IOPS provisioned for the file system.

    • To switch from USER_PROVISIONED mode on an SSD file system, specify AUTOMATIC as the value for this parameter, but do not input a value for Iops.

      • If you request to switch from USER_PROVISIONED to AUTOMATIC mode and the current Metadata IOPS value is greater than the automated default, FSx for Lustre rejects the request because downscaling Metadata IOPS is not supported.

      • AUTOMATIC mode is not supported on Intelligent-Tiering file systems. For Intelligent-Tiering file systems, use USER_PROVISIONED mode.
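
    A hedged sketch of the switch described above, assuming the SDK for Java v2 generates standard builders from the Iops and Mode members of this shape; the IOPS value is illustrative:

        import software.amazon.awssdk.services.fsx.model.MetadataConfigurationMode;
        import software.amazon.awssdk.services.fsx.model.UpdateFileSystemLustreMetadataConfiguration;

        // Switch an SSD file system to USER_PROVISIONED and raise Metadata IOPS.
        UpdateFileSystemLustreMetadataConfiguration metadataUpdate =
            UpdateFileSystemLustreMetadataConfiguration.builder()
                .mode(MetadataConfigurationMode.USER_PROVISIONED)
                .iops(24000) // must be >= the currently provisioned value
                .build();
        // To switch an SSD file system back to AUTOMATIC, set only the mode
        // and do not set iops.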

    " } }, "documentation":"

    The Lustre metadata performance configuration update for an Amazon FSx for Lustre file system using a PERSISTENT_2 deployment type. You can request an increase in your file system's Metadata IOPS and/or switch your file system's metadata configuration mode. For more information, see Managing metadata performance in the Amazon FSx for Lustre User Guide.

    " @@ -6475,7 +6523,7 @@ }, "WeeklyTime":{ "type":"string", - "documentation":"

    A recurring weekly time, in the format D:HH:MM.

    D is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see the ISO-8601 spec as described on Wikipedia.

    HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour.

    For example, 1:05:00 specifies maintenance at 5 AM Monday.

    ", + "documentation":"

    The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone, where d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.

    For example, 1:05:00 specifies maintenance at 5 AM Monday.
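
    The pattern declared on this shape (shown below) can be checked client-side; a minimal sketch in plain Java:

        import java.util.regex.Pattern;

        // d (1 = Monday .. 7 = Sunday) followed by zero-padded HH:MM,
        // e.g. "1:05:00" for 5 AM Monday. Pattern copied from this shape.
        private static final Pattern WEEKLY_TIME =
            Pattern.compile("^[1-7]:([01]\\d|2[0-3]):?([0-5]\\d)$");

        static boolean isValidWeeklyTime(String value) {
            return WEEKLY_TIME.matcher(value).matches();
        }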

    ", "max":7, "min":7, "pattern":"^[1-7]:([01]\\d|2[0-3]):?([0-5]\\d)$" diff --git a/services/gamelift/pom.xml b/services/gamelift/pom.xml index 16144bc8be5c..75d01885e9ea 100644 --- a/services/gamelift/pom.xml +++ b/services/gamelift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT gamelift AWS Java SDK :: Services :: AWS GameLift diff --git a/services/gameliftstreams/pom.xml b/services/gameliftstreams/pom.xml index ba5d952d1db8..fa7c5d1407c0 100644 --- a/services/gameliftstreams/pom.xml +++ b/services/gameliftstreams/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT gameliftstreams AWS Java SDK :: Services :: Game Lift Streams diff --git a/services/gameliftstreams/src/main/resources/codegen-resources/service-2.json b/services/gameliftstreams/src/main/resources/codegen-resources/service-2.json index 88e5938700dc..3c3bd7cdc87d 100644 --- a/services/gameliftstreams/src/main/resources/codegen-resources/service-2.json +++ b/services/gameliftstreams/src/main/resources/codegen-resources/service-2.json @@ -70,7 +70,7 @@ {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

    Creates an application resource in Amazon GameLift Streams, which specifies the application content you want to stream, such as a game build or other software, and configures the settings to run it.

    Before you create an application, upload your application content files to an Amazon Simple Storage Service (Amazon S3) bucket. For more information, see Getting Started in the Amazon GameLift Streams Developer Guide.

    Make sure that your files in the Amazon S3 bucket are the correct version you want to use. As soon as you create a Amazon GameLift Streams application, you cannot change the files at a later time.

    If the request is successful, Amazon GameLift Streams begins to create an application and sets the status to INITIALIZED. When an application reaches READY status, you can use the application to set up stream groups and start streams. To track application status, call GetApplication.

    " + "documentation":"

    Creates an application resource in Amazon GameLift Streams, which specifies the application content you want to stream, such as a game build or other software, and configures the settings to run it.

    Before you create an application, upload your application content files to an Amazon Simple Storage Service (Amazon S3) bucket. For more information, see Getting Started in the Amazon GameLift Streams Developer Guide.

    Make sure that your files in the Amazon S3 bucket are the correct version you want to use. If you change the files at a later time, you will need to create a new Amazon GameLift Streams application.

    If the request is successful, Amazon GameLift Streams begins to create an application and sets the status to INITIALIZED. When an application reaches READY status, you can use the application to set up stream groups and start streams. To track application status, call GetApplication.
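
    A minimal sketch of tracking that status, assuming the generated GameLiftStreamsClient follows standard SDK for Java v2 naming conventions; the status accessor and polling interval are assumptions:

        import software.amazon.awssdk.services.gameliftstreams.GameLiftStreamsClient;
        import software.amazon.awssdk.services.gameliftstreams.model.GetApplicationRequest;
        import software.amazon.awssdk.services.gameliftstreams.model.GetApplicationResponse;

        // Poll GetApplication until the application leaves INITIALIZED.
        static void waitUntilReady(GameLiftStreamsClient client, String appIdentifier)
                throws InterruptedException {
            GetApplicationResponse app;
            do {
                Thread.sleep(5000); // simple fixed-interval poll
                app = client.getApplication(GetApplicationRequest.builder()
                        .identifier(appIdentifier) // ARN or ID, e.g. "a-9ZY8X7Wv6"
                        .build());
            } while ("INITIALIZED".equals(app.statusAsString())); // accessor assumed
            // Expect READY here; any other terminal status indicates a problem.
        }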

    " }, "CreateStreamGroup":{ "name":"CreateStreamGroup", @@ -90,7 +90,7 @@ {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

    Manage how Amazon GameLift Streams streams your applications by using a stream group. A stream group is a collection of resources that Amazon GameLift Streams uses to stream your application to end-users. When you create a stream group, you specify an application to stream by default and the type of hardware to use, such as the graphical processing unit (GPU). You can also link additional applications, which allows you to stream those applications using this stream group. Depending on your expected users, you also scale the number of concurrent streams you want to support at one time, and in what locations.

    Stream capacity represents the number of concurrent streams that can be active at a time. You set stream capacity per location, per stream group. There are two types of capacity: always-on and on-demand:

    • Always-on: The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.

    • On-demand: The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes).

    To adjust the capacity of any ACTIVE stream group, call UpdateStreamGroup.

    If the request is successful, Amazon GameLift Streams begins creating the stream group. Amazon GameLift Streams assigns a unique ID to the stream group resource and sets the status to ACTIVATING. When the stream group reaches ACTIVE status, you can start stream sessions by using StartStreamSession. To check the stream group's status, call GetStreamGroup.
      ", + "documentation":"

      Manage how Amazon GameLift Streams streams your applications by using a stream group. A stream group is a collection of resources that Amazon GameLift Streams uses to stream your application to end-users. When you create a stream group, you specify an application to stream by default and the type of hardware to use, such as the graphical processing unit (GPU). You can also link additional applications, which allows you to stream those applications using this stream group. Depending on your expected users, you also scale the number of concurrent streams you want to support at one time, and in what locations.

      Stream capacity represents the number of concurrent streams that can be active at a time. You set stream capacity per location, per stream group. There are two types of capacity, always-on and on-demand:

      • Always-on: The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.

      • On-demand: The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes).

      To adjust the capacity of any ACTIVE stream group, call UpdateStreamGroup.

      If the request is successful, Amazon GameLift Streams begins creating the stream group. Amazon GameLift Streams assigns a unique ID to the stream group resource and sets the status to ACTIVATING. When the stream group reaches ACTIVE status, you can start stream sessions by using StartStreamSession. To check the stream group's status, call GetStreamGroup.

      ", "idempotent":true }, "CreateStreamSessionConnection":{ @@ -363,7 +363,7 @@ {"shape":"ConflictException"}, {"shape":"ValidationException"} ], - "documentation":"

      This action initiates a new stream session and outputs connection information that clients can use to access the stream. A stream session refers to an instance of a stream that Amazon GameLift Streams transmits from the server to the end-user. A stream session runs on a compute resource, or stream capacity, that a stream group has allocated.

      To start a new stream session, specify a stream group and application ID, along with the transport protocol and signal request settings to use with the stream. You must have associated at least one application to the stream group before starting a stream session, either when creating the stream group, or by using AssociateApplications.

      For stream groups that have multiple locations, provide a set of locations ordered by priority by setting Locations. Amazon GameLift Streams will start a single stream session in the next available location. An application must be finished replicating in a remote location before the remote location can host a stream.

      If the request is successful, Amazon GameLift Streams begins to prepare the stream. Amazon GameLift Streams assigns an Amazon Resource Name (ARN) value to the stream session resource and sets the status to ACTIVATING. During the stream preparation process, Amazon GameLift Streams queues the request and searches for available stream capacity to run the stream. This can result to one of the following:

      • Amazon GameLift Streams identifies an available compute resource to run the application content and start the stream. When the stream is ready, the stream session's status changes to ACTIVE and includes stream connection information. Provide the connection information to the requesting client to join the stream session.

      • Amazon GameLift Streams doesn't identify an available resource within a certain time, set by ClientToken. In this case, Amazon GameLift Streams stops processing the request, and the stream session object status changes to ERROR with status reason placementTimeout.

      " + "documentation":"

      This action initiates a new stream session and outputs connection information that clients can use to access the stream. A stream session refers to an instance of a stream that Amazon GameLift Streams transmits from the server to the end-user. A stream session runs on a compute resource that a stream group has allocated.

      To start a new stream session, specify a stream group and application ID, along with the transport protocol and signal request settings to use with the stream. You must have associated at least one application to the stream group before starting a stream session, either when creating the stream group, or by using AssociateApplications.

      For stream groups that have multiple locations, provide a set of locations ordered by priority using a Locations parameter. Amazon GameLift Streams will start a single stream session in the next available location. An application must be finished replicating in a remote location before the remote location can host a stream.

      If the request is successful, Amazon GameLift Streams begins to prepare the stream. Amazon GameLift Streams assigns an Amazon Resource Name (ARN) value to the stream session resource and sets the status to ACTIVATING. During the stream preparation process, Amazon GameLift Streams queues the request and searches for available stream capacity to run the stream. This results in one of the following:

      • Amazon GameLift Streams identifies an available compute resource to run the application content and start the stream. When the stream is ready, the stream session's status changes to ACTIVE and includes stream connection information. Provide the connection information to the requesting client to join the stream session.

      • Amazon GameLift Streams doesn't identify an available resource within a certain time, set by ClientToken. In this case, Amazon GameLift Streams stops processing the request, and the stream session object status changes to ERROR with status reason placementTimeout.
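
      A minimal sketch of this call flow, assuming the generated GameLiftStreamsClient follows standard SDK for Java v2 conventions; the identifiers, locations, protocol value, and webRtcOffer variable are illustrative:

        import software.amazon.awssdk.services.gameliftstreams.GameLiftStreamsClient;
        import software.amazon.awssdk.services.gameliftstreams.model.StartStreamSessionRequest;
        import software.amazon.awssdk.services.gameliftstreams.model.StartStreamSessionResponse;

        static String startSession(GameLiftStreamsClient client, String webRtcOffer) {
            // Amazon GameLift Streams starts the session in the first listed
            // location that has available capacity.
            StartStreamSessionResponse session = client.startStreamSession(
                StartStreamSessionRequest.builder()
                    .identifier("sg-1AB2C3De4")            // stream group ARN or ID
                    .applicationIdentifier("a-9ZY8X7Wv6")  // application ARN or ID
                    .protocol("WebRTC")                    // enum value assumed
                    .signalRequest(webRtcOffer)            // WebRTC ICE offer string
                    .locations("us-east-1", "us-west-2")   // ordered by priority
                    .build());
            // Poll GetStreamSession until the status is ACTIVE; ERROR with
            // status reason placementTimeout means no capacity was found in time.
            return session.arn();
        }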

      " }, "TagResource":{ "name":"TagResource", @@ -455,7 +455,7 @@ {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

      Updates the configuration settings for an Amazon GameLift Streams stream group resource. You can change the description, the set of locations, and the requested capacity of a stream group per location. If you want to change the stream class, create a new stream group.

      Stream capacity represents the number of concurrent streams that can be active at a time. You set stream capacity per location, per stream group. There are two types of capacity: always-on and on-demand:

      • Always-on: The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.

      • On-demand: The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes).

      To update a stream group, specify the stream group's Amazon Resource Name (ARN) and provide the new values. If the request is successful, Amazon GameLift Streams returns the complete updated metadata for the stream group.
        " + "documentation":"

        Updates the configuration settings for an Amazon GameLift Streams stream group resource. You can change the description, the set of locations, and the requested capacity of a stream group per location. If you want to change the stream class, create a new stream group.

        Stream capacity represents the number of concurrent streams that can be active at a time. You set stream capacity per location, per stream group. There are two types of capacity, always-on and on-demand:

        • Always-on: The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.

        • On-demand: The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes).

        To update a stream group, specify the stream group's Amazon Resource Name (ARN) and provide the new values. If the request is successful, Amazon GameLift Streams returns the complete updated metadata for the stream group.

        " } }, "shapes":{ @@ -484,7 +484,7 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        A stream group to add the specified locations to.

        This value is a Amazon Resource Name (ARN) that uniquely identifies the stream group resource. Format example: sg-1AB2C3De4.

        ", + "documentation":"

        A stream group to add the specified locations to.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" }, @@ -503,7 +503,7 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        This value is the Amazon Resource Name (ARN) that uniquely identifies the stream group resource. Format example: sg-1AB2C3De4.

        " + "documentation":"

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        " }, "Locations":{ "shape":"LocationStates", @@ -562,7 +562,7 @@ }, "Id":{ "shape":"Id", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        An ID that uniquely identifies the application resource. Example ID: a-9ZY8X7Wv6.

        " }, "LastUpdatedAt":{ "shape":"Timestamp", @@ -602,11 +602,11 @@ "members":{ "ApplicationIdentifiers":{ "shape":"Identifiers", - "documentation":"

        A set of applications to associate with the stream group.

        This value is a set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        A set of applications to associate with the stream group.

        This value is a set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6. Example ID: a-9ZY8X7Wv6.

        " }, "Identifier":{ "shape":"Identifier", - "documentation":"

        A stream group to associate to the applications.

        This value is a Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        ", + "documentation":"

        A stream group to associate to the applications.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" } @@ -617,11 +617,11 @@ "members":{ "ApplicationArns":{ "shape":"ArnList", - "documentation":"

        A set of applications that are associated to the stream group.

        This value is a set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        A set of applications that are associated to the stream group.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify application resources. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6.

        " }, "Arn":{ "shape":"Arn", - "documentation":"

        A stream group that is associated to the applications.

        This value is a Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        " + "documentation":"

        A stream group that is associated to the applications.

        This value is an Amazon Resource Name (ARN) that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4.

        " } } }, @@ -669,7 +669,7 @@ "members":{ "ApplicationLogOutputUri":{ "shape":"ApplicationLogOutputUri", - "documentation":"

        An Amazon S3 URI to a bucket where you would like Amazon GameLift Streams to save application logs. Required if you specify one or more ApplicationLogPaths.

        The log bucket must have permissions that give Amazon GameLift Streams access to write the log files. For more information, see Getting Started in the Amazon GameLift Streams Developer Guide.

        " + "documentation":"

        An Amazon S3 URI to a bucket where you would like Amazon GameLift Streams to save application logs. Required if you specify one or more ApplicationLogPaths.

        The log bucket must have permissions that give Amazon GameLift Streams access to write the log files. For more information, see Application log bucket permission policy in the Amazon GameLift Streams Developer Guide.

        " }, "ApplicationLogPaths":{ "shape":"FilePaths", @@ -720,7 +720,7 @@ }, "Arn":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies it across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:application/[resource ID].

        " + "documentation":"

        The Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies it across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:application/[resource ID].

        " }, "AssociatedStreamGroups":{ "shape":"ArnList", @@ -740,7 +740,7 @@ }, "Id":{ "shape":"Id", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        A unique ID value that is assigned to the resource when it's created. Format example: a-9ZY8X7Wv6.

        " }, "LastUpdatedAt":{ "shape":"Timestamp", @@ -778,7 +778,7 @@ }, "DefaultApplicationIdentifier":{ "shape":"Identifier", - "documentation":"

        The unique identifier of the Amazon GameLift Streams application that you want to associate to a stream group as the default application. The application must be in READY status. By setting the default application identifier, you will optimize startup performance of this application in your stream group. Once set, this application cannot be disassociated from the stream group, unlike applications that are associated using AssociateApplications. If not set when creating a stream group, you will need to call AssociateApplications later, before you can start streaming.

        " + "documentation":"

        The unique identifier of the Amazon GameLift Streams application that you want to associate to a stream group as the default application. The application must be in READY status. By setting the default application identifier, you will optimize startup performance of this application in your stream group. Once set, this application cannot be disassociated from the stream group, unlike applications that are associated using AssociateApplications. If not set when creating a stream group, you will need to call AssociateApplications later, before you can start streaming.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6. Example ID: a-9ZY8X7Wv6.

        " }, "Description":{ "shape":"Description", @@ -804,11 +804,11 @@ "members":{ "Arn":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) that is assigned to the stream group resource and that uniquely identifies the group across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamgroup/[resource ID].

        " + "documentation":"

        The Amazon Resource Name (ARN) that is assigned to the stream group resource and that uniquely identifies the group across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamgroup/[resource ID].

        " }, "AssociatedApplications":{ "shape":"ArnList", - "documentation":"

        A set of applications that this stream group is associated to. You can stream any of these applications by using this stream group.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify application resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6.

        " + "documentation":"

        A set of applications that this stream group is associated to. You can stream any of these applications by using this stream group.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify application resources. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6.

        " }, "CreatedAt":{ "shape":"Timestamp", @@ -863,7 +863,7 @@ }, "Identifier":{ "shape":"Identifier", - "documentation":"

        Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        The stream group that you want to run this stream session with. The stream group must be in ACTIVE status and have idle stream capacity.

        ", + "documentation":"

        Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        The stream group that you want to run this stream session with. The stream group must be in ACTIVE status and have idle stream capacity.

        ", "location":"uri", "locationName":"Identifier" }, @@ -873,7 +873,7 @@ }, "StreamSessionIdentifier":{ "shape":"Identifier", - "documentation":"

        Amazon Resource Name (ARN) that uniquely identifies the stream session resource. Format example: 1AB2C3De4. The stream session must be in PENDING_CLIENT_RECONNECTION or ACTIVE status.

        ", + "documentation":"

        Amazon Resource Name (ARN) or ID that uniquely identifies the stream session resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamsession/sg-1AB2C3De4/ABC123def4567. Example ID: ABC123def4567.

        The stream session must be in PENDING_CLIENT_RECONNECTION or ACTIVE status.

        ", "location":"uri", "locationName":"StreamSessionIdentifier" } @@ -893,11 +893,11 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the application resource. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6.

        " + "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the application resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6.

        " }, "Id":{ "shape":"Id", - "documentation":"

        An ID that uniquely identifies the application resource. For example: a-9ZY8X7Wv6.

        " + "documentation":"

        An ID that uniquely identifies the application resource. Example ID: a-9ZY8X7Wv6.

        " } }, "documentation":"

        Represents the default Amazon GameLift Streams application that a stream group hosts.

        " @@ -908,7 +908,7 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        ", + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6. Example ID: a-9ZY8X7Wv6.

        ", "location":"uri", "locationName":"Identifier" } @@ -920,7 +920,7 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        The unique ID value of the stream group resource to delete. Format example: sg-1AB2C3De4.

        ", + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" } @@ -941,11 +941,11 @@ "members":{ "ApplicationIdentifiers":{ "shape":"Identifiers", - "documentation":"

        A set of applications that you want to disassociate from the stream group.

        This value is a set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        A set of applications that you want to disassociate from the stream group.

        This value is a set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6. Example ID: a-9ZY8X7Wv6.

        " }, "Identifier":{ "shape":"Identifier", - "documentation":"

        A stream group to disassociate these applications from.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        ", + "documentation":"

        A stream group to disassociate these applications from.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" } @@ -956,11 +956,11 @@ "members":{ "ApplicationArns":{ "shape":"ArnList", - "documentation":"

        A set of applications that are disassociated from this stream group.

        This value is a set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        A set of applications that are disassociated from this stream group.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify application resources. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6.

        " }, "Arn":{ "shape":"Arn", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        " + "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4.

        " } } }, @@ -992,7 +992,7 @@ "members":{ "OutputUri":{ "shape":"OutputUri", - "documentation":"

        The S3 bucket URI where Amazon GameLift Streams uploaded the set of compressed exported files for a stream session. Amazon GameLift Streams generates a ZIP file name based on the stream session metadata. Alternatively, you can provide a custom file name with a .zip file extension.

        Example 1: If you provide an S3 URI called s3://MyBucket/MyGame_Session1.zip, then Amazon GameLift Streams will save the files at that location.

        Example 2: If you provide an S3 URI called s3://MyBucket/MyGameSessions_ExportedFiles/, then Amazon GameLift Streams will save the files at s3://MyBucket/MyGameSessions_ExportedFiles/YYYYMMDD-HHMMSS-appId-sg-Id-sessionId.zip or another similar name.

        " + "documentation":"

        The S3 bucket URI where Amazon GameLift Streams uploaded the set of compressed exported files for a stream session. Amazon GameLift Streams generates a ZIP file name based on the stream session metadata. Alternatively, you can provide a custom file name with a .zip file extension.

        Example 1: If you provide an S3 URI called s3://amzn-s3-demo-destination-bucket/MyGame_Session1.zip, then Amazon GameLift Streams will save the files at that location.

        Example 2: If you provide an S3 URI called s3://amzn-s3-demo-destination-bucket/MyGameSessions_ExportedFiles/, then Amazon GameLift Streams will save the files at s3://amzn-s3-demo-destination-bucket/MyGameSessions_ExportedFiles/YYYYMMDD-HHMMSS-appId-sg-Id-sessionId.zip or another similar name.

        " }, "Status":{ "shape":"ExportFilesStatus", @@ -1028,17 +1028,17 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        ", + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" }, "OutputUri":{ "shape":"OutputUri", - "documentation":"

        The S3 bucket URI where Amazon GameLift Streams uploads the set of compressed exported files for this stream session. Amazon GameLift Streams generates a ZIP file name based on the stream session metadata. Alternatively, you can provide a custom file name with a .zip file extension.

        Example 1: If you provide an S3 URI called s3://MyBucket/MyGame_Session1.zip, then Amazon GameLift Streams will save the files at that location.

        Example 2: If you provide an S3 URI called s3://MyBucket/MyGameSessions_ExportedFiles/, then Amazon GameLift Streams will save the files at s3://MyBucket/MyGameSessions_ExportedFiles/YYYYMMDD-HHMMSS-appId-sg-Id-sessionId.zip or another similar name.

        " + "documentation":"

        The S3 bucket URI where Amazon GameLift Streams uploads the set of compressed exported files for this stream session. Amazon GameLift Streams generates a ZIP file name based on the stream session metadata. Alternatively, you can provide a custom file name with a .zip file extension.

        Example 1: If you provide an S3 URI called s3://amzn-s3-demo-destination-bucket/MyGame_Session1.zip, then Amazon GameLift Streams will save the files at that location.

        Example 2: If you provide an S3 URI called s3://amzn-s3-demo-destination-bucket/MyGameSessions_ExportedFiles/, then Amazon GameLift Streams will save the files at s3://amzn-s3-demo-destination-bucket/MyGameSessions_ExportedFiles/YYYYMMDD-HHMMSS-appId-sg-Id-sessionId.zip or another similar name.

        " }, "StreamSessionIdentifier":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream session resource. Format example: 1AB2C3De4.

        ", + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream session resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamsession/sg-1AB2C3De4/ABC123def4567. Example ID: ABC123def4567.

        ", "location":"uri", "locationName":"StreamSessionIdentifier" } @@ -1073,7 +1073,7 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        ", + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6. Example ID: a-9ZY8X7Wv6.

        ", "location":"uri", "locationName":"Identifier" } @@ -1097,11 +1097,11 @@ }, "Arn":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies it across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:application/[resource ID].

        " + "documentation":"

        The Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies it across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:application/[resource ID].

        " }, "AssociatedStreamGroups":{ "shape":"ArnList", - "documentation":"

        A set of stream groups that this application is associated with. You can use any of these stream groups to stream your application.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify stream group resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4.

        " + "documentation":"

        A set of stream groups that this application is associated with. You can use any of these stream groups to stream your application.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify stream group resources. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4.

        " }, "CreatedAt":{ "shape":"Timestamp", @@ -1117,7 +1117,7 @@ }, "Id":{ "shape":"Id", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        A unique ID value that is assigned to the resource when it's created. Format example: a-9ZY8X7Wv6.

        " }, "LastUpdatedAt":{ "shape":"Timestamp", @@ -1147,7 +1147,7 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        The unique ID value of the stream group resource to retrieve. Format example: sg-1AB2C3De4.

        ", + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" } @@ -1159,11 +1159,11 @@ "members":{ "Arn":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) that is assigned to the stream group resource and that uniquely identifies the group across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamgroup/[resource ID].

        " + "documentation":"

        The Amazon Resource Name (ARN) that is assigned to the stream group resource and that uniquely identifies the group across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamgroup/[resource ID].

        " }, "AssociatedApplications":{ "shape":"ArnList", - "documentation":"

        A set of applications that this stream group is associated to. You can stream any of these applications by using this stream group.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify application resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6.

        " + "documentation":"

        A set of applications that this stream group is associated to. You can stream any of these applications by using this stream group.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify application resources. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6.

        " }, "CreatedAt":{ "shape":"Timestamp", @@ -1212,13 +1212,13 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        The stream group that runs this stream session.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        ", + "documentation":"

        The stream group that runs this stream session.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" }, "StreamSessionIdentifier":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the stream session resource. Format example: 1AB2C3De4.

        ", + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream session resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamsession/sg-1AB2C3De4/ABC123def4567. Example ID: ABC123def4567.

        ", "location":"uri", "locationName":"StreamSessionIdentifier" } @@ -1237,11 +1237,11 @@ }, "ApplicationArn":{ "shape":"Arn", - "documentation":"

        The application streaming in this session.

        This value is an Amazon Resource Name (ARN) that uniquely identifies the application resource. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6.

        " + "documentation":"

        The application streaming in this session.

        This value is an Amazon Resource Name (ARN) that uniquely identifies the application resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6.

        " }, "Arn":{ "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) assigned to the stream session resource. When combined with the stream group ARN, this value uniquely identifies it across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamsession/[resource ID].

        " + "documentation":"

        The Amazon Resource Name (ARN) that's assigned to a stream session resource. When combined with the stream group resource ID, this value uniquely identifies the stream session across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamsession/[stream group resource ID]/[stream session resource ID].
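
        Given that format, the two resource IDs can be recovered with plain string handling; a minimal sketch (example ARN taken from this documentation):

            // arn:aws:gameliftstreams:[Region]:[Account]:streamsession/[group ID]/[session ID]
            String arn = "arn:aws:gameliftstreams:us-west-2:111122223333:streamsession/sg-1AB2C3De4/ABC123def4567";
            String resource = arn.substring(arn.indexOf(":streamsession/") + ":streamsession/".length());
            String streamGroupId = resource.substring(0, resource.indexOf('/'));    // "sg-1AB2C3De4"
            String streamSessionId = resource.substring(resource.indexOf('/') + 1); // "ABC123def4567"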

        " }, "ConnectionTimeoutSeconds":{ "shape":"ConnectionTimeoutSeconds", @@ -1265,7 +1265,7 @@ }, "Location":{ "shape":"LocationName", - "documentation":"

        The location where Amazon GameLift Streams is hosting the stream session.

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide.

        " + "documentation":"

        The location where Amazon GameLift Streams is hosting the stream session.

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions, quotas, and limitations in the Amazon GameLift Streams Developer Guide.

        " }, "LogFileLocationUri":{ "shape":"FileLocationUri", @@ -1297,7 +1297,7 @@ }, "StreamGroupId":{ "shape":"Id", - "documentation":"

        The unique identifier for the Amazon GameLift Streams stream group that is hosting the stream session.

        " + "documentation":"

        The unique identifier for the Amazon GameLift Streams stream group that is hosting the stream session. Format example: sg-1AB2C3De4.

        " }, "UserId":{ "shape":"UserId", @@ -1456,7 +1456,7 @@ }, "Identifier":{ "shape":"Identifier", - "documentation":"

        The unique identifier of a Amazon GameLift Streams stream group to retrieve the stream session for. You can use either the stream group ID or the Amazon Resource Name (ARN).

        ", + "documentation":"

        The unique identifier of an Amazon GameLift Streams stream group to retrieve the stream session for. You can use either the stream group ID or the Amazon Resource Name (ARN).

        ", "location":"uri", "locationName":"Identifier" }, @@ -1499,7 +1499,7 @@ "members":{ "ResourceArn":{ "shape":"Arn", - "documentation":"

        The (Amazon Resource Name (ARN) that you want to retrieve tags for. To get a Amazon GameLift Streams resource ARN, call a List or Get operation for the resource.

        ", + "documentation":"

        The Amazon Resource Name (ARN) that you want to retrieve tags for. To get an Amazon GameLift Streams resource ARN, call a List or Get operation for the resource.

        ", "location":"uri", "locationName":"ResourceArn" } @@ -1520,15 +1520,15 @@ "members":{ "AlwaysOnCapacity":{ "shape":"AlwaysOnCapacity", - "documentation":"

        The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.

        " + "documentation":"

        The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.

        " }, "LocationName":{ "shape":"LocationName", - "documentation":"

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide.

        " + "documentation":"

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions, quotas, and limitations in the Amazon GameLift Streams Developer Guide.

        " }, "OnDemandCapacity":{ "shape":"OnDemandCapacity", - "documentation":"

        The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes).

        " + "documentation":"

        The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes).

        " } }, "documentation":"

        Configuration settings that define a stream group's stream capacity for a location. When configuring a location for the first time, you must specify a numeric value for at least one of the two capacity types. To update the capacity for an existing stream group, call UpdateStreamGroup. To add a new location and specify its capacity, call AddStreamGroupLocations.
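
        A minimal sketch of one location's configuration, assuming standard SDK for Java v2 builders generated from this shape; the capacity numbers are illustrative:

            import software.amazon.awssdk.services.gameliftstreams.model.LocationConfiguration;

            // At least one of the two capacity types is required when the
            // location is configured for the first time.
            LocationConfiguration usEast = LocationConfiguration.builder()
                .locationName("us-east-1")
                .alwaysOnCapacity(2)  // pre-allocated; billed whether or not in use
                .onDemandCapacity(8)  // allocated per request; slower session start
                .build();
            // Pass this to UpdateStreamGroup (existing location) or
            // AddStreamGroupLocations (new location).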

        " @@ -1559,7 +1559,7 @@ }, "AlwaysOnCapacity":{ "shape":"AlwaysOnCapacity", - "documentation":"

        The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.

        " + "documentation":"

        The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.

        " }, "IdleCapacity":{ "shape":"CapacityValue", @@ -1567,11 +1567,11 @@ }, "LocationName":{ "shape":"LocationName", - "documentation":"

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide.

        " + "documentation":"

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions, quotas, and limitations in the Amazon GameLift Streams Developer Guide.

        " }, "OnDemandCapacity":{ "shape":"OnDemandCapacity", - "documentation":"

        The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes).

        " + "documentation":"

        The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes).

        " }, "RequestedCapacity":{ "shape":"CapacityValue", @@ -1625,13 +1625,13 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        A stream group to remove the specified locations from.

        This value is a Amazon Resource Name (ARN) that uniquely identifies the stream group resource. Format example: sg-1AB2C3De4.

        ", + "documentation":"

        A stream group to remove the specified locations from.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" }, "Locations":{ "shape":"LocationsList", - "documentation":"

        A set of locations to remove this stream group.

        A set of location names. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide.

        ", + "documentation":"

        A set of locations to remove from this stream group.

        A set of location names. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions, quotas, and limitations in the Amazon GameLift Streams Developer Guide.

        ", "location":"querystring", "locationName":"locations" } @@ -1642,7 +1642,7 @@ "members":{ "Location":{ "shape":"LocationName", - "documentation":"

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide.

        " + "documentation":"

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions, quotas, and limitations in the Amazon GameLift Streams Developer Guide.

        " }, "Status":{ "shape":"ReplicationStatusType", @@ -1759,7 +1759,7 @@ }, "ApplicationIdentifier":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6. Example ID: a-9ZY8X7Wv6.

        " }, "ClientToken":{ "shape":"ClientToken", @@ -1776,13 +1776,13 @@ }, "Identifier":{ "shape":"Identifier", - "documentation":"

        The stream group to run this stream session with.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        ", + "documentation":"

        The stream group to run this stream session with.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" }, "Locations":{ "shape":"LocationList", - "documentation":"

        A list of locations, in order of priority, where you want Amazon GameLift Streams to start a stream from. Amazon GameLift Streams selects the location with the next available capacity to start a single stream session in. If this value is empty, Amazon GameLift Streams attempts to start a stream session in the primary location.

        This value is A set of location names. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide.

        " + "documentation":"

        A list of locations, in order of priority, where you want Amazon GameLift Streams to start a stream from. Amazon GameLift Streams selects the location with the next available capacity to start a single stream session in. If this value is empty, Amazon GameLift Streams attempts to start a stream session in the primary location.

      This value is a set of location names. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions, quotas, and limitations in the Amazon GameLift Streams Developer Guide.

        " }, "Protocol":{ "shape":"Protocol", @@ -1794,7 +1794,7 @@ }, "SignalRequest":{ "shape":"SignalRequest", - "documentation":"

        A WebRTC ICE offer string to use when initializing a WebRTC connection. The offer is a very long JSON string. Provide the string as a text value in quotes.

        " + "documentation":"

        A WebRTC ICE offer string to use when initializing a WebRTC connection. Typically, the offer is a very long JSON string. Provide the string as a text value in quotes.

        Amazon GameLift Streams also supports setting the field to \"NO_CLIENT_CONNECTION\". This will create a session without needing any browser request or Web SDK integration. The session starts up as usual and waits for a reconnection from a browser, which is accomplished using CreateStreamSessionConnection.
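
        A hedged sketch of that flow, assuming standard SDK for Java v2 builders; the signalRequest member on CreateStreamSessionConnection and all identifier values are assumptions for illustration:

            import software.amazon.awssdk.services.gameliftstreams.GameLiftStreamsClient;
            import software.amazon.awssdk.services.gameliftstreams.model.CreateStreamSessionConnectionRequest;
            import software.amazon.awssdk.services.gameliftstreams.model.StartStreamSessionRequest;
            import software.amazon.awssdk.services.gameliftstreams.model.StartStreamSessionResponse;

            static void startDetachedThenConnect(GameLiftStreamsClient client, String webRtcOffer) {
                // Start a session with no client attached; it waits for a reconnection.
                StartStreamSessionResponse pending = client.startStreamSession(
                    StartStreamSessionRequest.builder()
                        .identifier("sg-1AB2C3De4")
                        .applicationIdentifier("a-9ZY8X7Wv6")
                        .protocol("WebRTC")                    // enum value assumed
                        .signalRequest("NO_CLIENT_CONNECTION") // no browser required yet
                        .build());

                // Later, attach a client by exchanging a real WebRTC ICE offer.
                client.createStreamSessionConnection(
                    CreateStreamSessionConnectionRequest.builder()
                        .identifier("sg-1AB2C3De4")
                        .streamSessionIdentifier(pending.arn())
                        .signalRequest(webRtcOffer)            // member name assumed
                        .build());
            }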

        " }, "UserId":{ "shape":"UserId", @@ -1815,11 +1815,11 @@ }, "ApplicationArn":{ "shape":"Arn", - "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the application resource. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6.

        " + "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the application resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6.

        " }, "Arn":{ "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) assigned to the stream session resource. When combined with the stream group ARN, this value uniquely identifies it across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamsession/[resource ID].

        " + "documentation":"

        The Amazon Resource Name (ARN) that's assigned to a stream session resource. When combined with the stream group resource ID, this value uniquely identifies the stream session across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamsession/[stream group resource ID]/[stream session resource ID].

        " }, "ConnectionTimeoutSeconds":{ "shape":"ConnectionTimeoutSeconds", @@ -1843,7 +1843,7 @@ }, "Location":{ "shape":"LocationName", - "documentation":"

        The location where Amazon GameLift Streams is streaming your application from.

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide.

        " + "documentation":"

        The location where Amazon GameLift Streams is streaming your application from.

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions, quotas, and limitations in the Amazon GameLift Streams Developer Guide.

        " }, "LogFileLocationUri":{ "shape":"FileLocationUri", @@ -1875,7 +1875,7 @@ }, "StreamGroupId":{ "shape":"Id", - "documentation":"

        The unique identifier for the Amazon GameLift Streams stream group that is hosting the stream session.

        " + "documentation":"

        The unique identifier for the Amazon GameLift Streams stream group that is hosting the stream session. Format example: sg-1AB2C3De4.

        " }, "UserId":{ "shape":"UserId", @@ -1931,7 +1931,7 @@ "members":{ "Arn":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        " + "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4.

        " }, "CreatedAt":{ "shape":"Timestamp", @@ -1947,7 +1947,7 @@ }, "Id":{ "shape":"Id", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        " + "documentation":"

        An ID that uniquely identifies the stream group resource. Example ID: sg-1AB2C3De4.

        " }, "LastUpdatedAt":{ "shape":"Timestamp", @@ -1995,11 +1995,11 @@ "members":{ "ApplicationArn":{ "shape":"Arn", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the application resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6.

        " }, "Arn":{ "shape":"Arn", - "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the stream session resource. Format example: 1AB2C3De4. .

        " + "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the stream session resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamsession/sg-1AB2C3De4/ABC123def4567.

        " }, "CreatedAt":{ "shape":"Timestamp", @@ -2015,7 +2015,7 @@ }, "Location":{ "shape":"LocationName", - "documentation":"

        The location where Amazon GameLift Streams is hosting the stream session.

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide.

        " + "documentation":"

        The location where Amazon GameLift Streams is hosting the stream session.

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions, quotas, and limitations in the Amazon GameLift Streams Developer Guide.

        " }, "Protocol":{ "shape":"Protocol", @@ -2057,7 +2057,7 @@ "members":{ "ResourceArn":{ "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the Amazon GameLift Streams resource that you want to apply tags to.

        ", + "documentation":"

        The Amazon Resource Name (ARN) of the Amazon GameLift Streams resource that you want to apply tags to.

        ", "location":"uri", "locationName":"ResourceArn" }, @@ -2093,13 +2093,13 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        The stream group that runs this stream session.

        ", + "documentation":"

        Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        The stream group that runs this stream session.

        ", "location":"uri", "locationName":"Identifier" }, "StreamSessionIdentifier":{ "shape":"Identifier", - "documentation":"

        Amazon Resource Name (ARN) that uniquely identifies the stream session resource. Format example: 1AB2C3De4.

        ", + "documentation":"

        Amazon Resource Name (ARN) or ID that uniquely identifies the stream session resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamsession/sg-1AB2C3De4/ABC123def4567. Example ID: ABC123def4567.

        ", "location":"uri", "locationName":"StreamSessionIdentifier" } @@ -2132,7 +2132,7 @@ "members":{ "ResourceArn":{ "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the Amazon GameLift Streams resource that you want to remove tags from.

        ", + "documentation":"

        The Amazon Resource Name (ARN) of the Amazon GameLift Streams resource that you want to remove tags from.

        ", "location":"uri", "locationName":"ResourceArn" }, @@ -2155,7 +2155,7 @@ "members":{ "ApplicationLogOutputUri":{ "shape":"ApplicationLogOutputUri", - "documentation":"

        An Amazon S3 URI to a bucket where you would like Amazon GameLift Streams to save application logs. Required if you specify one or more ApplicationLogPaths.

        The log bucket must have permissions that give Amazon GameLift Streams access to write the log files. For more information, see Getting Started in the Amazon GameLift Streams Developer Guide.

        " + "documentation":"

        An Amazon S3 URI to a bucket where you would like Amazon GameLift Streams to save application logs. Required if you specify one or more ApplicationLogPaths.

        The log bucket must have permissions that give Amazon GameLift Streams access to write the log files. For more information, see Application log bucket permission policy in the Amazon GameLift Streams Developer Guide.

        " }, "ApplicationLogPaths":{ "shape":"FilePaths", @@ -2167,7 +2167,7 @@ }, "Identifier":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        ", + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6. Example ID: a-9ZY8X7Wv6.

        ", "location":"uri", "locationName":"Identifier" } @@ -2191,11 +2191,11 @@ }, "Arn":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies it across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:application/[resource ID].

        " + "documentation":"

        The Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies it across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:application/[resource ID].

        " }, "AssociatedStreamGroups":{ "shape":"ArnList", - "documentation":"

        A set of stream groups that this application is associated with. You can use any of these stream groups to stream your application.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify stream group resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4.

        " + "documentation":"

        A set of stream groups that this application is associated with. You can use any of these stream groups to stream your application.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify stream group resources. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4.

        " }, "CreatedAt":{ "shape":"Timestamp", @@ -2211,7 +2211,7 @@ }, "Id":{ "shape":"Id", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        A unique ID value that is assigned to the resource when it's created. Format example: a-9ZY8X7Wv6.

        " }, "LastUpdatedAt":{ "shape":"Timestamp", @@ -2245,7 +2245,7 @@ }, "Identifier":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        ", + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" }, @@ -2261,11 +2261,11 @@ "members":{ "Arn":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) that is assigned to the stream group resource and that uniquely identifies the group across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamgroup/[resource ID].

        " + "documentation":"

        The Amazon Resource Name (ARN) that is assigned to the stream group resource and that uniquely identifies the group across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamgroup/[resource ID].

        " }, "AssociatedApplications":{ "shape":"ArnList", - "documentation":"

        A set of applications that this stream group is associated with. You can stream any of these applications with the stream group.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify application resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6.

        " + "documentation":"

        A set of applications that this stream group is associated with. You can stream any of these applications with the stream group.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify application resources. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6.

        " }, "CreatedAt":{ "shape":"Timestamp", @@ -2329,5 +2329,5 @@ }, "WebSdkProtocolUrl":{"type":"string"} }, - "documentation":"

        Amazon GameLift Streams

        Amazon GameLift Streams provides a global cloud solution for content streaming experiences. Use Amazon GameLift Streams tools to upload and configure content for streaming, deploy and scale computing resources to host streams, and manage stream session placement to meet customer demand.

        This Reference Guide describes the Amazon GameLift Streams service API. You can use the API through the Amazon Web Services SDK, the Command Line Interface (AWS CLI), or by making direct REST calls through HTTPS.

        See the Amazon GameLift Streams Developer Guide for more information on how Amazon GameLift Streams works and how to work with it.

        " + "documentation":"

        Amazon GameLift Streams

        Amazon GameLift Streams provides a global cloud solution for content streaming experiences. Use Amazon GameLift Streams tools to upload and configure content for streaming, deploy and scale computing resources to host streams, and manage stream session placement to meet customer demand.

        This Reference Guide describes the Amazon GameLift Streams service API. You can use the API through the Amazon Web Services SDK, the Command Line Interface (CLI), or by making direct REST calls through HTTPS.

        See the Amazon GameLift Streams Developer Guide for more information on how Amazon GameLift Streams works and how to work with it.

        " } diff --git a/services/geomaps/pom.xml b/services/geomaps/pom.xml index a74737c2d00e..edd04c660078 100644 --- a/services/geomaps/pom.xml +++ b/services/geomaps/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT geomaps AWS Java SDK :: Services :: Geo Maps diff --git a/services/geoplaces/pom.xml b/services/geoplaces/pom.xml index 8da6b909a9aa..b81832b3b487 100644 --- a/services/geoplaces/pom.xml +++ b/services/geoplaces/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT geoplaces AWS Java SDK :: Services :: Geo Places diff --git a/services/georoutes/pom.xml b/services/georoutes/pom.xml index 6a631c9eef48..ef16f6624d98 100644 --- a/services/georoutes/pom.xml +++ b/services/georoutes/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT georoutes AWS Java SDK :: Services :: Geo Routes diff --git a/services/glacier/pom.xml b/services/glacier/pom.xml index 6e5e15e4553f..46f49c8f1ba3 100644 --- a/services/glacier/pom.xml +++ b/services/glacier/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT glacier AWS Java SDK :: Services :: Amazon Glacier diff --git a/services/globalaccelerator/pom.xml b/services/globalaccelerator/pom.xml index 636d835e3fd1..b56b24bb41bf 100644 --- a/services/globalaccelerator/pom.xml +++ b/services/globalaccelerator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT globalaccelerator AWS Java SDK :: Services :: Global Accelerator diff --git a/services/glue/pom.xml b/services/glue/pom.xml index f3814b23c70c..ffdc701944e1 100644 --- a/services/glue/pom.xml +++ b/services/glue/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 glue diff --git a/services/grafana/pom.xml b/services/grafana/pom.xml index b017d105fe3c..41615b30b8b4 100644 --- a/services/grafana/pom.xml +++ b/services/grafana/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT grafana AWS Java SDK :: Services :: Grafana diff --git a/services/greengrass/pom.xml b/services/greengrass/pom.xml index b4f1efbc861a..d49625823449 100644 --- a/services/greengrass/pom.xml +++ b/services/greengrass/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT greengrass AWS Java SDK :: Services :: AWS Greengrass diff --git a/services/greengrassv2/pom.xml b/services/greengrassv2/pom.xml index a985e299226a..984711d825e1 100644 --- a/services/greengrassv2/pom.xml +++ b/services/greengrassv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT greengrassv2 AWS Java SDK :: Services :: Greengrass V2 diff --git a/services/groundstation/pom.xml b/services/groundstation/pom.xml index c5045130c3c9..b28c752e4402 100644 --- a/services/groundstation/pom.xml +++ b/services/groundstation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT groundstation AWS Java SDK :: Services :: GroundStation diff --git a/services/guardduty/pom.xml b/services/guardduty/pom.xml index c36637165728..716ed026c722 100644 --- a/services/guardduty/pom.xml +++ b/services/guardduty/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 guardduty diff --git a/services/health/pom.xml b/services/health/pom.xml index d7c5e8349ddb..b33368160ba4 100644 --- a/services/health/pom.xml 
+++ b/services/health/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT health AWS Java SDK :: Services :: AWS Health APIs and Notifications diff --git a/services/healthlake/pom.xml b/services/healthlake/pom.xml index 829169e10939..951c9f2c28c2 100644 --- a/services/healthlake/pom.xml +++ b/services/healthlake/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT healthlake AWS Java SDK :: Services :: Health Lake diff --git a/services/iam/pom.xml b/services/iam/pom.xml index 5d1f341ffef3..1fd310ddbb89 100644 --- a/services/iam/pom.xml +++ b/services/iam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT iam AWS Java SDK :: Services :: AWS IAM diff --git a/services/identitystore/pom.xml b/services/identitystore/pom.xml index 9c7797678c1e..713cfc85ee4c 100644 --- a/services/identitystore/pom.xml +++ b/services/identitystore/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT identitystore AWS Java SDK :: Services :: Identitystore diff --git a/services/imagebuilder/pom.xml b/services/imagebuilder/pom.xml index 184e3674027a..aee02c4359c3 100644 --- a/services/imagebuilder/pom.xml +++ b/services/imagebuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT imagebuilder AWS Java SDK :: Services :: Imagebuilder diff --git a/services/inspector/pom.xml b/services/inspector/pom.xml index e7402dca79bc..5f84330782c2 100644 --- a/services/inspector/pom.xml +++ b/services/inspector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT inspector AWS Java SDK :: Services :: Amazon Inspector Service diff --git a/services/inspector2/pom.xml b/services/inspector2/pom.xml index be5cdb863cd7..6cdb3ee5fa9b 100644 --- a/services/inspector2/pom.xml +++ b/services/inspector2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT inspector2 AWS Java SDK :: Services :: Inspector2 diff --git a/services/inspectorscan/pom.xml b/services/inspectorscan/pom.xml index 0efe1bbc8d96..a3412af1836c 100644 --- a/services/inspectorscan/pom.xml +++ b/services/inspectorscan/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT inspectorscan AWS Java SDK :: Services :: Inspector Scan diff --git a/services/internetmonitor/pom.xml b/services/internetmonitor/pom.xml index d441afff84fa..befda581738c 100644 --- a/services/internetmonitor/pom.xml +++ b/services/internetmonitor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT internetmonitor AWS Java SDK :: Services :: Internet Monitor diff --git a/services/invoicing/pom.xml b/services/invoicing/pom.xml index 4fc789093a81..80bea8a5219d 100644 --- a/services/invoicing/pom.xml +++ b/services/invoicing/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT invoicing AWS Java SDK :: Services :: Invoicing diff --git a/services/invoicing/src/main/resources/codegen-resources/paginators-1.json b/services/invoicing/src/main/resources/codegen-resources/paginators-1.json index 860206357477..98952e1a4868 100644 --- a/services/invoicing/src/main/resources/codegen-resources/paginators-1.json +++ b/services/invoicing/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,11 @@ { "pagination": { + "ListInvoiceSummaries": { + "input_token": "NextToken", + 
"output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "InvoiceSummaries" + }, "ListInvoiceUnits": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/invoicing/src/main/resources/codegen-resources/service-2.json b/services/invoicing/src/main/resources/codegen-resources/service-2.json index df50ba463cab..6de121f6015d 100644 --- a/services/invoicing/src/main/resources/codegen-resources/service-2.json +++ b/services/invoicing/src/main/resources/codegen-resources/service-2.json @@ -24,11 +24,11 @@ "input":{"shape":"BatchGetInvoiceProfileRequest"}, "output":{"shape":"BatchGetInvoiceProfileResponse"}, "errors":[ - {"shape":"ThrottlingException"}, - {"shape":"InternalServerException"}, - {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

        This gets the invoice profile associated with a set of accounts. The accounts must be linked accounts under the requester's management account organization.

        " }, @@ -41,10 +41,10 @@ "input":{"shape":"CreateInvoiceUnitRequest"}, "output":{"shape":"CreateInvoiceUnitResponse"}, "errors":[ - {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, {"shape":"InternalServerException"}, - {"shape":"AccessDeniedException"}, - {"shape":"ValidationException"} + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

        This creates a new invoice unit with the provided definition.

        " }, @@ -57,11 +57,11 @@ "input":{"shape":"DeleteInvoiceUnitRequest"}, "output":{"shape":"DeleteInvoiceUnitResponse"}, "errors":[ - {"shape":"ThrottlingException"}, - {"shape":"InternalServerException"}, - {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

        This deletes an invoice unit with the provided invoice unit ARN.

        " }, @@ -74,14 +74,31 @@ "input":{"shape":"GetInvoiceUnitRequest"}, "output":{"shape":"GetInvoiceUnitResponse"}, "errors":[ - {"shape":"ThrottlingException"}, - {"shape":"InternalServerException"}, - {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

        This retrieves the invoice unit definition.

        " }, + "ListInvoiceSummaries":{ + "name":"ListInvoiceSummaries", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListInvoiceSummariesRequest"}, + "output":{"shape":"ListInvoiceSummariesResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

        Retrieves your invoice details programmatically, without line item details.

        " + }, "ListInvoiceUnits":{ "name":"ListInvoiceUnits", "http":{ @@ -91,10 +108,10 @@ "input":{"shape":"ListInvoiceUnitsRequest"}, "output":{"shape":"ListInvoiceUnitsResponse"}, "errors":[ - {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, {"shape":"InternalServerException"}, - {"shape":"AccessDeniedException"}, - {"shape":"ValidationException"} + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

        This fetches a list of all invoice unit definitions for a given account, as of the provided AsOf date.

        " }, @@ -107,11 +124,11 @@ "input":{"shape":"ListTagsForResourceRequest"}, "output":{"shape":"ListTagsForResourceResponse"}, "errors":[ - {"shape":"ThrottlingException"}, - {"shape":"InternalServerException"}, - {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

        Lists the tags for a resource.

        " }, @@ -124,12 +141,12 @@ "input":{"shape":"TagResourceRequest"}, "output":{"shape":"TagResourceResponse"}, "errors":[ - {"shape":"ThrottlingException"}, - {"shape":"InternalServerException"}, - {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

        Adds a tag to a resource.

        " }, @@ -142,11 +159,11 @@ "input":{"shape":"UntagResourceRequest"}, "output":{"shape":"UntagResourceResponse"}, "errors":[ - {"shape":"ThrottlingException"}, - {"shape":"InternalServerException"}, - {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

        Removes a tag from a resource.

        " }, @@ -159,11 +176,11 @@ "input":{"shape":"UpdateInvoiceUnitRequest"}, "output":{"shape":"UpdateInvoiceUnitResponse"}, "errors":[ - {"shape":"ThrottlingException"}, - {"shape":"InternalServerException"}, - {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

        You can update the invoice unit configuration at any time, and Amazon Web Services will use the latest configuration at the end of the month.

        " } @@ -191,8 +208,36 @@ "type":"string", "pattern":"\\d{12}" }, + "AmountBreakdown":{ + "type":"structure", + "members":{ + "SubTotalAmount":{ + "shape":"BasicString", + "documentation":"

        The subtotal amount of the breakdown.

        " + }, + "Discounts":{ + "shape":"DiscountsBreakdown", + "documentation":"

        The discounted amount.

        " + }, + "Taxes":{ + "shape":"TaxesBreakdown", + "documentation":"

        The tax amount.

        " + }, + "Fees":{ + "shape":"FeesBreakdown", + "documentation":"

        The fee amount.

        " + } + }, + "documentation":"

        Details about how the total amount was calculated and categorized.

        " + }, "AsOfTimestamp":{"type":"timestamp"}, "BasicString":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"[\\s\\S]*" + }, + "BasicStringWithoutSpace":{ "type":"string", "max":1024, "min":0, @@ -217,6 +262,24 @@ } } }, + "BillingPeriod":{ + "type":"structure", + "required":[ + "Month", + "Year" + ], + "members":{ + "Month":{ + "shape":"Month", + "documentation":"

        The billing period month.

        " + }, + "Year":{ + "shape":"Year", + "documentation":"

        The billing period year.

        " + } + }, + "documentation":"

        The billing period for which you want to retrieve invoice-related documents.

        " + }, "CreateInvoiceUnitRequest":{ "type":"structure", "required":[ @@ -260,6 +323,47 @@ } } }, + "CurrencyCode":{ + "type":"string", + "max":3, + "min":3 + }, + "CurrencyExchangeDetails":{ + "type":"structure", + "members":{ + "SourceCurrencyCode":{ + "shape":"BasicString", + "documentation":"

        The exchange source currency.

        " + }, + "TargetCurrencyCode":{ + "shape":"BasicString", + "documentation":"

        The exchange target currency.

        " + }, + "Rate":{ + "shape":"BasicString", + "documentation":"

        The currency exchange rate.

        " + } + }, + "documentation":"

        The details of currency exchange.

        " + }, + "DateInterval":{ + "type":"structure", + "required":[ + "StartDate", + "EndDate" + ], + "members":{ + "StartDate":{ + "shape":"Timestamp", + "documentation":"

        The beginning of the time period that you want invoice-related documents for. The start date is inclusive. For example, if start is 2019-01-01, Amazon Web Services retrieves invoices starting at 2019-01-01 up to the end date.

        " + }, + "EndDate":{ + "shape":"Timestamp", + "documentation":"

        The end of the time period that you want invoice-related documents for. The end date is exclusive. For example, if end is 2019-01-10, Amazon Web Services retrieves invoice-related documents from the start date up to, but not including, 2019-01-10.

        " + } + }, + "documentation":"

        The time period that you want invoice-related documents for.

        " + }, "DeleteInvoiceUnitRequest":{ "type":"structure", "required":["InvoiceUnitArn"], @@ -285,6 +389,88 @@ "min":0, "pattern":"[\\S\\s]*" }, + "DiscountsBreakdown":{ + "type":"structure", + "members":{ + "Breakdown":{ + "shape":"DiscountsBreakdownAmountList", + "documentation":"

        The list of discounts information.

        " + }, + "TotalAmount":{ + "shape":"BasicString", + "documentation":"

        The discount's total amount.

        " + } + }, + "documentation":"

        The discounts details.

        " + }, + "DiscountsBreakdownAmount":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"BasicString", + "documentation":"

        The description of the discount.

        " + }, + "Amount":{ + "shape":"BasicString", + "documentation":"

        The discounted amount.

        " + }, + "Rate":{ + "shape":"BasicString", + "documentation":"

        The details of the discount rate.

        " + } + }, + "documentation":"

        The discounted amount.

        " + }, + "DiscountsBreakdownAmountList":{ + "type":"list", + "member":{"shape":"DiscountsBreakdownAmount"} + }, + "Entity":{ + "type":"structure", + "members":{ + "InvoicingEntity":{ + "shape":"BasicString", + "documentation":"

        The name of the entity that issues the Amazon Web Services invoice.

        " + } + }, + "documentation":"

        The organization name providing Amazon Web Services services.

        " + }, + "FeesBreakdown":{ + "type":"structure", + "members":{ + "Breakdown":{ + "shape":"FeesBreakdownAmountList", + "documentation":"

        The list of fees information.

        " + }, + "TotalAmount":{ + "shape":"BasicString", + "documentation":"

        The total amount of fees.

        " + } + }, + "documentation":"

        The details of fees.

        " + }, + "FeesBreakdownAmount":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"BasicString", + "documentation":"

        The description of the fee.

        " + }, + "Amount":{ + "shape":"BasicString", + "documentation":"

        The fee amount.

        " + }, + "Rate":{ + "shape":"BasicString", + "documentation":"

        Details about the rate amount.

        " + } + }, + "documentation":"

        The fee amount.

        " + }, + "FeesBreakdownAmountList":{ + "type":"list", + "member":{"shape":"FeesBreakdownAmount"} + }, "Filters":{ "type":"structure", "members":{ @@ -365,6 +551,32 @@ "exception":true, "fault":true }, + "InvoiceCurrencyAmount":{ + "type":"structure", + "members":{ + "TotalAmount":{ + "shape":"BasicString", + "documentation":"

        The invoice currency amount.

        " + }, + "TotalAmountBeforeTax":{ + "shape":"BasicString", + "documentation":"

        Details about the invoice total amount before tax.

        " + }, + "CurrencyCode":{ + "shape":"CurrencyCode", + "documentation":"

        The currency denomination of the invoice document.

        " + }, + "AmountBreakdown":{ + "shape":"AmountBreakdown", + "documentation":"

        Details about the invoice currency amount.

        " + }, + "CurrencyExchangeDetails":{ + "shape":"CurrencyExchangeDetails", + "documentation":"

        The details of currency exchange.

        " + } + }, + "documentation":"

        The amount charged after taxes, in the preferred currency.

        " + }, "InvoiceProfile":{ "type":"structure", "members":{ @@ -373,7 +585,7 @@ "documentation":"

        The account ID the invoice profile is generated for.

        " }, "ReceiverName":{ - "shape":"BasicString", + "shape":"BasicStringWithoutSpace", "documentation":"

        The name of the person receiving the invoice profile.

        " }, "ReceiverAddress":{ @@ -381,20 +593,127 @@ "documentation":"

        The address of the receiver that will be printed on the invoice.

        " }, "ReceiverEmail":{ - "shape":"SensitiveBasicString", + "shape":"SensitiveBasicStringWithoutSpace", "documentation":"

        The email address for the invoice profile receiver.

        " }, "Issuer":{ - "shape":"BasicString", + "shape":"BasicStringWithoutSpace", "documentation":"

        This specifies the issuing entity of the invoice.

        " }, "TaxRegistrationNumber":{ - "shape":"SensitiveBasicString", + "shape":"SensitiveBasicStringWithoutSpace", "documentation":"

        Your Tax Registration Number (TRN) information.

        " } }, "documentation":"

        Contains high-level information about the invoice receiver.

        " }, + "InvoiceSummaries":{ + "type":"list", + "member":{"shape":"InvoiceSummary"} + }, + "InvoiceSummariesFilter":{ + "type":"structure", + "members":{ + "TimeInterval":{ + "shape":"DateInterval", + "documentation":"

        The date range for invoice summary retrieval.

        " + }, + "BillingPeriod":{ + "shape":"BillingPeriod", + "documentation":"

        The billing period associated with the invoice documents.

        " + }, + "InvoicingEntity":{ + "shape":"BasicString", + "documentation":"

        The name of the entity that issues the Amazon Web Services invoice.

        " + } + }, + "documentation":"

        Filters for your invoice summaries.

        " + }, + "InvoiceSummariesMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "InvoiceSummariesSelector":{ + "type":"structure", + "required":[ + "ResourceType", + "Value" + ], + "members":{ + "ResourceType":{ + "shape":"ListInvoiceSummariesResourceType", + "documentation":"

        The query identifier type (INVOICE_ID or ACCOUNT_ID).

        " + }, + "Value":{ + "shape":"StringWithoutNewLine", + "documentation":"

        The value of the query identifier.

        " + } + }, + "documentation":"

        Specifies the invoice summary.

        " + }, + "InvoiceSummary":{ + "type":"structure", + "members":{ + "AccountId":{ + "shape":"AccountIdString", + "documentation":"

        The Amazon Web Services account ID.

        " + }, + "InvoiceId":{ + "shape":"BasicString", + "documentation":"

        The invoice ID.

        " + }, + "IssuedDate":{ + "shape":"Timestamp", + "documentation":"

        The issued date of the invoice.

        " + }, + "DueDate":{ + "shape":"Timestamp", + "documentation":"

        The invoice due date.

        " + }, + "Entity":{ + "shape":"Entity", + "documentation":"

        The organization name providing Amazon Web Services services.

        " + }, + "BillingPeriod":{ + "shape":"BillingPeriod", + "documentation":"

        The billing period of the invoice-related document.

        " + }, + "InvoiceType":{ + "shape":"InvoiceType", + "documentation":"

        The type of invoice.

        " + }, + "OriginalInvoiceId":{ + "shape":"BasicString", + "documentation":"

        The initial or original invoice ID.

        " + }, + "PurchaseOrderNumber":{ + "shape":"BasicString", + "documentation":"

        The purchase order number associated with the invoice.

        " + }, + "BaseCurrencyAmount":{ + "shape":"InvoiceCurrencyAmount", + "documentation":"

        The summary with the product and service currency.

        " + }, + "TaxCurrencyAmount":{ + "shape":"InvoiceCurrencyAmount", + "documentation":"

        The summary with the tax currency.

        " + }, + "PaymentCurrencyAmount":{ + "shape":"InvoiceCurrencyAmount", + "documentation":"

        The summary with the customer configured currency.

        " + } + }, + "documentation":"

        The invoice that the API retrieved.

        " + }, + "InvoiceType":{ + "type":"string", + "enum":[ + "INVOICE", + "CREDIT_MEMO" + ] + }, "InvoiceUnit":{ "type":"structure", "members":{ @@ -461,6 +780,49 @@ "member":{"shape":"InvoiceUnit"} }, "LastModifiedTimestamp":{"type":"timestamp"}, + "ListInvoiceSummariesRequest":{ + "type":"structure", + "required":["Selector"], + "members":{ + "Selector":{ + "shape":"InvoiceSummariesSelector", + "documentation":"

        The option to retrieve details for a specific invoice by providing its unique ID. Alternatively, access information for all invoices linked to the account by providing an account ID.

        " + }, + "Filter":{ + "shape":"InvoiceSummariesFilter", + "documentation":"

        Filters you can use to customize your invoice summary.

        " + }, + "NextToken":{ + "shape":"NextTokenString", + "documentation":"

        The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.

        " + }, + "MaxResults":{ + "shape":"InvoiceSummariesMaxResults", + "documentation":"

        The maximum number of invoice summaries a paginated response can contain.

        " + } + } + }, + "ListInvoiceSummariesResourceType":{ + "type":"string", + "enum":[ + "ACCOUNT_ID", + "INVOICE_ID" + ] + }, + "ListInvoiceSummariesResponse":{ + "type":"structure", + "required":["InvoiceSummaries"], + "members":{ + "InvoiceSummaries":{ + "shape":"InvoiceSummaries", + "documentation":"

        List of key (summary level) invoice details without line item details.

        " + }, + "NextToken":{ + "shape":"NextTokenString", + "documentation":"

        The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.

        " + } + } + }, "ListInvoiceUnitsRequest":{ "type":"structure", "members":{ @@ -520,9 +882,14 @@ "max":500, "min":1 }, + "Month":{ + "type":"integer", + "box":true, + "max":12, + "min":1 + }, "NextTokenString":{ "type":"string", - "max":2048, "min":1, "pattern":"[\\S\\s]*" }, @@ -625,7 +992,7 @@ "max":256, "min":0 }, - "SensitiveBasicString":{ + "SensitiveBasicStringWithoutSpace":{ "type":"string", "max":1024, "min":0, @@ -641,6 +1008,12 @@ "documentation":"

        The request was rejected because it attempted to create resources beyond the current Amazon Web Services account limits. The error message describes the limit exceeded.

        ", "exception":true }, + "StringWithoutNewLine":{ + "type":"string", + "max":1024, + "min":0, + "pattern":".*" + }, "TagResourceRequest":{ "type":"structure", "required":[ @@ -667,9 +1040,45 @@ "type":"string", "max":2048, "min":20, - "pattern":"arn:aws[-a-z0-9]*:[a-z0-9]+:[-a-z0-9]*:[0-9]{12}:[-a-zA-Z0-9/:_]+" + "pattern":"arn:aws[-a-z0-9]*:(invoicing)::[0-9]{12}:[-a-zA-Z0-9/:_]+" }, "TaxInheritanceDisabledFlag":{"type":"boolean"}, + "TaxesBreakdown":{ + "type":"structure", + "members":{ + "Breakdown":{ + "shape":"TaxesBreakdownAmountList", + "documentation":"

        A list of tax information.

        " + }, + "TotalAmount":{ + "shape":"BasicString", + "documentation":"

        The total amount for your taxes.

        " + } + }, + "documentation":"

        The details of the taxes.

        " + }, + "TaxesBreakdownAmount":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"BasicString", + "documentation":"

        The description of the tax.

        " + }, + "Amount":{ + "shape":"BasicString", + "documentation":"

        The tax amount.

        " + }, + "Rate":{ + "shape":"BasicString", + "documentation":"

        The details of the tax rate.

        " + } + }, + "documentation":"

        The tax amount.

        " + }, + "TaxesBreakdownAmountList":{ + "type":"list", + "member":{"shape":"TaxesBreakdownAmount"} + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -678,6 +1087,7 @@ "documentation":"

        The request was denied due to request throttling.

        ", "exception":true }, + "Timestamp":{"type":"timestamp"}, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -792,6 +1202,12 @@ "unknownOperation", "other" ] + }, + "Year":{ + "type":"integer", + "box":true, + "max":2050, + "min":2005 } }, "documentation":"

        Amazon Web Services Invoice Configuration

        You can use Amazon Web Services Invoice Configuration APIs to programmatically create, update, delete, get, and list invoice units. You can also programmatically fetch information about the invoice receiver, such as the business legal name, address, and invoicing contacts.

        You can use Amazon Web Services Invoice Configuration to receive separate Amazon Web Services invoices based on your organizational needs. By using Amazon Web Services Invoice Configuration, you can configure invoice units that are groups of Amazon Web Services accounts that represent your business entities, and receive separate invoices for each business entity. You can also assign a unique member or payer account as the invoice receiver for each invoice unit. As you create new accounts within your Organizations using Amazon Web Services Invoice Configuration APIs, you can automate the creation of new invoice units and subsequently automate the addition of new accounts to your invoice units.

        Service endpoint

        You can use the following endpoints for Amazon Web Services Invoice Configuration:

        • https://invoicing.us-east-1.api.aws

        " diff --git a/services/iot/pom.xml b/services/iot/pom.xml index 99e9382f49b0..1d57af67f3b0 100644 --- a/services/iot/pom.xml +++ b/services/iot/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT iot AWS Java SDK :: Services :: AWS IoT diff --git a/services/iotanalytics/pom.xml b/services/iotanalytics/pom.xml index d4be4932ac6a..f82d6f1afc85 100644 --- a/services/iotanalytics/pom.xml +++ b/services/iotanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT iotanalytics AWS Java SDK :: Services :: IoTAnalytics diff --git a/services/iotdataplane/pom.xml b/services/iotdataplane/pom.xml index 2947b3335e7d..9672c608935e 100644 --- a/services/iotdataplane/pom.xml +++ b/services/iotdataplane/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT iotdataplane AWS Java SDK :: Services :: AWS IoT Data Plane diff --git a/services/iotdeviceadvisor/pom.xml b/services/iotdeviceadvisor/pom.xml index 52e77c46e05c..240112c188db 100644 --- a/services/iotdeviceadvisor/pom.xml +++ b/services/iotdeviceadvisor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT iotdeviceadvisor AWS Java SDK :: Services :: Iot Device Advisor diff --git a/services/iotevents/pom.xml b/services/iotevents/pom.xml index db9fead6a719..cb8090202281 100644 --- a/services/iotevents/pom.xml +++ b/services/iotevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT iotevents AWS Java SDK :: Services :: IoT Events diff --git a/services/ioteventsdata/pom.xml b/services/ioteventsdata/pom.xml index 9a8698162c83..a6bc3e64085f 100644 --- a/services/ioteventsdata/pom.xml +++ b/services/ioteventsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ioteventsdata AWS Java SDK :: Services :: IoT Events Data diff --git a/services/iotfleethub/pom.xml b/services/iotfleethub/pom.xml index 4d9642ec0e8e..5953feb5f580 100644 --- a/services/iotfleethub/pom.xml +++ b/services/iotfleethub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT iotfleethub AWS Java SDK :: Services :: Io T Fleet Hub diff --git a/services/iotfleetwise/pom.xml b/services/iotfleetwise/pom.xml index af7a33ee24a5..ebc5d8f9a46a 100644 --- a/services/iotfleetwise/pom.xml +++ b/services/iotfleetwise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT iotfleetwise AWS Java SDK :: Services :: Io T Fleet Wise diff --git a/services/iotfleetwise/src/main/resources/codegen-resources/service-2.json b/services/iotfleetwise/src/main/resources/codegen-resources/service-2.json index 94809d3642d1..19bef5b92d1d 100644 --- a/services/iotfleetwise/src/main/resources/codegen-resources/service-2.json +++ b/services/iotfleetwise/src/main/resources/codegen-resources/service-2.json @@ -5297,7 +5297,7 @@ }, "status":{ "shape":"CampaignStatus", - "documentation":"

        The state of a campaign. The status can be one of:

        • CREATING - Amazon Web Services IoT FleetWise is processing your request to create the campaign.

        • WAITING_FOR_APPROVAL - After a campaign is created, it enters the WAITING_FOR_APPROVAL state. To allow Amazon Web Services IoT FleetWise to deploy the campaign to the target vehicle or fleet, use the API operation to approve the campaign.

        • RUNNING - The campaign is active.

        • SUSPENDED - The campaign is suspended. To resume the campaign, use the API operation.

        " + "documentation":"

        The state of a campaign. The status can be one of:

        • CREATING - Amazon Web Services IoT FleetWise is processing your request to create the campaign.

        • WAITING_FOR_APPROVAL - After you create a campaign, it enters this state. Use the API operation to approve the campaign for deployment to the target vehicle or fleet.

        • RUNNING - The campaign is active.

        • SUSPENDED - The campaign is suspended. To resume the campaign, use the API operation.

        " } } }, @@ -5742,7 +5742,8 @@ "READY", "HEALTHY", "SUSPENDED", - "DELETING" + "DELETING", + "READY_FOR_CHECKIN" ] }, "VehicleStatus":{ @@ -5758,7 +5759,7 @@ }, "status":{ "shape":"VehicleState", - "documentation":"

        The status of a campaign, which can be one of the following:

        • CREATED - The campaign has been created successfully but has not been approved.

        • READY - The campaign has been approved but has not been deployed to the vehicle.

        • HEALTHY - The campaign has been deployed to the vehicle.

        • SUSPENDED - The campaign has been suspended and data collection is paused.

        • DELETING - The campaign is being removed from the vehicle.

        " + "documentation":"

        The status of a campaign, which can be one of the following:

        • CREATED - The campaign exists but is not yet approved.

        • READY - The campaign is approved but has not been deployed to the vehicle. Data has not arrived at the vehicle yet.

        • HEALTHY - The campaign is deployed to the vehicle.

        • SUSPENDED - The campaign is suspended and data collection is paused.

        • DELETING - The campaign is being removed from the vehicle.

        • READY_FOR_CHECKIN - The campaign is approved and waiting for vehicle check-in before deployment.

        " } }, "documentation":"

        Information about a campaign associated with a vehicle.

        " diff --git a/services/iotjobsdataplane/pom.xml b/services/iotjobsdataplane/pom.xml index 95815ad9fd4b..4a35e4760883 100644 --- a/services/iotjobsdataplane/pom.xml +++ b/services/iotjobsdataplane/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT iotjobsdataplane AWS Java SDK :: Services :: IoT Jobs Data Plane diff --git a/services/iotmanagedintegrations/pom.xml b/services/iotmanagedintegrations/pom.xml index ebe9741bad4f..f8c2dc08b977 100644 --- a/services/iotmanagedintegrations/pom.xml +++ b/services/iotmanagedintegrations/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT iotmanagedintegrations AWS Java SDK :: Services :: IoT Managed Integrations diff --git a/services/iotsecuretunneling/pom.xml b/services/iotsecuretunneling/pom.xml index 8f90cb305db3..8e38785b9ddd 100644 --- a/services/iotsecuretunneling/pom.xml +++ b/services/iotsecuretunneling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT iotsecuretunneling AWS Java SDK :: Services :: IoTSecureTunneling diff --git a/services/iotsitewise/pom.xml b/services/iotsitewise/pom.xml index cc2bc44a2c2a..d72041d10d45 100644 --- a/services/iotsitewise/pom.xml +++ b/services/iotsitewise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT iotsitewise AWS Java SDK :: Services :: Io T Site Wise diff --git a/services/iotthingsgraph/pom.xml b/services/iotthingsgraph/pom.xml index 8fce72bf6889..12b120aabea4 100644 --- a/services/iotthingsgraph/pom.xml +++ b/services/iotthingsgraph/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT iotthingsgraph AWS Java SDK :: Services :: IoTThingsGraph diff --git a/services/iottwinmaker/pom.xml b/services/iottwinmaker/pom.xml index 5b40c46514b6..7101c68fc607 100644 --- a/services/iottwinmaker/pom.xml +++ b/services/iottwinmaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT iottwinmaker AWS Java SDK :: Services :: Io T Twin Maker diff --git a/services/iotwireless/pom.xml b/services/iotwireless/pom.xml index 271515df503c..e677e5550bdf 100644 --- a/services/iotwireless/pom.xml +++ b/services/iotwireless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT iotwireless AWS Java SDK :: Services :: IoT Wireless diff --git a/services/ivs/pom.xml b/services/ivs/pom.xml index fc862c54906b..c8e0146c0896 100644 --- a/services/ivs/pom.xml +++ b/services/ivs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ivs AWS Java SDK :: Services :: Ivs diff --git a/services/ivschat/pom.xml b/services/ivschat/pom.xml index 160595c9261f..a4850d331b2b 100644 --- a/services/ivschat/pom.xml +++ b/services/ivschat/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ivschat AWS Java SDK :: Services :: Ivschat diff --git a/services/ivsrealtime/pom.xml b/services/ivsrealtime/pom.xml index 04750323270d..fb4b0a962d71 100644 --- a/services/ivsrealtime/pom.xml +++ b/services/ivsrealtime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ivsrealtime AWS Java SDK :: Services :: IVS Real Time diff --git a/services/ivsrealtime/src/main/resources/codegen-resources/paginators-1.json b/services/ivsrealtime/src/main/resources/codegen-resources/paginators-1.json index 309300d0e943..2b5765be5c1f 100644 --- 
a/services/ivsrealtime/src/main/resources/codegen-resources/paginators-1.json +++ b/services/ivsrealtime/src/main/resources/codegen-resources/paginators-1.json @@ -21,6 +21,12 @@ "output_token": "nextToken", "limit_key": "maxResults" }, + "ListParticipantReplicas": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "replicas" + }, "ListParticipants": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/services/ivsrealtime/src/main/resources/codegen-resources/service-2.json b/services/ivsrealtime/src/main/resources/codegen-resources/service-2.json index 19089d49a743..2a82fdd4ada3 100644 --- a/services/ivsrealtime/src/main/resources/codegen-resources/service-2.json +++ b/services/ivsrealtime/src/main/resources/codegen-resources/service-2.json @@ -436,6 +436,21 @@ ], "documentation":"

        Lists events for a specified participant that occurred during a specified stage session.

        " }, + "ListParticipantReplicas":{ + "name":"ListParticipantReplicas", + "http":{ + "method":"POST", + "requestUri":"/ListParticipantReplicas", + "responseCode":200 + }, + "input":{"shape":"ListParticipantReplicasRequest"}, + "output":{"shape":"ListParticipantReplicasResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

        Lists all the replicas for a participant from a source stage.

        " + }, "ListParticipants":{ "name":"ListParticipants", "http":{ @@ -551,6 +566,26 @@ ], "documentation":"

        Starts a Composition from a stage based on the configuration provided in the request.

        A Composition is an ephemeral resource that exists after this operation returns successfully. Composition stops and the resource is deleted:

        • When StopComposition is called.

        • After a 1-minute timeout, when all participants are disconnected from the stage.

        • After a 1-minute timeout, if there are no participants in the stage when StartComposition is called.

        • When broadcasting to the IVS channel fails and all retries are exhausted.

        • When broadcasting is disconnected and all attempts to reconnect are exhausted.

        " }, + "StartParticipantReplication":{ + "name":"StartParticipantReplication", + "http":{ + "method":"POST", + "requestUri":"/StartParticipantReplication", + "responseCode":200 + }, + "input":{"shape":"StartParticipantReplicationRequest"}, + "output":{"shape":"StartParticipantReplicationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"PendingVerification"} + ], + "documentation":"

        Starts replicating a publishing participant from a source stage to a destination stage.

        " + }, "StopComposition":{ "name":"StopComposition", "http":{ @@ -570,6 +605,23 @@ ], "documentation":"

        Stops and deletes a Composition resource. Any broadcast from the Composition resource is stopped.

        " }, + "StopParticipantReplication":{ + "name":"StopParticipantReplication", + "http":{ + "method":"POST", + "requestUri":"/StopParticipantReplication", + "responseCode":200 + }, + "input":{"shape":"StopParticipantReplicationRequest"}, + "output":{"shape":"StopParticipantReplicationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Stops a replicated participant session.

        " + }, "TagResource":{ "name":"TagResource", "http":{ @@ -734,6 +786,10 @@ "hlsConfiguration":{ "shape":"ParticipantRecordingHlsConfiguration", "documentation":"

        HLS configuration object for individual participant recording.

        " + }, + "recordParticipantReplicas":{ + "shape":"RecordParticipantReplicas", + "documentation":"

        Optional field to disable replica participant recording. If this is set to false when a participant is a replica, replica participants are not recorded. Default: true.

        " } }, "documentation":"

        Object specifying a configuration for individual participant recording.

        " @@ -834,7 +890,7 @@ "members":{ "targetSegmentDurationSeconds":{ "shape":"CompositionRecordingTargetSegmentDurationSeconds", - "documentation":"

        Defines the target duration for recorded segments generated when using composite recording. Segments may have durations shorter than the specified value when needed to ensure each segment begins with a keyframe. Default: 2.

        " + "documentation":"

        Defines the target duration for recorded segments generated when using composite recording. Default: 2.

        " } }, "documentation":"

        An object representing a configuration of HLS recordings for server-side composition.

        " @@ -1467,6 +1523,18 @@ "errorCode":{ "shape":"EventErrorCode", "documentation":"

        If the event is an error event, the error code is provided to give insight into the specific error that occurred. If the event is not an error event, this field is null.

        • B_FRAME_PRESENT — The participant's stream includes B-frames. For details, see IVS RTMP Publishing.

        • BITRATE_EXCEEDED — The participant exceeded the maximum supported bitrate. For details, see Service Quotas.

        • INSUFFICIENT_CAPABILITIES — The participant tried to take an action that the participant’s token is not allowed to do. For details on participant capabilities, see the capabilities field in CreateParticipantToken.

        • INTERNAL_SERVER_EXCEPTION — The participant failed to publish to the stage due to an internal server error.

        • INVALID_AUDIO_CODEC — The participant is using an invalid audio codec. For details, see Stream Ingest.

        • INVALID_INPUT — The participant is using an invalid input stream.

        • INVALID_PROTOCOL — The participant's IngestConfiguration resource is configured for RTMPS but they tried streaming with RTMP. For details, see IVS RTMP Publishing.

        • INVALID_STREAM_KEY — The participant is using an invalid stream key. For details, see IVS RTMP Publishing.

        • INVALID_VIDEO_CODEC — The participant is using an invalid video codec. For details, see Stream Ingest.

        • PUBLISHER_NOT_FOUND — The participant tried to subscribe to a publisher that doesn’t exist.

        • QUOTA_EXCEEDED — The number of participants who want to publish/subscribe to a stage exceeds the quota. For details, see Service Quotas.

        • RESOLUTION_EXCEEDED — The participant exceeded the maximum supported resolution. For details, see Service Quotas.

        • REUSE_OF_STREAM_KEY — The participant tried to use a stream key that is associated with another active stage session.

        • STREAM_DURATION_EXCEEDED — The participant exceeded the maximum allowed stream duration. For details, see Service Quotas.

        " + }, + "destinationStageArn":{ + "shape":"StageArn", + "documentation":"

        ARN of the stage where the participant is replicated. Applicable only if the event name is REPLICATION_STARTED or REPLICATION_STOPPED.

        " + }, + "destinationSessionId":{ + "shape":"StageSessionId", + "documentation":"

        ID of the session within the destination stage. Applicable only if the event name is REPLICATION_STARTED or REPLICATION_STOPPED.

        " + }, + "replica":{ + "shape":"Replica", + "documentation":"

        If true, this indicates that the participantId refers to a replicated participant. If this is a subscribe event, the flag refers to remoteParticipantId instead.

        " } }, "documentation":"

        An occurrence during a stage session.

        " @@ -1505,7 +1573,9 @@ "SUBSCRIBE_STOPPED", "PUBLISH_ERROR", "SUBSCRIBE_ERROR", - "JOIN_ERROR" + "JOIN_ERROR", + "REPLICATION_STARTED", + "REPLICATION_STOPPED" ] }, "Framerate":{ @@ -2096,6 +2166,45 @@ } } }, + "ListParticipantReplicasRequest":{ + "type":"structure", + "required":[ + "sourceStageArn", + "participantId" + ], + "members":{ + "sourceStageArn":{ + "shape":"StageArn", + "documentation":"

        ARN of the stage where the participant is publishing.

        " + }, + "participantId":{ + "shape":"ParticipantId", + "documentation":"

        Participant ID of the publisher that has been replicated. This is assigned by IVS and returned by CreateParticipantToken, or it is the jti (JWT ID) used to create a self-signed token.

        " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

        The first replica to retrieve. This is used for pagination; see the nextToken response field.

        " + }, + "maxResults":{ + "shape":"MaxParticipantReplicaResults", + "documentation":"

        Maximum number of results to return. Default: 50.

        " + } + } + }, + "ListParticipantReplicasResponse":{ + "type":"structure", + "required":["replicas"], + "members":{ + "replicas":{ + "shape":"ParticipantReplicaList", + "documentation":"

        List of all participant replicas.

        " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

        If there are more replicas than maxResults, use nextToken in the request to get the next set.
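
        For illustration, a minimal pagination sketch against a client generated from this model. The class, method, and builder names below follow the SDK's standard codegen conventions and are otherwise assumptions; the stage ARN and participant ID are placeholders.

        import software.amazon.awssdk.services.ivsrealtime.IvsRealTimeClient;
        import software.amazon.awssdk.services.ivsrealtime.model.ListParticipantReplicasRequest;
        import software.amazon.awssdk.services.ivsrealtime.model.ListParticipantReplicasResponse;

        public class ListReplicasSketch {
            public static void main(String[] args) {
                try (IvsRealTimeClient ivs = IvsRealTimeClient.create()) {
                    String nextToken = null;
                    do {
                        // Fetch one page of replicas for a single publisher; maxResults is capped at 50 by the model.
                        ListParticipantReplicasResponse page = ivs.listParticipantReplicas(
                                ListParticipantReplicasRequest.builder()
                                        .sourceStageArn("arn:aws:ivs:us-west-2:123456789012:stage/AbCdEf123456") // placeholder
                                        .participantId("xYz987AbC")                                              // placeholder
                                        .maxResults(50)
                                        .nextToken(nextToken) // null on the first request
                                        .build());
                        page.replicas().forEach(replica -> System.out.println(
                                replica.destinationStageArn() + " : " + replica.replicationStateAsString()));
                        nextToken = page.nextToken(); // null once the last page is returned
                    } while (nextToken != null);
                }
            }
        }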

        " + } + } + }, "ListParticipantsRequest":{ "type":"structure", "required":[ @@ -2310,6 +2419,12 @@ "max":100, "min":1 }, + "MaxParticipantReplicaResults":{ + "type":"integer", + "box":true, + "max":50, + "min":1 + }, "MaxParticipantResults":{ "type":"integer", "box":true, @@ -2404,7 +2519,7 @@ }, "recordingS3Prefix":{ "shape":"ParticipantRecordingS3Prefix", - "documentation":"

        S3 prefix of the S3 bucket where the participant is being recorded, if individual participant recording is enabled, or \"\" (empty string), if recording is not enabled.

        " + "documentation":"

        S3 prefix of the S3 bucket where the participant is being recorded, if individual participant recording is enabled, or \"\" (empty string), if recording is not enabled. If individual participant recording merge is enabled, and if a stage publisher disconnects from a stage and then reconnects, IVS tries to record to the same S3 prefix as the previous session. See Merge Fragmented Individual Participant Recordings.

        " }, "recordingState":{ "shape":"ParticipantRecordingState", @@ -2413,6 +2528,22 @@ "protocol":{ "shape":"ParticipantProtocol", "documentation":"

        Type of ingest protocol that the participant employs for broadcasting.

        " + }, + "replicationType":{ + "shape":"ReplicationType", + "documentation":"

        Indicates if the participant has been replicated to another stage or is a replica from another stage. Default: NONE.

        " + }, + "replicationState":{ + "shape":"ReplicationState", + "documentation":"

        The participant's replication state.

        " + }, + "sourceStageArn":{ + "shape":"StageArn", + "documentation":"

        Source stage ARN from which this participant is replicated, if replicationType is REPLICA.

        " + }, + "sourceSessionId":{ + "shape":"StageSessionId", + "documentation":"

        ID of the session within the source stage, if replicationType is REPLICA.

        " } }, "documentation":"

        Object describing a participant that has joined a stage.

        " @@ -2515,6 +2646,48 @@ "max":10, "min":2 }, + "ParticipantReplica":{ + "type":"structure", + "required":[ + "sourceStageArn", + "participantId", + "sourceSessionId", + "destinationStageArn", + "destinationSessionId", + "replicationState" + ], + "members":{ + "sourceStageArn":{ + "shape":"StageArn", + "documentation":"

        ARN of the stage from which this participant is replicated.

        " + }, + "participantId":{ + "shape":"ParticipantId", + "documentation":"

        Participant ID of the publisher that will be replicated. This is assigned by IVS and returned by CreateParticipantToken, or it is the jti (JWT ID) used to create a self-signed token.

        " + }, + "sourceSessionId":{ + "shape":"StageSessionId", + "documentation":"

        ID of the session within the source stage.

        " + }, + "destinationStageArn":{ + "shape":"StageArn", + "documentation":"

        ARN of the stage where the participant is replicated.

        " + }, + "destinationSessionId":{ + "shape":"StageSessionId", + "documentation":"

        ID of the session within the destination stage.

        " + }, + "replicationState":{ + "shape":"ReplicationState", + "documentation":"

        Replica’s current replication state.

        " + } + }, + "documentation":"

        Information about the replicated destination stage for a participant.

        " + }, + "ParticipantReplicaList":{ + "type":"list", + "member":{"shape":"ParticipantReplica"} + }, "ParticipantState":{ "type":"string", "enum":[ @@ -2548,6 +2721,22 @@ "recordingState":{ "shape":"ParticipantRecordingState", "documentation":"

        The participant’s recording state.

        " + }, + "replicationType":{ + "shape":"ReplicationType", + "documentation":"

        Indicates if the participant has been replicated to another stage or is a replica from another stage. Default: NONE.

        " + }, + "replicationState":{ + "shape":"ReplicationState", + "documentation":"

        The participant's replication state.

        " + }, + "sourceStageArn":{ + "shape":"StageArn", + "documentation":"

        ARN of the stage from which this participant is replicated.

        " + }, + "sourceSessionId":{ + "shape":"StageSessionId", + "documentation":"

        ID of the session within the source stage, if replicationType is REPLICA.

        " } }, "documentation":"

        Summary object describing a participant that has joined a stage.

        " @@ -2660,7 +2849,12 @@ "type":"timestamp", "timestampFormat":"iso8601" }, - "ParticipantTokenId":{"type":"string"}, + "ParticipantTokenId":{ + "type":"string", + "max":64, + "min":0, + "pattern":"[a-zA-Z0-9-_]*" + }, "ParticipantTokenList":{ "type":"list", "member":{"shape":"ParticipantToken"} @@ -2879,6 +3073,13 @@ "documentation":"

        Summary information about a public key.

        " }, "Published":{"type":"boolean"}, + "ReconnectWindowSeconds":{ + "type":"integer", + "box":true, + "max":60, + "min":0 + }, + "RecordParticipantReplicas":{"type":"boolean"}, "RecordingConfiguration":{ "type":"structure", "members":{ @@ -2897,6 +3098,22 @@ "type":"string", "enum":["HLS"] }, + "Replica":{"type":"boolean"}, + "ReplicationState":{ + "type":"string", + "enum":[ + "ACTIVE", + "STOPPED" + ] + }, + "ReplicationType":{ + "type":"string", + "enum":[ + "SOURCE", + "REPLICA", + "NONE" + ] + }, "ResourceArn":{ "type":"string", "max":128, @@ -3265,6 +3482,83 @@ } } }, + "StartParticipantReplicationRequest":{ + "type":"structure", + "required":[ + "sourceStageArn", + "destinationStageArn", + "participantId" + ], + "members":{ + "sourceStageArn":{ + "shape":"StageArn", + "documentation":"

        ARN of the stage where the participant is publishing.

        " + }, + "destinationStageArn":{ + "shape":"StageArn", + "documentation":"

        ARN of the stage to which the participant will be replicated.

        " + }, + "participantId":{ + "shape":"ParticipantId", + "documentation":"

        Participant ID of the publisher that will be replicated. This is assigned by IVS and returned by CreateParticipantToken, or it is the jti (JWT ID) used to create a self-signed token.

        " + }, + "reconnectWindowSeconds":{ + "shape":"ReconnectWindowSeconds", + "documentation":"

        If the participant disconnects and then reconnects within the specified interval, replication will continue to be ACTIVE. Default: 0.

        " + }, + "attributes":{ + "shape":"ParticipantAttributes", + "documentation":"

        Application-provided attributes to set on the replicated participant in the destination stage. Map keys and values can contain UTF-8 encoded text. The maximum length of this field is 1 KB total. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

        These attributes are merged with any attributes set for this participant when creating the token. If there is overlap in keys, the values in these attributes are replaced.
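
        As a hedged illustration of the request shape above, a minimal sketch of starting replication with the generated client (names follow standard codegen conventions and are assumptions; ARNs, participant ID, and the attribute map are placeholders):

        import java.util.Collections;
        import software.amazon.awssdk.services.ivsrealtime.IvsRealTimeClient;
        import software.amazon.awssdk.services.ivsrealtime.model.StartParticipantReplicationRequest;

        public class StartReplicationSketch {
            public static void main(String[] args) {
                try (IvsRealTimeClient ivs = IvsRealTimeClient.create()) {
                    ivs.startParticipantReplication(StartParticipantReplicationRequest.builder()
                            .sourceStageArn("arn:aws:ivs:us-west-2:123456789012:stage/SourceAbc123")    // placeholder
                            .destinationStageArn("arn:aws:ivs:us-west-2:123456789012:stage/DestXyz789") // placeholder
                            .participantId("xYz987AbC")                                                 // placeholder
                            .reconnectWindowSeconds(30) // tolerate brief disconnects (model allows 0-60)
                            .attributes(Collections.singletonMap("replicaSource", "example")) // merged with token attributes
                            .build());
                }
            }
        }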

        " + } + } + }, + "StartParticipantReplicationResponse":{ + "type":"structure", + "members":{ + "accessControlAllowOrigin":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Access-Control-Allow-Origin" + }, + "accessControlExposeHeaders":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Access-Control-Expose-Headers" + }, + "cacheControl":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Cache-Control" + }, + "contentSecurityPolicy":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Content-Security-Policy" + }, + "strictTransportSecurity":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Strict-Transport-Security" + }, + "xContentTypeOptions":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"X-Content-Type-Options" + }, + "xFrameOptions":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"X-Frame-Options" + } + } + }, "StopCompositionRequest":{ "type":"structure", "required":["arn"], @@ -3280,6 +3574,75 @@ "members":{ } }, + "StopParticipantReplicationRequest":{ + "type":"structure", + "required":[ + "sourceStageArn", + "destinationStageArn", + "participantId" + ], + "members":{ + "sourceStageArn":{ + "shape":"StageArn", + "documentation":"

        ARN of the stage where the participant is publishing.

        " + }, + "destinationStageArn":{ + "shape":"StageArn", + "documentation":"

        ARN of the stage where the participant has been replicated.

        " + }, + "participantId":{ + "shape":"ParticipantId", + "documentation":"

        Participant ID of the publisher that has been replicated. This is assigned by IVS and returned by CreateParticipantToken, or it is the jti (JWT ID) used to create a self-signed token.
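
        A matching sketch for stopping replication, under the same assumptions as the start example; all three members are required by the model, and the identifiers are placeholders.

        import software.amazon.awssdk.services.ivsrealtime.IvsRealTimeClient;
        import software.amazon.awssdk.services.ivsrealtime.model.StopParticipantReplicationRequest;

        public class StopReplicationSketch {
            public static void main(String[] args) {
                try (IvsRealTimeClient ivs = IvsRealTimeClient.create()) {
                    ivs.stopParticipantReplication(StopParticipantReplicationRequest.builder()
                            .sourceStageArn("arn:aws:ivs:us-west-2:123456789012:stage/SourceAbc123")    // placeholder
                            .destinationStageArn("arn:aws:ivs:us-west-2:123456789012:stage/DestXyz789") // placeholder
                            .participantId("xYz987AbC")                                                 // placeholder
                            .build());
                }
            }
        }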

        " + } + } + }, + "StopParticipantReplicationResponse":{ + "type":"structure", + "members":{ + "accessControlAllowOrigin":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Access-Control-Allow-Origin" + }, + "accessControlExposeHeaders":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Access-Control-Expose-Headers" + }, + "cacheControl":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Cache-Control" + }, + "contentSecurityPolicy":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Content-Security-Policy" + }, + "strictTransportSecurity":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Strict-Transport-Security" + }, + "xContentTypeOptions":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"X-Content-Type-Options" + }, + "xFrameOptions":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"X-Frame-Options" + } + } + }, "StorageConfiguration":{ "type":"structure", "required":["arn"], @@ -3616,5 +3979,5 @@ }, "errorMessage":{"type":"string"} }, - "documentation":"

        The Amazon Interactive Video Service (IVS) real-time API is REST compatible, using a standard HTTP API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, including errors.

        Key Concepts

        • Stage — A virtual space where participants can exchange video in real time.

        • Participant token — A token that authenticates a participant when they join a stage.

        • Participant object — Represents participants (people) in the stage and contains information about them. When a token is created, it includes a participant ID; when a participant uses that token to join a stage, the participant is associated with that participant ID. There is a 1:1 mapping between participant tokens and participants.

        For server-side composition:

        • Composition process — Composites participants of a stage into a single video and forwards it to a set of outputs (e.g., IVS channels). Composition operations support this process.

        • Composition — Controls the look of the outputs, including how participants are positioned in the video.

        For more information about your IVS live stream, also see Getting Started with Amazon IVS Real-Time Streaming.

        Tagging

        A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Best practices and strategies in Tagging AWS Resources and Tag Editor for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS stages has no service-specific constraints beyond what is documented there.

        Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags).

        The Amazon IVS real-time API has these tag-related operations: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Stage.

        At most 50 tags can be applied to a resource.

        " + "documentation":"

        The Amazon Interactive Video Service (IVS) real-time API is REST compatible, using a standard HTTP API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, including errors.

        Key Concepts

        • Stage — A virtual space where participants can exchange video in real time.

        • Participant token — A token that authenticates a participant when they join a stage.

        • Participant object — Represents participants (people) in the stage and contains information about them. When a token is created, it includes a participant ID; when a participant uses that token to join a stage, the participant is associated with that participant ID. There is a 1:1 mapping between participant tokens and participants.

        For server-side composition:

        • Composition process — Composites participants of a stage into a single video and forwards it to a set of outputs (e.g., IVS channels). Composition operations support this process.

        • Composition — Controls the look of the outputs, including how participants are positioned in the video.

        For participant replication:

        • Source stage — The stage where the participant originally joined, which is used as the source for replication.

        • Destination stage — The stage to which the participant is replicated.

        • Replicated participant — A participant in a stage that is replicated to one or more destination stages.

        • Replica participant — A participant in a destination stage that is replicated from another stage (the source stage).

        For more information about your IVS live stream, also see Getting Started with Amazon IVS Real-Time Streaming.

        Tagging

        A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Best practices and strategies in Tagging AWS Resources and Tag Editor for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS stages have no service-specific constraints beyond what is documented there.

        Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags).

        The Amazon IVS real-time API has these tag-related operations: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Stage.

        At most 50 tags can be applied to a resource.

        " } diff --git a/services/kafka/pom.xml b/services/kafka/pom.xml index edea26a63b38..8d9d064b8af5 100644 --- a/services/kafka/pom.xml +++ b/services/kafka/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT kafka AWS Java SDK :: Services :: Kafka diff --git a/services/kafkaconnect/pom.xml b/services/kafkaconnect/pom.xml index f675355fd86b..445f055833e9 100644 --- a/services/kafkaconnect/pom.xml +++ b/services/kafkaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT kafkaconnect AWS Java SDK :: Services :: Kafka Connect diff --git a/services/kendra/pom.xml b/services/kendra/pom.xml index 145c7cd4036e..632d04a9d370 100644 --- a/services/kendra/pom.xml +++ b/services/kendra/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT kendra AWS Java SDK :: Services :: Kendra diff --git a/services/kendraranking/pom.xml b/services/kendraranking/pom.xml index e5c853b521d6..fc5010114555 100644 --- a/services/kendraranking/pom.xml +++ b/services/kendraranking/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT kendraranking AWS Java SDK :: Services :: Kendra Ranking diff --git a/services/keyspaces/pom.xml b/services/keyspaces/pom.xml index 4011b6ca35eb..85d62997347d 100644 --- a/services/keyspaces/pom.xml +++ b/services/keyspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT keyspaces AWS Java SDK :: Services :: Keyspaces diff --git a/services/kinesis/pom.xml b/services/kinesis/pom.xml index ea4033ad4ae7..3b594d1e4fe4 100644 --- a/services/kinesis/pom.xml +++ b/services/kinesis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT kinesis AWS Java SDK :: Services :: Amazon Kinesis diff --git a/services/kinesisanalytics/pom.xml b/services/kinesisanalytics/pom.xml index 76e54a49a46c..161cdce585f5 100644 --- a/services/kinesisanalytics/pom.xml +++ b/services/kinesisanalytics/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT kinesisanalytics AWS Java SDK :: Services :: Amazon Kinesis Analytics diff --git a/services/kinesisanalyticsv2/pom.xml b/services/kinesisanalyticsv2/pom.xml index 8f8a1f310dee..3a7316abd3c8 100644 --- a/services/kinesisanalyticsv2/pom.xml +++ b/services/kinesisanalyticsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT kinesisanalyticsv2 AWS Java SDK :: Services :: Kinesis Analytics V2 diff --git a/services/kinesisvideo/pom.xml b/services/kinesisvideo/pom.xml index fea2f07d3b86..7e441756a7ee 100644 --- a/services/kinesisvideo/pom.xml +++ b/services/kinesisvideo/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 kinesisvideo diff --git a/services/kinesisvideoarchivedmedia/pom.xml b/services/kinesisvideoarchivedmedia/pom.xml index 520b03a9f8b3..3dc80dbf4309 100644 --- a/services/kinesisvideoarchivedmedia/pom.xml +++ b/services/kinesisvideoarchivedmedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT kinesisvideoarchivedmedia AWS Java SDK :: Services :: Kinesis Video Archived Media diff --git a/services/kinesisvideomedia/pom.xml b/services/kinesisvideomedia/pom.xml index 985a15cc0770..37f1379fa54a 100644 --- a/services/kinesisvideomedia/pom.xml +++ b/services/kinesisvideomedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk 
services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT kinesisvideomedia AWS Java SDK :: Services :: Kinesis Video Media diff --git a/services/kinesisvideosignaling/pom.xml b/services/kinesisvideosignaling/pom.xml index 76f27be80525..25624a9ab18c 100644 --- a/services/kinesisvideosignaling/pom.xml +++ b/services/kinesisvideosignaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT kinesisvideosignaling AWS Java SDK :: Services :: Kinesis Video Signaling diff --git a/services/kinesisvideowebrtcstorage/pom.xml b/services/kinesisvideowebrtcstorage/pom.xml index f53021434c30..0159c2c47752 100644 --- a/services/kinesisvideowebrtcstorage/pom.xml +++ b/services/kinesisvideowebrtcstorage/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT kinesisvideowebrtcstorage AWS Java SDK :: Services :: Kinesis Video Web RTC Storage diff --git a/services/kms/pom.xml b/services/kms/pom.xml index 560bf45b3c6f..98fd85c27849 100644 --- a/services/kms/pom.xml +++ b/services/kms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT kms AWS Java SDK :: Services :: AWS KMS diff --git a/services/kms/src/main/resources/codegen-resources/service-2.json b/services/kms/src/main/resources/codegen-resources/service-2.json index 8e4c88b3ee10..ceefb57f956e 100644 --- a/services/kms/src/main/resources/codegen-resources/service-2.json +++ b/services/kms/src/main/resources/codegen-resources/service-2.json @@ -30,7 +30,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Cancels the deletion of a KMS key. When this operation succeeds, the key state of the KMS key is Disabled. To enable the KMS key, use EnableKey.

        For more information about scheduling and canceling deletion of a KMS key, see Deleting KMS keys in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:CancelKeyDeletion (key policy)

        Related operations: ScheduleKeyDeletion

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Cancels the deletion of a KMS key. When this operation succeeds, the key state of the KMS key is Disabled. To enable the KMS key, use EnableKey.

        For more information about scheduling and canceling deletion of a KMS key, see Deleting KMS keys in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:CancelKeyDeletion (key policy)

        Related operations: ScheduleKeyDeletion

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
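
        A minimal sketch of the flow described above with the v2 KmsClient: cancel the pending deletion, then re-enable the key, since cancellation leaves it Disabled (the key ID is a placeholder).

        import software.amazon.awssdk.services.kms.KmsClient;

        public class CancelKeyDeletionSketch {
            public static void main(String[] args) {
                String keyId = "1234abcd-12ab-34cd-56ef-1234567890ab"; // placeholder key ID
                try (KmsClient kms = KmsClient.create()) {
                    kms.cancelKeyDeletion(r -> r.keyId(keyId)); // key state becomes Disabled
                    kms.enableKey(r -> r.keyId(keyId));         // re-enable the key for use
                }
            }
        }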

        " }, "ConnectCustomKeyStore":{ "name":"ConnectCustomKeyStore", @@ -47,7 +47,7 @@ {"shape":"KMSInternalException"}, {"shape":"CloudHsmClusterInvalidConfigurationException"} ], - "documentation":"

        Connects or reconnects a custom key store to its backing key store. For an CloudHSM key store, ConnectCustomKeyStore connects the key store to its associated CloudHSM cluster. For an external key store, ConnectCustomKeyStore connects the key store to the external key store proxy that communicates with your external key manager.

        The custom key store must be connected before you can create KMS keys in the key store or use the KMS keys it contains. You can disconnect and reconnect a custom key store at any time.

        The connection process for a custom key store can take an extended amount of time to complete. This operation starts the connection process, but it does not wait for it to complete. When it succeeds, this operation quickly returns an HTTP 200 response and a JSON object with no properties. However, this response does not indicate that the custom key store is connected. To get the connection state of the custom key store, use the DescribeCustomKeyStores operation.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        The ConnectCustomKeyStore operation might fail for various reasons. To find the reason, use the DescribeCustomKeyStores operation and see the ConnectionErrorCode in the response. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

        To fix the failure, use the DisconnectCustomKeyStore operation to disconnect the custom key store, correct the error, use the UpdateCustomKeyStore operation if necessary, and then use ConnectCustomKeyStore again.

        CloudHSM key store

        During the connection process for an CloudHSM key store, KMS finds the CloudHSM cluster that is associated with the custom key store, creates the connection infrastructure, connects to the cluster, logs into the CloudHSM client as the kmsuser CU, and rotates its password.

        To connect an CloudHSM key store, its associated CloudHSM cluster must have at least one active HSM. To get the number of active HSMs in a cluster, use the DescribeClusters operation. To add HSMs to the cluster, use the CreateHsm operation. Also, the kmsuser crypto user (CU) must not be logged into the cluster. This prevents KMS from using this account to log in.

        If you are having trouble connecting or disconnecting a CloudHSM key store, see Troubleshooting an CloudHSM key store in the Key Management Service Developer Guide.

        External key store

        When you connect an external key store that uses public endpoint connectivity, KMS tests its ability to communicate with your external key manager by sending a request via the external key store proxy.

        When you connect to an external key store that uses VPC endpoint service connectivity, KMS establishes the networking elements that it needs to communicate with your external key manager via the external key store proxy. This includes creating an interface endpoint to the VPC endpoint service and a private hosted zone for traffic between KMS and the VPC endpoint service.

        To connect an external key store, KMS must be able to connect to the external key store proxy, the external key store proxy must be able to communicate with your external key manager, and the external key manager must be available for cryptographic operations.

        If you are having trouble connecting or disconnecting an external key store, see Troubleshooting an external key store in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:ConnectCustomKeyStore (IAM policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Connects or reconnects a custom key store to its backing key store. For a CloudHSM key store, ConnectCustomKeyStore connects the key store to its associated CloudHSM cluster. For an external key store, ConnectCustomKeyStore connects the key store to the external key store proxy that communicates with your external key manager.

        The custom key store must be connected before you can create KMS keys in the key store or use the KMS keys it contains. You can disconnect and reconnect a custom key store at any time.

        The connection process for a custom key store can take an extended amount of time to complete. This operation starts the connection process, but it does not wait for it to complete. When it succeeds, this operation quickly returns an HTTP 200 response and a JSON object with no properties. However, this response does not indicate that the custom key store is connected. To get the connection state of the custom key store, use the DescribeCustomKeyStores operation.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        The ConnectCustomKeyStore operation might fail for various reasons. To find the reason, use the DescribeCustomKeyStores operation and see the ConnectionErrorCode in the response. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

        To fix the failure, use the DisconnectCustomKeyStore operation to disconnect the custom key store, correct the error, use the UpdateCustomKeyStore operation if necessary, and then use ConnectCustomKeyStore again.

        CloudHSM key store

        During the connection process for a CloudHSM key store, KMS finds the CloudHSM cluster that is associated with the custom key store, creates the connection infrastructure, connects to the cluster, logs into the CloudHSM client as the kmsuser CU, and rotates its password.

        To connect a CloudHSM key store, its associated CloudHSM cluster must have at least one active HSM. To get the number of active HSMs in a cluster, use the DescribeClusters operation. To add HSMs to the cluster, use the CreateHsm operation. Also, the kmsuser crypto user (CU) must not be logged into the cluster. This prevents KMS from using this account to log in.

        If you are having trouble connecting or disconnecting a CloudHSM key store, see Troubleshooting a CloudHSM key store in the Key Management Service Developer Guide.

        External key store

        When you connect an external key store that uses public endpoint connectivity, KMS tests its ability to communicate with your external key manager by sending a request via the external key store proxy.

        When you connect to an external key store that uses VPC endpoint service connectivity, KMS establishes the networking elements that it needs to communicate with your external key manager via the external key store proxy. This includes creating an interface endpoint to the VPC endpoint service and a private hosted zone for traffic between KMS and the VPC endpoint service.

        To connect an external key store, KMS must be able to connect to the external key store proxy, the external key store proxy must be able to communicate with your external key manager, and the external key manager must be available for cryptographic operations.

        If you are having trouble connecting or disconnecting an external key store, see Troubleshooting an external key store in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:ConnectCustomKeyStore (IAM policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
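
        A minimal sketch of the connect-then-poll flow described above, using the v2 KmsClient; the key store ID and the 30-second poll interval are placeholders.

        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.CustomKeyStoresListEntry;

        public class ConnectCustomKeyStoreSketch {
            public static void main(String[] args) throws InterruptedException {
                String storeId = "cks-1234567890abcdef0"; // placeholder custom key store ID
                try (KmsClient kms = KmsClient.create()) {
                    kms.connectCustomKeyStore(r -> r.customKeyStoreId(storeId)); // returns before the store is connected
                    CustomKeyStoresListEntry store;
                    do {
                        Thread.sleep(30_000); // the connection process can take a while
                        store = kms.describeCustomKeyStores(r -> r.customKeyStoreId(storeId))
                                   .customKeyStores().get(0);
                    } while ("CONNECTING".equals(store.connectionStateAsString()));
                    // CONNECTED on success; on FAILED, inspect the ConnectionErrorCode.
                    System.out.println(store.connectionStateAsString() + " / " + store.connectionErrorCodeAsString());
                }
            }
        }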

        " }, "CreateAlias":{ "name":"CreateAlias", @@ -65,7 +65,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Creates a friendly name for a KMS key.

        Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        You can use an alias to identify a KMS key in the KMS console, in the DescribeKey operation and in cryptographic operations, such as Encrypt and GenerateDataKey. You can also change the KMS key that's associated with the alias (UpdateAlias) or delete the alias (DeleteAlias) at any time. These operations don't affect the underlying KMS key.

        You can associate the alias with any customer managed key in the same Amazon Web Services Region. Each alias is associated with only one KMS key at a time, but a KMS key can have multiple aliases. A valid KMS key is required. You can't create an alias without a KMS key.

        The alias must be unique in the account and Region, but you can have aliases with the same name in different Regions. For detailed information about aliases, see Using aliases in the Key Management Service Developer Guide.

        This operation does not return a response. To get the alias that you created, use the ListAliases operation.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on an alias in a different Amazon Web Services account.

        Required permissions

        For details, see Controlling access to aliases in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Creates a friendly name for a KMS key.

        Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        You can use an alias to identify a KMS key in the KMS console, in the DescribeKey operation and in cryptographic operations, such as Encrypt and GenerateDataKey. You can also change the KMS key that's associated with the alias (UpdateAlias) or delete the alias (DeleteAlias) at any time. These operations don't affect the underlying KMS key.

        You can associate the alias with any customer managed key in the same Amazon Web Services Region. Each alias is associated with only one KMS key at a time, but a KMS key can have multiple aliases. A valid KMS key is required. You can't create an alias without a KMS key.

        The alias must be unique in the account and Region, but you can have aliases with the same name in different Regions. For detailed information about aliases, see Aliases in KMS in the Key Management Service Developer Guide.

        This operation does not return a response. To get the alias that you created, use the ListAliases operation.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on an alias in a different Amazon Web Services account.

        Required permissions

        For details, see Controlling access to aliases in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
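
        A minimal sketch of the alias workflow described above with the v2 KmsClient; the alias name and target key ID are placeholders. Because CreateAlias returns no fields, the sketch lists aliases afterward to confirm.

        import software.amazon.awssdk.services.kms.KmsClient;

        public class CreateAliasSketch {
            public static void main(String[] args) {
                try (KmsClient kms = KmsClient.create()) {
                    // Alias names must begin with "alias/" (and must not begin with "alias/aws/").
                    kms.createAlias(r -> r.aliasName("alias/example-app-key")                    // placeholder
                                          .targetKeyId("1234abcd-12ab-34cd-56ef-1234567890ab")); // placeholder
                    kms.listAliasesPaginator().aliases().forEach(a ->
                            System.out.println(a.aliasName() + " -> " + a.targetKeyId()));
                }
            }
        }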

        " }, "CreateCustomKeyStore":{ "name":"CreateCustomKeyStore", @@ -94,7 +94,7 @@ {"shape":"XksProxyInvalidResponseException"}, {"shape":"XksProxyInvalidConfigurationException"} ], - "documentation":"

        Creates a custom key store backed by a key store that you own and manage. When you use a KMS key in a custom key store for a cryptographic operation, the cryptographic operation is actually performed in your key store using your keys. KMS supports CloudHSM key stores backed by an CloudHSM cluster and external key stores backed by an external key store proxy and external key manager outside of Amazon Web Services.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        Before you create the custom key store, the required elements must be in place and operational. We recommend that you use the test tools that KMS provides to verify the configuration your external key store proxy. For details about the required elements and verification tests, see Assemble the prerequisites (for CloudHSM key stores) or Assemble the prerequisites (for external key stores) in the Key Management Service Developer Guide.

        To create a custom key store, use the following parameters.

        • To create an CloudHSM key store, specify the CustomKeyStoreName, CloudHsmClusterId, KeyStorePassword, and TrustAnchorCertificate. The CustomKeyStoreType parameter is optional for CloudHSM key stores. If you include it, set it to the default value, AWS_CLOUDHSM. For help with failures, see Troubleshooting an CloudHSM key store in the Key Management Service Developer Guide.

        • To create an external key store, specify the CustomKeyStoreName and a CustomKeyStoreType of EXTERNAL_KEY_STORE. Also, specify values for XksProxyConnectivity, XksProxyAuthenticationCredential, XksProxyUriEndpoint, and XksProxyUriPath. If your XksProxyConnectivity value is VPC_ENDPOINT_SERVICE, specify the XksProxyVpcEndpointServiceName parameter. For help with failures, see Troubleshooting an external key store in the Key Management Service Developer Guide.

        For external key stores:

        Some external key managers provide a simpler method for creating an external key store. For details, see your external key manager documentation.

        When creating an external key store in the KMS console, you can upload a JSON-based proxy configuration file with the desired values. You cannot use a proxy configuration with the CreateCustomKeyStore operation. However, you can use the values in the file to help you determine the correct values for the CreateCustomKeyStore parameters.

        When the operation completes successfully, it returns the ID of the new custom key store. Before you can use your new custom key store, you need to use the ConnectCustomKeyStore operation to connect a new CloudHSM key store to its CloudHSM cluster, or to connect a new external key store to the external key store proxy for your external key manager. Even if you are not going to use your custom key store immediately, you might want to connect it to verify that all settings are correct and then disconnect it until you are ready to use it.

        For help with failures, see Troubleshooting a custom key store in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:CreateCustomKeyStore (IAM policy).

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Creates a custom key store backed by a key store that you own and manage. When you use a KMS key in a custom key store for a cryptographic operation, the cryptographic operation is actually performed in your key store using your keys. KMS supports CloudHSM key stores backed by a CloudHSM cluster and external key stores backed by an external key store proxy and external key manager outside of Amazon Web Services.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        Before you create the custom key store, the required elements must be in place and operational. We recommend that you use the test tools that KMS provides to verify the configuration of your external key store proxy. For details about the required elements and verification tests, see Assemble the prerequisites (for CloudHSM key stores) or Assemble the prerequisites (for external key stores) in the Key Management Service Developer Guide.

        To create a custom key store, use the following parameters.

        • To create a CloudHSM key store, specify the CustomKeyStoreName, CloudHsmClusterId, KeyStorePassword, and TrustAnchorCertificate. The CustomKeyStoreType parameter is optional for CloudHSM key stores. If you include it, set it to the default value, AWS_CLOUDHSM. For help with failures, see Troubleshooting a CloudHSM key store in the Key Management Service Developer Guide.

        • To create an external key store, specify the CustomKeyStoreName and a CustomKeyStoreType of EXTERNAL_KEY_STORE. Also, specify values for XksProxyConnectivity, XksProxyAuthenticationCredential, XksProxyUriEndpoint, and XksProxyUriPath. If your XksProxyConnectivity value is VPC_ENDPOINT_SERVICE, specify the XksProxyVpcEndpointServiceName parameter. For help with failures, see Troubleshooting an external key store in the Key Management Service Developer Guide.

        For external key stores:

        Some external key managers provide a simpler method for creating an external key store. For details, see your external key manager documentation.

        When creating an external key store in the KMS console, you can upload a JSON-based proxy configuration file with the desired values. You cannot use a proxy configuration with the CreateCustomKeyStore operation. However, you can use the values in the file to help you determine the correct values for the CreateCustomKeyStore parameters.

        When the operation completes successfully, it returns the ID of the new custom key store. Before you can use your new custom key store, you need to use the ConnectCustomKeyStore operation to connect a new CloudHSM key store to its CloudHSM cluster, or to connect a new external key store to the external key store proxy for your external key manager. Even if you are not going to use your custom key store immediately, you might want to connect it to verify that all settings are correct and then disconnect it until you are ready to use it.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:CreateCustomKeyStore (IAM policy).

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "CreateGrant":{ "name":"CreateGrant", @@ -115,7 +115,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Adds a grant to a KMS key.

        A grant is a policy instrument that allows Amazon Web Services principals to use KMS keys in cryptographic operations. It also can allow them to view a KMS key (DescribeKey) and create and manage grants. When authorizing access to a KMS key, grants are considered along with key policies and IAM policies. Grants are often used for temporary permissions because you can create one, use its permissions, and delete it without changing your key policies or IAM policies.

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide . For examples of working with grants in several programming languages, see Programming grants.

        The CreateGrant operation returns a GrantToken and a GrantId.

        • When you create, retire, or revoke a grant, there might be a brief delay, usually less than five minutes, until the grant is available throughout KMS. This state is known as eventual consistency. Once the grant has achieved eventual consistency, the grantee principal can use the permissions in the grant without identifying the grant.

          However, to use the permissions in the grant immediately, use the GrantToken that CreateGrant returns. For details, see Using a grant token in the Key Management Service Developer Guide .

        • The CreateGrant operation also returns a GrantId. You can use the GrantId and a key identifier to identify the grant in the RetireGrant and RevokeGrant operations. To find the grant ID, use the ListGrants or ListRetirableGrants operations.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

        Required permissions: kms:CreateGrant (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Adds a grant to a KMS key.

        A grant is a policy instrument that allows Amazon Web Services principals to use KMS keys in cryptographic operations. It also can allow them to view a KMS key (DescribeKey) and create and manage grants. When authorizing access to a KMS key, grants are considered along with key policies and IAM policies. Grants are often used for temporary permissions because you can create one, use its permissions, and delete it without changing your key policies or IAM policies.

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide. For examples of creating grants in several programming languages, see Use CreateGrant with an Amazon Web Services SDK or CLI.

        The CreateGrant operation returns a GrantToken and a GrantId.

        • When you create, retire, or revoke a grant, there might be a brief delay, usually less than five minutes, until the grant is available throughout KMS. This state is known as eventual consistency. Once the grant has achieved eventual consistency, the grantee principal can use the permissions in the grant without identifying the grant.

          However, to use the permissions in the grant immediately, use the GrantToken that CreateGrant returns. For details, see Using a grant token in the Key Management Service Developer Guide.

        • The CreateGrant operation also returns a GrantId. You can use the GrantId and a key identifier to identify the grant in the RetireGrant and RevokeGrant operations. To find the grant ID, use the ListGrants or ListRetirableGrants operations.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

        Required permissions: kms:CreateGrant (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
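
        A minimal sketch of the grant flow described above with the v2 KmsClient: create a grant, then capture the GrantId and GrantToken it returns (the key ARN, grantee principal, and operations are placeholders).

        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.CreateGrantResponse;
        import software.amazon.awssdk.services.kms.model.GrantOperation;

        public class CreateGrantSketch {
            public static void main(String[] args) {
                try (KmsClient kms = KmsClient.create()) {
                    CreateGrantResponse grant = kms.createGrant(r -> r
                            .keyId("arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab") // placeholder
                            .granteePrincipal("arn:aws:iam::111122223333:role/ExampleRole")                       // placeholder
                            .operations(GrantOperation.DECRYPT, GrantOperation.DESCRIBE_KEY));
                    // Use the grant token for immediate access, before eventual consistency settles;
                    // use the grant ID later with RetireGrant or RevokeGrant.
                    System.out.println("GrantId: " + grant.grantId());
                    System.out.println("GrantToken: " + grant.grantToken());
                }
            }
        }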

        " }, "CreateKey":{ "name":"CreateKey", @@ -140,7 +140,7 @@ {"shape":"XksKeyAlreadyInUseException"}, {"shape":"XksKeyNotFoundException"} ], - "documentation":"

        Creates a unique customer managed KMS key in your Amazon Web Services account and Region. You can use a KMS key in cryptographic operations, such as encryption and signing. Some Amazon Web Services services let you use KMS keys that you create and manage to protect your service resources.

        A KMS key is a logical representation of a cryptographic key. In addition to the key material used in cryptographic operations, a KMS key includes metadata, such as the key ID, key policy, creation date, description, and key state. For details, see Managing keys in the Key Management Service Developer Guide

        Use the parameters of CreateKey to specify the type of KMS key, the source of its key material, its key policy, description, tags, and other properties.

        KMS has replaced the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

        To create different types of KMS keys, use the following guidance:

        Symmetric encryption KMS key

        By default, CreateKey creates a symmetric encryption KMS key with key material that KMS generates. This is the basic and most widely used type of KMS key, and provides the best performance.

        To create a symmetric encryption KMS key, you don't need to specify any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, the default value for KeyUsage, ENCRYPT_DECRYPT, and the default value for Origin, AWS_KMS, create a symmetric encryption KMS key with KMS key material.

        If you need a key for basic encryption and decryption or you are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data keys pairs. For details, see GenerateDataKey and GenerateDataKeyPair.

        Asymmetric KMS keys

        To create an asymmetric KMS key, use the KeySpec parameter to specify the type of key material in the KMS key. Then, use the KeyUsage parameter to determine whether the KMS key will be used to encrypt and decrypt or sign and verify. You can't change these properties after the KMS key is created.

        Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key so it can be used outside of KMS. Each KMS key can have only one key usage. KMS keys with RSA key pairs can be used to encrypt and decrypt data or sign and verify messages (but not both). KMS keys with NIST-recommended ECC key pairs can be used to sign and verify messages or derive shared secrets (but not both). KMS keys with ECC_SECG_P256K1 can be used only to sign and verify messages. KMS keys with SM2 key pairs (China Regions only) can be used to either encrypt and decrypt data, sign and verify messages, or derive shared secrets (you must choose one key usage type). For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        HMAC KMS key

        To create an HMAC KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys. You can't change these properties after the KMS key is created.

        HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.

        Multi-Region primary keys
        Imported key material

        To create a multi-Region primary key in the local Amazon Web Services Region, use the MultiRegion parameter with a value of True. To create a multi-Region replica key, that is, a KMS key with the same key ID and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its primary key to a replica key, use the UpdatePrimaryRegion operation.

        You can create multi-Region KMS keys for all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't create multi-Region keys in a custom key store.

        This operation supports multi-Region keys, an KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

        To import your own key material into a KMS key, begin by creating a KMS key with no key material. To do this, use the Origin parameter of CreateKey with a value of EXTERNAL. Next, use GetParametersForImport operation to get a public key and import token. Use the wrapping public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see Importing Key Material in the Key Management Service Developer Guide .

        You can import key material into KMS keys of all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't import key material into a KMS key in a custom key store.

        To create a multi-Region primary key with imported key material, use the Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For instructions, see Importing key material into multi-Region keys. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

        Custom key store

        A custom key store lets you protect your Amazon Web Services resources using keys in a backing key store that you own and manage. When you request a cryptographic operation with a KMS key in a custom key store, the operation is performed in the backing key store using its cryptographic keys.

        KMS supports CloudHSM key stores backed by an CloudHSM cluster and external key stores backed by an external key manager outside of Amazon Web Services. When you create a KMS key in an CloudHSM key store, KMS generates an encryption key in the CloudHSM cluster and associates it with the KMS key. When you create a KMS key in an external key store, you specify an existing encryption key in the external key manager.

        Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation.

        Before you create a KMS key in a custom key store, the ConnectionState of the key store must be CONNECTED. To connect the custom key store, use the ConnectCustomKeyStore operation. To find the ConnectionState, use the DescribeCustomKeyStores operation.

        To create a KMS key in a custom key store, use the CustomKeyStoreId. Use the default KeySpec value, SYMMETRIC_DEFAULT, and the default KeyUsage value, ENCRYPT_DECRYPT to create a symmetric encryption key. No other key type is supported in a custom key store.

        To create a KMS key in an CloudHSM key store, use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs in different Availability Zones in the Amazon Web Services Region.

        To create a KMS key in an external key store, use the Origin parameter with a value of EXTERNAL_KEY_STORE and an XksKeyId parameter that identifies an existing external key.

        Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation.

        Cross-account use: No. You cannot use this operation to create a KMS key in a different Amazon Web Services account.

        Required permissions: kms:CreateKey (IAM policy). To use the Tags parameter, kms:TagResource (IAM policy). For examples and information about related permissions, see Allow a user to create KMS keys in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Creates a unique customer managed KMS key in your Amazon Web Services account and Region. You can use a KMS key in cryptographic operations, such as encryption and signing. Some Amazon Web Services services let you use KMS keys that you create and manage to protect your service resources.

        A KMS key is a logical representation of a cryptographic key. In addition to the key material used in cryptographic operations, a KMS key includes metadata, such as the key ID, key policy, creation date, description, and key state.

        Use the parameters of CreateKey to specify the type of KMS key, the source of its key material, its key policy, description, tags, and other properties.

        KMS has replaced the term customer master key (CMK) with Amazon Web Services KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

        To create different types of KMS keys, use the following guidance:

        Symmetric encryption KMS key

        By default, CreateKey creates a symmetric encryption KMS key with key material that KMS generates. This is the basic and most widely used type of KMS key, and provides the best performance.

        To create a symmetric encryption KMS key, you don't need to specify any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, the default value for KeyUsage, ENCRYPT_DECRYPT, and the default value for Origin, AWS_KMS, create a symmetric encryption KMS key with KMS key material.

        If you need a key for basic encryption and decryption or you are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but symmetric encryption keys are typically used to generate data keys and data key pairs. For details, see GenerateDataKey and GenerateDataKeyPair.
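
        For illustration only (editorial, not part of the service model): a minimal AWS SDK for Java v2 sketch of this default case. Client construction is simplified and error handling is omitted.

        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.CreateKeyRequest;
        import software.amazon.awssdk.services.kms.model.CreateKeyResponse;

        try (KmsClient kms = KmsClient.create()) {
            // No parameters: the defaults (SYMMETRIC_DEFAULT, ENCRYPT_DECRYPT, AWS_KMS)
            // produce a symmetric encryption KMS key with KMS key material.
            CreateKeyResponse created = kms.createKey(CreateKeyRequest.builder().build());
            System.out.println("Created " + created.keyMetadata().keyId());
        }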

        Asymmetric KMS keys

        To create an asymmetric KMS key, use the KeySpec parameter to specify the type of key material in the KMS key. Then, use the KeyUsage parameter to determine whether the KMS key will be used to encrypt and decrypt or sign and verify. You can't change these properties after the KMS key is created.

        Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, ML-DSA key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key so it can be used outside of KMS. Each KMS key can have only one key usage. KMS keys with RSA key pairs can be used to encrypt and decrypt data or sign and verify messages (but not both). KMS keys with NIST-recommended ECC key pairs can be used to sign and verify messages or derive shared secrets (but not both). KMS keys with ECC_SECG_P256K1 can be used only to sign and verify messages. KMS keys with ML-DSA key pairs can be used to sign and verify messages. KMS keys with SM2 key pairs (China Regions only) can be used to either encrypt and decrypt data, sign and verify messages, or derive shared secrets (you must choose one key usage type). For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.
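
        A hedged sketch of the asymmetric case, reusing the kms client from the sketch above; KeySpec and KeyUsageType are the model's enums, and the chosen values are illustrative.

        // An RSA key pair dedicated to signing; spec and usage are fixed at creation.
        CreateKeyResponse signingKey = kms.createKey(r -> r
            .keySpec(KeySpec.RSA_2048)
            .keyUsage(KeyUsageType.SIGN_VERIFY));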

        HMAC KMS key

        To create an HMAC KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys. You can't change these properties after the KMS key is created.

        HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4,096 bytes.
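
        A sketch of the HMAC case under the same assumptions as the sketches above; HMAC_256 is one of the HMAC key specs.

        CreateKeyResponse hmacKey = kms.createKey(r -> r
            .keySpec(KeySpec.HMAC_256)
            .keyUsage(KeyUsageType.GENERATE_VERIFY_MAC)); // must be set even though it is the only valid value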

        Multi-Region primary keys

        To create a multi-Region primary key in the local Amazon Web Services Region, use the MultiRegion parameter with a value of True. To create a multi-Region replica key, that is, a KMS key with the same key ID and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its primary key to a replica key, use the UpdatePrimaryRegion operation.

        You can create multi-Region KMS keys for all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't create multi-Region keys in a custom key store.

        This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.
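
        A sketch of the multi-Region case under the same assumptions; the replica Region is a placeholder.

        CreateKeyResponse primary = kms.createKey(r -> r.multiRegion(true));
        // Create an interoperable replica in another Region:
        kms.replicateKey(r -> r
            .keyId(primary.keyMetadata().keyId())
            .replicaRegion("us-west-2"));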

        Imported key material

        To import your own key material into a KMS key, begin by creating a KMS key with no key material. To do this, use the Origin parameter of CreateKey with a value of EXTERNAL. Next, use the GetParametersForImport operation to get a public key and import token. Use the wrapping public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see Importing Key Material in the Key Management Service Developer Guide.

        You can import key material into KMS keys of all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't import key material into a KMS key in a custom key store.

        To create a multi-Region primary key with imported key material, use the Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For instructions, see Importing key material step 1. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.
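
        A sketch of the import flow under the same assumptions; wrappedMaterial stands in for key material that you have already encrypted with the wrapping public key, and the wrapping algorithm and spec shown are illustrative choices.

        CreateKeyResponse noMaterial = kms.createKey(r -> r.origin(OriginType.EXTERNAL));
        String keyId = noMaterial.keyMetadata().keyId();
        GetParametersForImportResponse params = kms.getParametersForImport(r -> r
            .keyId(keyId)
            .wrappingAlgorithm(AlgorithmSpec.RSAES_OAEP_SHA_256)
            .wrappingKeySpec(WrappingKeySpec.RSA_2048));
        // Encrypt your key material with params.publicKey() out of band, then:
        kms.importKeyMaterial(r -> r
            .keyId(keyId)
            .importToken(params.importToken())
            .encryptedKeyMaterial(wrappedMaterial) // SdkBytes placeholder
            .expirationModel(ExpirationModelType.KEY_MATERIAL_DOES_NOT_EXPIRE));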

        Custom key store

        A custom key store lets you protect your Amazon Web Services resources using keys in a backing key store that you own and manage. When you request a cryptographic operation with a KMS key in a custom key store, the operation is performed in the backing key store using its cryptographic keys.

        KMS supports CloudHSM key stores backed by a CloudHSM cluster and external key stores backed by an external key manager outside of Amazon Web Services. When you create a KMS key in a CloudHSM key store, KMS generates an encryption key in the CloudHSM cluster and associates it with the KMS key. When you create a KMS key in an external key store, you specify an existing encryption key in the external key manager.

        Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation.

        Before you create a KMS key in a custom key store, the ConnectionState of the key store must be CONNECTED. To connect the custom key store, use the ConnectCustomKeyStore operation. To find the ConnectionState, use the DescribeCustomKeyStores operation.

        To create a KMS key in a custom key store, use the CustomKeyStoreId. Use the default KeySpec value, SYMMETRIC_DEFAULT, and the default KeyUsage value, ENCRYPT_DECRYPT, to create a symmetric encryption key. No other key type is supported in a custom key store.

        To create a KMS key in a CloudHSM key store, use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs in different Availability Zones in the Amazon Web Services Region.

        To create a KMS key in an external key store, use the Origin parameter with a value of EXTERNAL_KEY_STORE and an XksKeyId parameter that identifies an existing external key.
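
        A sketch of both custom key store cases under the same assumptions; the store and external key IDs are placeholders.

        // CloudHSM key store:
        kms.createKey(r -> r
            .origin(OriginType.AWS_CLOUDHSM)
            .customKeyStoreId("cks-1234567890abcdef0"));
        // External key store:
        kms.createKey(r -> r
            .origin(OriginType.EXTERNAL_KEY_STORE)
            .customKeyStoreId("cks-0987654321fedcba0")
            .xksKeyId("example-external-key"));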

        Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation.

        Cross-account use: No. You cannot use this operation to create a KMS key in a different Amazon Web Services account.

        Required permissions: kms:CreateKey (IAM policy). To use the Tags parameter, kms:TagResource (IAM policy). For examples and information about related permissions, see Allow a user to create KMS keys in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "Decrypt":{ "name":"Decrypt", @@ -163,7 +163,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Decrypts ciphertext that was encrypted by a KMS key using any of the following operations:

        You can use this operation to decrypt ciphertext that was encrypted under a symmetric encryption KMS key or an asymmetric encryption KMS key. When the KMS key is asymmetric, you must specify the KMS key and the encryption algorithm that was used to encrypt the ciphertext. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        The Decrypt operation also decrypts ciphertext that was encrypted outside of KMS by the public key in a KMS asymmetric KMS key. However, it cannot decrypt symmetric ciphertext produced by other libraries, such as the Amazon Web Services Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with KMS.

        If the ciphertext was encrypted under a symmetric encryption KMS key, the KeyId parameter is optional. KMS can get this information from metadata that it adds to the symmetric ciphertext blob. This feature adds durability to your implementation by ensuring that authorized users can decrypt ciphertext decades after it was encrypted, even if they've lost track of the key ID. However, specifying the KMS key is always recommended as a best practice. When you use the KeyId parameter to specify a KMS key, KMS only uses the KMS key you specify. If the ciphertext was encrypted under a different KMS key, the Decrypt operation fails. This practice ensures that you use the KMS key that you intend.

        Whenever possible, use key policies to give users permission to call the Decrypt operation on a particular KMS key, instead of using &IAM; policies. Otherwise, you might create an &IAM; policy that gives the user Decrypt permission on all KMS keys. This user could decrypt ciphertext that was encrypted by KMS keys in other accounts if the key policy for the cross-account KMS key permits it. If you must use an IAM policy for Decrypt permissions, limit the user to particular KMS keys or particular trusted accounts. For details, see Best practices for IAM policies in the Key Management Service Developer Guide.

        Decrypt also supports Amazon Web Services Nitro Enclaves, which provide an isolated compute environment in Amazon EC2. To call Decrypt for a Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK. Use the Recipient parameter to provide the attestation document for the enclave. Instead of the plaintext data, the response includes the plaintext data encrypted with the public key from the attestation document (CiphertextForRecipient). For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. If you use the KeyId parameter to identify a KMS key in a different Amazon Web Services account, specify the key ARN or the alias ARN of the KMS key.

        Required permissions: kms:Decrypt (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Decrypts ciphertext that was encrypted by a KMS key using any of the following operations:

        You can use this operation to decrypt ciphertext that was encrypted under a symmetric encryption KMS key or an asymmetric encryption KMS key. When the KMS key is asymmetric, you must specify the KMS key and the encryption algorithm that was used to encrypt the ciphertext. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        The Decrypt operation also decrypts ciphertext that was encrypted outside of KMS by the public key in a KMS asymmetric KMS key. However, it cannot decrypt symmetric ciphertext produced by other libraries, such as the Amazon Web Services Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with KMS.

        If the ciphertext was encrypted under a symmetric encryption KMS key, the KeyId parameter is optional. KMS can get this information from metadata that it adds to the symmetric ciphertext blob. This feature adds durability to your implementation by ensuring that authorized users can decrypt ciphertext decades after it was encrypted, even if they've lost track of the key ID. However, specifying the KMS key is always recommended as a best practice. When you use the KeyId parameter to specify a KMS key, KMS only uses the KMS key you specify. If the ciphertext was encrypted under a different KMS key, the Decrypt operation fails. This practice ensures that you use the KMS key that you intend.
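
        A sketch of the recommended practice under the same assumptions as the earlier sketches; the key ARN and ciphertext variable are placeholders.

        DecryptResponse decrypted = kms.decrypt(r -> r
            .ciphertextBlob(ciphertext) // SdkBytes returned by an earlier Encrypt call
            .keyId("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"));
        SdkBytes plaintext = decrypted.plaintext();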

        Whenever possible, use key policies to give users permission to call the Decrypt operation on a particular KMS key, instead of using IAM policies. Otherwise, you might create an IAM policy that gives the user Decrypt permission on all KMS keys. This user could decrypt ciphertext that was encrypted by KMS keys in other accounts if the key policy for the cross-account KMS key permits it. If you must use an IAM policy for Decrypt permissions, limit the user to particular KMS keys or particular trusted accounts. For details, see Best practices for IAM policies in the Key Management Service Developer Guide.

        Decrypt also supports Amazon Web Services Nitro Enclaves, which provide an isolated compute environment in Amazon EC2. To call Decrypt for a Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK. Use the Recipient parameter to provide the attestation document for the enclave. Instead of the plaintext data, the response includes the plaintext data encrypted with the public key from the attestation document (CiphertextForRecipient). For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. If you use the KeyId parameter to identify a KMS key in a different Amazon Web Services account, specify the key ARN or the alias ARN of the KMS key.

        Required permissions: kms:Decrypt (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "DeleteAlias":{ "name":"DeleteAlias", @@ -178,7 +178,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Deletes the specified alias.

        Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        Because an alias is not a property of a KMS key, you can delete and change the aliases of a KMS key without affecting the KMS key. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all KMS keys, use the ListAliases operation.

        Each KMS key can have multiple aliases. To change the alias of a KMS key, use DeleteAlias to delete the current alias and CreateAlias to create a new alias. To associate an existing alias with a different KMS key, call UpdateAlias.

        Cross-account use: No. You cannot perform this operation on an alias in a different Amazon Web Services account.

        Required permissions

        For details, see Controlling access to aliases in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Deletes the specified alias.

        Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        Because an alias is not a property of a KMS key, you can delete and change the aliases of a KMS key without affecting the KMS key. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all KMS keys, use the ListAliases operation.

        Each KMS key can have multiple aliases. To change the alias of a KMS key, use DeleteAlias to delete the current alias and CreateAlias to create a new alias. To associate an existing alias with a different KMS key, call UpdateAlias.
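
        A sketch of changing an alias under the same assumptions; the alias names and key ID are placeholders.

        kms.deleteAlias(r -> r.aliasName("alias/old-name"));
        kms.createAlias(r -> r
            .aliasName("alias/new-name")
            .targetKeyId("1234abcd-12ab-34cd-56ef-1234567890ab"));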

        Cross-account use: No. You cannot perform this operation on an alias in a different Amazon Web Services account.

        Required permissions

        For details, see Controlling access to aliases in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "DeleteCustomKeyStore":{ "name":"DeleteCustomKeyStore", @@ -194,7 +194,7 @@ {"shape":"CustomKeyStoreNotFoundException"}, {"shape":"KMSInternalException"} ], - "documentation":"

        Deletes a custom key store. This operation does not affect any backing elements of the custom key store. It does not delete the CloudHSM cluster that is associated with a CloudHSM key store, or affect any users or keys in the cluster. For an external key store, it does not affect the external key store proxy, external key manager, or any external keys.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        The custom key store that you delete cannot contain any KMS keys. Before deleting the key store, verify that you will never need to use any of the KMS keys in the key store for any cryptographic operations. Then, use ScheduleKeyDeletion to delete the KMS keys from the key store. After the required waiting period expires and all KMS keys are deleted from the custom key store, use DisconnectCustomKeyStore to disconnect the key store from KMS. Then, you can delete the custom key store.

        For keys in a CloudHSM key store, the ScheduleKeyDeletion operation makes a best effort to delete the key material from the associated cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups. KMS never creates, manages, or deletes cryptographic keys in the external key manager associated with an external key store. You must manage them using your external key manager tools.

        Instead of deleting the custom key store, consider using the DisconnectCustomKeyStore operation to disconnect the custom key store from its backing key store. While the key store is disconnected, you cannot create or use the KMS keys in the key store. But, you do not need to delete KMS keys and you can reconnect a disconnected custom key store at any time.

        If the operation succeeds, it returns a JSON object with no properties.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:DeleteCustomKeyStore (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Deletes a custom key store. This operation does not affect any backing elements of the custom key store. It does not delete the CloudHSM cluster that is associated with a CloudHSM key store, or affect any users or keys in the cluster. For an external key store, it does not affect the external key store proxy, external key manager, or any external keys.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        The custom key store that you delete cannot contain any KMS keys. Before deleting the key store, verify that you will never need to use any of the KMS keys in the key store for any cryptographic operations. Then, use ScheduleKeyDeletion to delete the KMS keys from the key store. After the required waiting period expires and all KMS keys are deleted from the custom key store, use DisconnectCustomKeyStore to disconnect the key store from KMS. Then, you can delete the custom key store.
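
        A sketch of that teardown sequence under the same assumptions; keyId and storeId are placeholders, and the calls must be separated by the scheduled waiting period.

        kms.scheduleKeyDeletion(r -> r.keyId(keyId).pendingWindowInDays(7));
        // After the waiting period expires and the store holds no KMS keys:
        kms.disconnectCustomKeyStore(r -> r.customKeyStoreId(storeId));
        kms.deleteCustomKeyStore(r -> r.customKeyStoreId(storeId));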

        For keys in a CloudHSM key store, the ScheduleKeyDeletion operation makes a best effort to delete the key material from the associated cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups. KMS never creates, manages, or deletes cryptographic keys in the external key manager associated with an external key store. You must manage them using your external key manager tools.

        Instead of deleting the custom key store, consider using the DisconnectCustomKeyStore operation to disconnect the custom key store from its backing key store. While the key store is disconnected, you cannot create or use the KMS keys in the key store. But, you do not need to delete KMS keys and you can reconnect a disconnected custom key store at any time.

        If the operation succeeds, it returns a JSON object with no properties.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:DeleteCustomKeyStore (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "DeleteImportedKeyMaterial":{ "name":"DeleteImportedKeyMaterial", @@ -203,6 +203,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteImportedKeyMaterialRequest"}, + "output":{"shape":"DeleteImportedKeyMaterialResponse"}, "errors":[ {"shape":"InvalidArnException"}, {"shape":"UnsupportedOperationException"}, @@ -211,7 +212,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Deletes key material that was previously imported. This operation makes the specified KMS key temporarily unusable. To restore the usability of the KMS key, reimport the same key material. For more information about importing key material into KMS, see Importing Key Material in the Key Management Service Developer Guide.

        When the specified KMS key is in the PendingDeletion state, this operation does not change the KMS key's state. Otherwise, it changes the KMS key's state to PendingImport.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:DeleteImportedKeyMaterial (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Deletes key material that was previously imported. This operation makes the specified KMS key temporarily unusable. To restore the usability of the KMS key, reimport the same key material. For more information about importing key material into KMS, see Importing Key Material in the Key Management Service Developer Guide.

        When the specified KMS key is in the PendingDeletion state, this operation does not change the KMS key's state. Otherwise, it changes the KMS key's state to PendingImport.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:DeleteImportedKeyMaterial (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "DeriveSharedSecret":{ "name":"DeriveSharedSecret", @@ -232,7 +233,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Derives a shared secret using a key agreement algorithm.

        You must use an asymmetric NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) KMS key pair with a KeyUsage value of KEY_AGREEMENT to call DeriveSharedSecret.

        DeriveSharedSecret uses the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive (ECDH) to establish a key agreement between two peers by deriving a shared secret from their elliptic curve public-private key pairs. You can use the raw shared secret that DeriveSharedSecret returns to derive a symmetric key that can encrypt and decrypt data that is sent between the two peers, or that can generate and verify HMACs. KMS recommends that you follow NIST recommendations for key derivation when using the raw shared secret to derive a symmetric key.

        The following workflow demonstrates how to establish key agreement over an insecure communication channel using DeriveSharedSecret.

        1. Alice calls CreateKey to create an asymmetric KMS key pair with a KeyUsage value of KEY_AGREEMENT.

          The asymmetric KMS key must use a NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) key spec.

        2. Bob creates an elliptic curve key pair.

          Bob can call CreateKey to create an asymmetric KMS key pair or generate a key pair outside of KMS. Bob's key pair must use the same NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) curve as Alice.

        3. Alice and Bob exchange their public keys through an insecure communication channel (like the internet).

          Use GetPublicKey to download the public key of your asymmetric KMS key pair.

          KMS strongly recommends verifying that the public key you receive came from the expected party before using it to derive a shared secret.

        4. Alice calls DeriveSharedSecret.

          KMS uses the private key from the KMS key pair generated in Step 1, Bob's public key, and the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive to derive the shared secret. The private key in your KMS key pair never leaves KMS unencrypted. DeriveSharedSecret returns the raw shared secret.

        5. Bob uses the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive to calculate the same raw secret using his private key and Alice's public key.

        To derive a shared secret you must provide a key agreement algorithm, the private key of the caller's asymmetric NIST-recommended elliptic curve or SM2 (China Regions only) KMS key pair, and the public key from your peer's NIST-recommended elliptic curve or SM2 (China Regions only) key pair. The public key can be from another asymmetric KMS key pair or from a key pair generated outside of KMS, but both key pairs must be on the same elliptic curve.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:DeriveSharedSecret (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Derives a shared secret using a key agreement algorithm.

        You must use an asymmetric NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) KMS key pair with a KeyUsage value of KEY_AGREEMENT to call DeriveSharedSecret.

        DeriveSharedSecret uses the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive (ECDH) to establish a key agreement between two peers by deriving a shared secret from their elliptic curve public-private key pairs. You can use the raw shared secret that DeriveSharedSecret returns to derive a symmetric key that can encrypt and decrypt data that is sent between the two peers, or that can generate and verify HMACs. KMS recommends that you follow NIST recommendations for key derivation when using the raw shared secret to derive a symmetric key.

        The following workflow demonstrates how to establish key agreement over an insecure communication channel using DeriveSharedSecret.

        1. Alice calls CreateKey to create an asymmetric KMS key pair with a KeyUsage value of KEY_AGREEMENT.

          The asymmetric KMS key must use a NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) key spec.

        2. Bob creates an elliptic curve key pair.

          Bob can call CreateKey to create an asymmetric KMS key pair or generate a key pair outside of KMS. Bob's key pair must use the same NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) curve as Alice.

        3. Alice and Bob exchange their public keys through an insecure communication channel (like the internet).

          Use GetPublicKey to download the public key of your asymmetric KMS key pair.

          KMS strongly recommends verifying that the public key you receive came from the expected party before using it to derive a shared secret.

        4. Alice calls DeriveSharedSecret.

          KMS uses the private key from the KMS key pair generated in Step 1, Bob's public key, and the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive to derive the shared secret. The private key in your KMS key pair never leaves KMS unencrypted. DeriveSharedSecret returns the raw shared secret.

        5. Bob uses the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive to calculate the same raw secret using his private key and Alice's public key.

        To derive a shared secret you must provide a key agreement algorithm, the private key of the caller's asymmetric NIST-recommended elliptic curve or SM2 (China Regions only) KMS key pair, and the public key from your peer's NIST-recommended elliptic curve or SM2 (China Regions only) key pair. The public key can be from another asymmetric KMS key pair or from a key pair generated outside of KMS, but both key pairs must be on the same elliptic curve.
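
        A sketch of step 4 of the workflow above, under the same assumptions as the earlier sketches; aliceKeyId and bobPublicKey are placeholders.

        DeriveSharedSecretResponse shared = kms.deriveSharedSecret(r -> r
            .keyId(aliceKeyId) // the KEY_AGREEMENT key pair created in step 1
            .keyAgreementAlgorithm(KeyAgreementAlgorithmSpec.ECDH)
            .publicKey(bobPublicKey)); // SdkBytes holding Bob's DER-encoded public key
        SdkBytes rawSharedSecret = shared.sharedSecret();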

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:DeriveSharedSecret (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "DescribeCustomKeyStores":{ "name":"DescribeCustomKeyStores", @@ -247,7 +248,7 @@ {"shape":"InvalidMarkerException"}, {"shape":"KMSInternalException"} ], - "documentation":"

        Gets information about custom key stores in the account and Region.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        By default, this operation returns information about all custom key stores in the account and Region. To get only information about a particular custom key store, use either the CustomKeyStoreName or CustomKeyStoreId parameter (but not both).

        To determine whether the custom key store is connected to its CloudHSM cluster or external key store proxy, use the ConnectionState element in the response. If an attempt to connect the custom key store failed, the ConnectionState value is FAILED and the ConnectionErrorCode element in the response indicates the cause of the failure. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

        Custom key stores have a DISCONNECTED connection state if the key store has never been connected or you used the DisconnectCustomKeyStore operation to disconnect it. Otherwise, the connection state is CONNECTED. If your custom key store connection state is CONNECTED but you are having trouble using it, verify that the backing store is active and available. For a CloudHSM key store, verify that the associated CloudHSM cluster is active and contains the minimum number of HSMs required for the operation, if any. For an external key store, verify that the external key store proxy and its associated external key manager are reachable and enabled.

        For help repairing your CloudHSM key store, see the Troubleshooting CloudHSM key stores. For help repairing your external key store, see the Troubleshooting external key stores. Both topics are in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:DescribeCustomKeyStores (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Gets information about custom key stores in the account and Region.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        By default, this operation returns information about all custom key stores in the account and Region. To get only information about a particular custom key store, use either the CustomKeyStoreName or CustomKeyStoreId parameter (but not both).

        To determine whether the custom key store is connected to its CloudHSM cluster or external key store proxy, use the ConnectionState element in the response. If an attempt to connect the custom key store failed, the ConnectionState value is FAILED and the ConnectionErrorCode element in the response indicates the cause of the failure. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.
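
        A sketch of checking connection state under the same assumptions; the store name is a placeholder.

        DescribeCustomKeyStoresResponse stores = kms.describeCustomKeyStores(r -> r
            .customKeyStoreName("ExampleKeyStore")); // or customKeyStoreId, but not both
        stores.customKeyStores().forEach(store -> {
            if (store.connectionState() == ConnectionStateType.FAILED) {
                System.out.println("Connection failed: " + store.connectionErrorCode());
            }
        });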

        Custom key stores have a DISCONNECTED connection state if the key store has never been connected or you used the DisconnectCustomKeyStore operation to disconnect it. Otherwise, the connection state is CONNECTED. If your custom key store connection state is CONNECTED but you are having trouble using it, verify that the backing store is active and available. For a CloudHSM key store, verify that the associated CloudHSM cluster is active and contains the minimum number of HSMs required for the operation, if any. For an external key store, verify that the external key store proxy and its associated external key manager are reachable and enabled.

        For help repairing your CloudHSM key store, see the Troubleshooting CloudHSM key stores. For help repairing your external key store, see the Troubleshooting external key stores. Both topics are in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:DescribeCustomKeyStores (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "DescribeKey":{ "name":"DescribeKey", @@ -263,7 +264,7 @@ {"shape":"DependencyTimeoutException"}, {"shape":"KMSInternalException"} ], - "documentation":"

        Provides detailed information about a KMS key. You can run DescribeKey on a customer managed key or an Amazon Web Services managed key.

        This detailed information includes the key ARN, creation date (and deletion date, if applicable), the key state, and the origin and expiration date (if any) of the key material. It includes fields, like KeySpec, that help you distinguish different types of KMS keys. It also displays the key usage (encryption, signing, or generating and verifying MACs) and the algorithms that the KMS key supports.

        For multi-Region keys, DescribeKey displays the primary key and all related replica keys. For KMS keys in CloudHSM key stores, it includes information about the key store, such as the key store ID and the CloudHSM cluster ID. For KMS keys in external key stores, it includes the custom key store ID and the ID of the external key.

        DescribeKey does not return the following information:

        • Aliases associated with the KMS key. To get this information, use ListAliases.

        • Whether automatic key rotation is enabled on the KMS key. To get this information, use GetKeyRotationStatus. Also, some key states prevent a KMS key from being automatically rotated. For details, see How Automatic Key Rotation Works in the Key Management Service Developer Guide.

        • Tags on the KMS key. To get this information, use ListResourceTags.

        • Key policies and grants on the KMS key. To get this information, use GetKeyPolicy and ListGrants.

        In general, DescribeKey is a non-mutating operation. It returns data about KMS keys, but doesn't change them. However, Amazon Web Services services use DescribeKey to create Amazon Web Services managed keys from a predefined Amazon Web Services alias with no key ID.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:DescribeKey (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Provides detailed information about a KMS key. You can run DescribeKey on a customer managed key or an Amazon Web Services managed key.

        This detailed information includes the key ARN, creation date (and deletion date, if applicable), the key state, and the origin and expiration date (if any) of the key material. It includes fields, like KeySpec, that help you distinguish different types of KMS keys. It also displays the key usage (encryption, signing, or generating and verifying MACs) and the algorithms that the KMS key supports.

        For multi-Region keys, DescribeKey displays the primary key and all related replica keys. For KMS keys in CloudHSM key stores, it includes information about the key store, such as the key store ID and the CloudHSM cluster ID. For KMS keys in external key stores, it includes the custom key store ID and the ID of the external key.
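
        A sketch of reading the returned metadata under the same assumptions; the alias is a placeholder.

        DescribeKeyResponse described = kms.describeKey(r -> r.keyId("alias/example"));
        KeyMetadata md = described.keyMetadata();
        System.out.printf("spec=%s usage=%s state=%s multiRegion=%s%n",
                          md.keySpec(), md.keyUsage(), md.keyState(), md.multiRegion());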

        DescribeKey does not return the following information:

        • Aliases associated with the KMS key. To get this information, use ListAliases.

        • Whether automatic key rotation is enabled on the KMS key. To get this information, use GetKeyRotationStatus. Also, some key states prevent a KMS key from being automatically rotated. For details, see How key rotation works in the Key Management Service Developer Guide.

        • Tags on the KMS key. To get this information, use ListResourceTags.

        • Key policies and grants on the KMS key. To get this information, use GetKeyPolicy and ListGrants.

        In general, DescribeKey is a non-mutating operation. It returns data about KMS keys, but doesn't change them. However, Amazon Web Services services use DescribeKey to create Amazon Web Services managed keys from a predefined Amazon Web Services alias with no key ID.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:DescribeKey (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "DisableKey":{ "name":"DisableKey", @@ -279,7 +280,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Sets the state of a KMS key to disabled. This change temporarily prevents use of the KMS key for cryptographic operations.

        For more information about how key state affects the use of a KMS key, see Key states of KMS keys in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:DisableKey (key policy)

        Related operations: EnableKey

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Sets the state of a KMS key to disabled. This change temporarily prevents use of the KMS key for cryptographic operations.

        The KMS key that you use for this operation must be in a compatible key state. For more information about how key state affects the use of a KMS key, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:DisableKey (key policy)

        Related operations: EnableKey

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "DisableKeyRotation":{ "name":"DisableKeyRotation", @@ -297,7 +298,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

        Disables automatic rotation of the key material of the specified symmetric encryption KMS key.

        Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

        You can enable (EnableKeyRotation) and disable automatic rotation of the key material in customer managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys is not configurable. KMS always rotates the key material every year. Rotation of Amazon Web Services owned KMS keys varies.

        In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years to every year. For details, see EnableKeyRotation.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:DisableKeyRotation (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Disables automatic rotation of the key material of the specified symmetric encryption KMS key.

        Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

        You can enable (EnableKeyRotation) and disable automatic rotation of the key material in customer managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys is not configurable. KMS always rotates the key material every year. Rotation of Amazon Web Services owned KMS keys varies.

        In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years to every year. For details, see EnableKeyRotation.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:DisableKeyRotation (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "DisconnectCustomKeyStore":{ "name":"DisconnectCustomKeyStore", @@ -312,7 +313,7 @@ {"shape":"CustomKeyStoreNotFoundException"}, {"shape":"KMSInternalException"} ], - "documentation":"

        Disconnects the custom key store from its backing key store. This operation disconnects an CloudHSM key store from its associated CloudHSM cluster or disconnects an external key store from the external key store proxy that communicates with your external key manager.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        While a custom key store is disconnected, you can manage the custom key store and its KMS keys, but you cannot create or use its KMS keys. You can reconnect the custom key store at any time.

        While a custom key store is disconnected, all attempts to create KMS keys in the custom key store or to use existing KMS keys in cryptographic operations will fail. This action can prevent users from storing and accessing sensitive data.

        When you disconnect a custom key store, its ConnectionState changes to Disconnected. To find the connection state of a custom key store, use the DescribeCustomKeyStores operation. To reconnect a custom key store, use the ConnectCustomKeyStore operation.

        If the operation succeeds, it returns a JSON object with no properties.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:DisconnectCustomKeyStore (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Disconnects the custom key store from its backing key store. This operation disconnects an CloudHSM key store from its associated CloudHSM cluster or disconnects an external key store from the external key store proxy that communicates with your external key manager.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        While a custom key store is disconnected, you can manage the custom key store and its KMS keys, but you cannot create or use its KMS keys. You can reconnect the custom key store at any time.

        While a custom key store is disconnected, all attempts to create KMS keys in the custom key store or to use existing KMS keys in cryptographic operations will fail. This action can prevent users from storing and accessing sensitive data.

        When you disconnect a custom key store, its ConnectionState changes to Disconnected. To find the connection state of a custom key store, use the DescribeCustomKeyStores operation. To reconnect a custom key store, use the ConnectCustomKeyStore operation.

        If the operation succeeds, it returns a JSON object with no properties.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:DisconnectCustomKeyStore (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "EnableKey":{ "name":"EnableKey", @@ -329,7 +330,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Sets the key state of a KMS key to enabled. This allows you to use the KMS key for cryptographic operations.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:EnableKey (key policy)

        Related operations: DisableKey

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Sets the key state of a KMS key to enabled. This allows you to use the KMS key for cryptographic operations.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:EnableKey (key policy)

        Related operations: DisableKey

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "EnableKeyRotation":{ "name":"EnableKeyRotation", @@ -347,7 +348,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

        Enables automatic rotation of the key material of the specified symmetric encryption KMS key.

        By default, when you enable automatic rotation of a customer managed KMS key, KMS rotates the key material of the KMS key one year (approximately 365 days) from the enable date and every year thereafter. You can use the optional RotationPeriodInDays parameter to specify a custom rotation period when you enable key rotation, or you can use RotationPeriodInDays to modify the rotation period of a key that you previously enabled automatic key rotation on.

        You can monitor rotation of the key material for your KMS keys in CloudTrail and Amazon CloudWatch. To disable rotation of the key material in a customer managed KMS key, use the DisableKeyRotation operation. You can use the GetKeyRotationStatus operation to identify any in-progress rotations. You can use the ListKeyRotations operation to view the details of completed rotations.

        Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

        You cannot enable or disable automatic rotation of Amazon Web Services managed KMS keys. KMS always rotates the key material of Amazon Web Services managed keys every year. Rotation of Amazon Web Services owned KMS keys is managed by the Amazon Web Services service that owns the key.

        In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years (approximately 1,095 days) to every year (approximately 365 days).

        New Amazon Web Services managed keys are automatically rotated one year after they are created, and approximately every year thereafter.

        Existing Amazon Web Services managed keys are automatically rotated one year after their most recent rotation, and every year thereafter.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:EnableKeyRotation (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Enables automatic rotation of the key material of the specified symmetric encryption KMS key.

        By default, when you enable automatic rotation of a customer managed KMS key, KMS rotates the key material of the KMS key one year (approximately 365 days) from the enable date and every year thereafter. You can use the optional RotationPeriodInDays parameter to specify a custom rotation period when you enable key rotation, or you can use RotationPeriodInDays to modify the rotation period of a key that you previously enabled automatic key rotation on.
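
        A sketch of enabling rotation with a custom period under the same assumptions; the key ID is a placeholder.

        kms.enableKeyRotation(r -> r
            .keyId("1234abcd-12ab-34cd-56ef-1234567890ab")
            .rotationPeriodInDays(90)); // optional; omit for the default yearly rotation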

        You can monitor rotation of the key material for your KMS keys in CloudTrail and Amazon CloudWatch. To disable rotation of the key material in a customer managed KMS key, use the DisableKeyRotation operation. You can use the GetKeyRotationStatus operation to identify any in-progress rotations. You can use the ListKeyRotations operation to view the details of completed rotations.

        Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

        You cannot enable or disable automatic rotation of Amazon Web Services managed KMS keys. KMS always rotates the key material of Amazon Web Services managed keys every year. Rotation of Amazon Web Services owned KMS keys is managed by the Amazon Web Services service that owns the key.

        In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years (approximately 1,095 days) to every year (approximately 365 days).

        New Amazon Web Services managed keys are automatically rotated one year after they are created, and approximately every year thereafter.

        Existing Amazon Web Services managed keys are automatically rotated one year after their most recent rotation, and every year thereafter.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:EnableKeyRotation (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
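        For illustration only (not part of the service model), a minimal sketch of this call with the AWS SDK for Java v2; the key ID and the 180-day period are hypothetical placeholders:

        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.EnableKeyRotationRequest;

        public class EnableKeyRotationSketch {
            public static void main(String[] args) {
                try (KmsClient kms = KmsClient.create()) {
                    // Enable automatic rotation and set a custom rotation period.
                    kms.enableKeyRotation(EnableKeyRotationRequest.builder()
                            .keyId("1234abcd-12ab-34cd-56ef-1234567890ab") // placeholder key ID
                            .rotationPeriodInDays(180)                     // optional custom period
                            .build());
                }
            }
        }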

        " }, "Encrypt":{ "name":"Encrypt", @@ -368,7 +369,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Encrypts plaintext of up to 4,096 bytes using a KMS key. You can use a symmetric or asymmetric KMS key with a KeyUsage of ENCRYPT_DECRYPT.

        You can use this operation to encrypt small amounts of arbitrary data, such as a personal identifier or database password, or other sensitive information. You don't need to use the Encrypt operation to encrypt a data key. The GenerateDataKey and GenerateDataKeyPair operations return a plaintext data key and an encrypted copy of that data key.

        If you use a symmetric encryption KMS key, you can use an encryption context to add additional security to your encryption operation. If you specify an EncryptionContext when encrypting data, you must specify the same encryption context (a case-sensitive exact match) when decrypting the data. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        If you specify an asymmetric KMS key, you must also specify the encryption algorithm. The algorithm must be compatible with the KMS key spec.

        When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.

        You are not required to supply the key ID and encryption algorithm when you decrypt with symmetric encryption KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.

        The maximum size of the data that you can encrypt varies with the type of KMS key and the encryption algorithm that you choose.

        • Symmetric encryption KMS keys

          • SYMMETRIC_DEFAULT: 4096 bytes

        • RSA_2048

          • RSAES_OAEP_SHA_1: 214 bytes

          • RSAES_OAEP_SHA_256: 190 bytes

        • RSA_3072

          • RSAES_OAEP_SHA_1: 342 bytes

          • RSAES_OAEP_SHA_256: 318 bytes

        • RSA_4096

          • RSAES_OAEP_SHA_1: 470 bytes

          • RSAES_OAEP_SHA_256: 446 bytes

        • SM2PKE: 1024 bytes (China Regions only)

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:Encrypt (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Encrypts plaintext of up to 4,096 bytes using a KMS key. You can use a symmetric or asymmetric KMS key with a KeyUsage of ENCRYPT_DECRYPT.

        You can use this operation to encrypt small amounts of arbitrary data, such as a personal identifier or database password, or other sensitive information. You don't need to use the Encrypt operation to encrypt a data key. The GenerateDataKey and GenerateDataKeyPair operations return a plaintext data key and an encrypted copy of that data key.

        If you use a symmetric encryption KMS key, you can use an encryption context to add additional security to your encryption operation. If you specify an EncryptionContext when encrypting data, you must specify the same encryption context (a case-sensitive exact match) when decrypting the data. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        If you specify an asymmetric KMS key, you must also specify the encryption algorithm. The algorithm must be compatible with the KMS key spec.

        When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.

        You are not required to supply the key ID and encryption algorithm when you decrypt with symmetric encryption KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.

        The maximum size of the data that you can encrypt varies with the type of KMS key and the encryption algorithm that you choose.

        • Symmetric encryption KMS keys

          • SYMMETRIC_DEFAULT: 4096 bytes

        • RSA_2048

          • RSAES_OAEP_SHA_1: 214 bytes

          • RSAES_OAEP_SHA_256: 190 bytes

        • RSA_3072

          • RSAES_OAEP_SHA_1: 342 bytes

          • RSAES_OAEP_SHA_256: 318 bytes

        • RSA_4096

          • RSAES_OAEP_SHA_1: 470 bytes

          • RSAES_OAEP_SHA_256: 446 bytes

        • SM2PKE: 1024 bytes (China Regions only)

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:Encrypt (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
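        For illustration only, a minimal AWS SDK for Java v2 sketch of encrypting a small value under a symmetric encryption key; the alias is a hypothetical placeholder:

        import software.amazon.awssdk.core.SdkBytes;
        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.EncryptRequest;
        import software.amazon.awssdk.services.kms.model.EncryptResponse;

        public class EncryptSketch {
            public static void main(String[] args) {
                try (KmsClient kms = KmsClient.create()) {
                    EncryptResponse response = kms.encrypt(EncryptRequest.builder()
                            .keyId("alias/ExampleAlias") // placeholder alias
                            .plaintext(SdkBytes.fromUtf8String("database password")) // up to 4,096 bytes for SYMMETRIC_DEFAULT
                            .build());
                    byte[] ciphertext = response.ciphertextBlob().asByteArray(); // pass to Decrypt later
                }
            }
        }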

        " }, "GenerateDataKey":{ "name":"GenerateDataKey", @@ -389,7 +390,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Returns a unique symmetric data key for use outside of KMS. This operation returns a plaintext copy of the data key and a copy that is encrypted under a symmetric encryption KMS key that you specify. The bytes in the plaintext key are random; they are not related to the caller or the KMS key. You can use the plaintext key to encrypt your data outside of KMS and store the encrypted data key with the encrypted data.

        To generate a data key, specify the symmetric encryption KMS key that will be used to encrypt the data key. You cannot use an asymmetric KMS key to encrypt data keys. To get the type of your KMS key, use the DescribeKey operation.

        You must also specify the length of the data key. Use either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter.

        To generate a 128-bit SM4 data key (China Regions only), specify a KeySpec value of AES_128 or a NumberOfBytes value of 16. The symmetric encryption key used in China Regions to encrypt your data key is an SM4 encryption key.

        To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure random byte string, use GenerateRandom.

        You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        GenerateDataKey also supports Amazon Web Services Nitro Enclaves, which provide an isolated compute environment in Amazon EC2. To call GenerateDataKey for an Amazon Web Services Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK. Use the Recipient parameter to provide the attestation document for the enclave. GenerateDataKey returns a copy of the data key encrypted under the specified KMS key, as usual. But instead of a plaintext copy of the data key, the response includes a copy of the data key encrypted under the public key from the attestation document (CiphertextForRecipient). For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        How to use your data key

        We recommend that you use the following pattern to encrypt data locally in your application. You can write your own code or use a client-side encryption library, such as the Amazon Web Services Encryption SDK, the Amazon DynamoDB Encryption Client, or Amazon S3 client-side encryption to do these tasks for you.

        To encrypt data outside of KMS:

        1. Use the GenerateDataKey operation to get a data key.

        2. Use the plaintext data key (in the Plaintext field of the response) to encrypt your data outside of KMS. Then erase the plaintext data key from memory.

        3. Store the encrypted data key (in the CiphertextBlob field of the response) with the encrypted data.

        To decrypt data outside of KMS:

        1. Use the Decrypt operation to decrypt the encrypted data key. The operation returns a plaintext copy of the data key.

        2. Use the plaintext data key to decrypt data outside of KMS, then erase the plaintext data key from memory.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateDataKey (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns a unique symmetric data key for use outside of KMS. This operation returns a plaintext copy of the data key and a copy that is encrypted under a symmetric encryption KMS key that you specify. The bytes in the plaintext key are random; they are not related to the caller or the KMS key. You can use the plaintext key to encrypt your data outside of KMS and store the encrypted data key with the encrypted data.

        To generate a data key, specify the symmetric encryption KMS key that will be used to encrypt the data key. You cannot use an asymmetric KMS key to encrypt data keys. To get the type of your KMS key, use the DescribeKey operation.

        You must also specify the length of the data key. Use either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter.

        To generate a 128-bit SM4 data key (China Regions only), specify a KeySpec value of AES_128 or a NumberOfBytes value of 16. The symmetric encryption key used in China Regions to encrypt your data key is an SM4 encryption key.

        To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure random byte string, use GenerateRandom.

        You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        GenerateDataKey also supports Amazon Web Services Nitro Enclaves, which provide an isolated compute environment in Amazon EC2. To call GenerateDataKey for an Amazon Web Services Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK. Use the Recipient parameter to provide the attestation document for the enclave. GenerateDataKey returns a copy of the data key encrypted under the specified KMS key, as usual. But instead of a plaintext copy of the data key, the response includes a copy of the data key encrypted under the public key from the attestation document (CiphertextForRecipient). For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        How to use your data key

        We recommend that you use the following pattern to encrypt data locally in your application. You can write your own code or use a client-side encryption library, such as the Amazon Web Services Encryption SDK, the Amazon DynamoDB Encryption Client, or Amazon S3 client-side encryption to do these tasks for you.

        To encrypt data outside of KMS:

        1. Use the GenerateDataKey operation to get a data key.

        2. Use the plaintext data key (in the Plaintext field of the response) to encrypt your data outside of KMS. Then erase the plaintext data key from memory.

        3. Store the encrypted data key (in the CiphertextBlob field of the response) with the encrypted data.

        To decrypt data outside of KMS:

        1. Use the Decrypt operation to decrypt the encrypted data key. The operation returns a plaintext copy of the data key.

        2. Use the plaintext data key to decrypt data outside of KMS, then erase the plaintext data key from memory.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateDataKey (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
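        A minimal AWS SDK for Java v2 sketch of the envelope-encryption pattern described above; the alias is a placeholder, and the local encryption step with the plaintext key is elided:

        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.DataKeySpec;
        import software.amazon.awssdk.services.kms.model.GenerateDataKeyRequest;
        import software.amazon.awssdk.services.kms.model.GenerateDataKeyResponse;

        public class GenerateDataKeySketch {
            public static void main(String[] args) {
                try (KmsClient kms = KmsClient.create()) {
                    GenerateDataKeyResponse dataKey = kms.generateDataKey(GenerateDataKeyRequest.builder()
                            .keyId("alias/ExampleAlias")  // placeholder symmetric encryption KMS key
                            .keySpec(DataKeySpec.AES_256) // 256-bit data key
                            .build());
                    byte[] plaintextKey = dataKey.plaintext().asByteArray();      // encrypt data locally, then erase from memory
                    byte[] encryptedKey = dataKey.ciphertextBlob().asByteArray(); // store with the encrypted data
                }
            }
        }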

        " }, "GenerateDataKeyPair":{ "name":"GenerateDataKeyPair", @@ -411,7 +412,7 @@ {"shape":"UnsupportedOperationException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Returns a unique asymmetric data key pair for use outside of KMS. This operation returns a plaintext public key, a plaintext private key, and a copy of the private key that is encrypted under the symmetric encryption KMS key you specify. You can use the data key pair to perform asymmetric cryptography and implement digital signatures outside of KMS. The bytes in the keys are random; they are not related to the caller or to the KMS key that is used to encrypt the private key.

        You can use the public key that GenerateDataKeyPair returns to encrypt data or verify a signature outside of KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

        To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

        Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data key pair. In China Regions, you can also choose an SM2 data key pair. KMS recommends that you use ECC key pairs for signing, and use RSA and SM2 key pairs for either encryption or signing, but not both. However, KMS cannot enforce any restrictions on the use of data key pairs outside of KMS.

        If you are using the data key pair to encrypt data, or for any operation where you don't immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation. GenerateDataKeyPairWithoutPlaintext returns a plaintext public key and an encrypted private key, but omits the plaintext private key that you need only to decrypt ciphertext or sign a message. Later, when you need to decrypt the data or sign a message, use the Decrypt operation to decrypt the encrypted private key in the data key pair.

        GenerateDataKeyPair returns a unique data key pair for each request. The bytes in the keys are random; they are not related to the caller or the KMS key that is used to encrypt the private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in RFC 5280. The private key is a DER-encoded PKCS8 PrivateKeyInfo, as specified in RFC 5958.

        GenerateDataKeyPair also supports Amazon Web Services Nitro Enclaves, which provide an isolated compute environment in Amazon EC2. To call GenerateDataKeyPair for an Amazon Web Services Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK. Use the Recipient parameter to provide the attestation document for the enclave. GenerateDataKeyPair returns the public data key and a copy of the private data key encrypted under the specified KMS key, as usual. But instead of a plaintext copy of the private data key (PrivateKeyPlaintext), the response includes a copy of the private data key encrypted under the public key from the attestation document (CiphertextForRecipient). For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateDataKeyPair (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns a unique asymmetric data key pair for use outside of KMS. This operation returns a plaintext public key, a plaintext private key, and a copy of the private key that is encrypted under the symmetric encryption KMS key you specify. You can use the data key pair to perform asymmetric cryptography and implement digital signatures outside of KMS. The bytes in the keys are random; they are not related to the caller or to the KMS key that is used to encrypt the private key.

        You can use the public key that GenerateDataKeyPair returns to encrypt data or verify a signature outside of KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

        To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

        Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data key pair. In China Regions, you can also choose an SM2 data key pair. KMS recommends that you use ECC key pairs for signing, and use RSA and SM2 key pairs for either encryption or signing, but not both. However, KMS cannot enforce any restrictions on the use of data key pairs outside of KMS.

        If you are using the data key pair to encrypt data, or for any operation where you don't immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation. GenerateDataKeyPairWithoutPlaintext returns a plaintext public key and an encrypted private key, but omits the plaintext private key that you need only to decrypt ciphertext or sign a message. Later, when you need to decrypt the data or sign a message, use the Decrypt operation to decrypt the encrypted private key in the data key pair.

        GenerateDataKeyPair returns a unique data key pair for each request. The bytes in the keys are random; they are not related to the caller or the KMS key that is used to encrypt the private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in RFC 5280. The private key is a DER-encoded PKCS8 PrivateKeyInfo, as specified in RFC 5958.

        GenerateDataKeyPair also supports Amazon Web Services Nitro Enclaves, which provide an isolated compute environment in Amazon EC2. To call GenerateDataKeyPair for an Amazon Web Services Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK. Use the Recipient parameter to provide the attestation document for the enclave. GenerateDataKeyPair returns the public data key and a copy of the private data key encrypted under the specified KMS key, as usual. But instead of a plaintext copy of the private data key (PrivateKeyPlaintext), the response includes a copy of the private data key encrypted under the public key from the attestation document (CiphertextForRecipient). For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateDataKeyPair (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
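        For illustration, a minimal AWS SDK for Java v2 sketch; the alias is a placeholder, and the key pair spec is an arbitrary example:

        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.DataKeyPairSpec;
        import software.amazon.awssdk.services.kms.model.GenerateDataKeyPairRequest;
        import software.amazon.awssdk.services.kms.model.GenerateDataKeyPairResponse;

        public class GenerateDataKeyPairSketch {
            public static void main(String[] args) {
                try (KmsClient kms = KmsClient.create()) {
                    GenerateDataKeyPairResponse pair = kms.generateDataKeyPair(GenerateDataKeyPairRequest.builder()
                            .keyId("alias/ExampleAlias")                // symmetric encryption key that wraps the private key
                            .keyPairSpec(DataKeyPairSpec.ECC_NIST_P256) // e.g. an ECC pair for signing
                            .build());
                    byte[] publicKey = pair.publicKey().asByteArray();                          // DER-encoded SubjectPublicKeyInfo
                    byte[] encryptedPrivateKey = pair.privateKeyCiphertextBlob().asByteArray(); // store; Decrypt when needed
                }
            }
        }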

        " }, "GenerateDataKeyPairWithoutPlaintext":{ "name":"GenerateDataKeyPairWithoutPlaintext", @@ -433,7 +434,7 @@ {"shape":"UnsupportedOperationException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Returns a unique asymmetric data key pair for use outside of KMS. This operation returns a plaintext public key and a copy of the private key that is encrypted under the symmetric encryption KMS key you specify. Unlike GenerateDataKeyPair, this operation does not return a plaintext private key. The bytes in the keys are random; they are not related to the caller or to the KMS key that is used to encrypt the private key.

        You can use the public key that GenerateDataKeyPairWithoutPlaintext returns to encrypt data or verify a signature outside of KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

        To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

        Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data key pair. In China Regions, you can also choose an SM2 data key pair. KMS recommends that you use ECC key pairs for signing, and use RSA and SM2 key pairs for either encryption or signing, but not both. However, KMS cannot enforce any restrictions on the use of data key pairs outside of KMS.

        GenerateDataKeyPairWithoutPlaintext returns a unique data key pair for each request. The bytes in the key are not related to the caller or KMS key that is used to encrypt the private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in RFC 5280.

        You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateDataKeyPairWithoutPlaintext (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns a unique asymmetric data key pair for use outside of KMS. This operation returns a plaintext public key and a copy of the private key that is encrypted under the symmetric encryption KMS key you specify. Unlike GenerateDataKeyPair, this operation does not return a plaintext private key. The bytes in the keys are random; they are not related to the caller or to the KMS key that is used to encrypt the private key.

        You can use the public key that GenerateDataKeyPairWithoutPlaintext returns to encrypt data or verify a signature outside of KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

        To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

        Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data key pair. In China Regions, you can also choose an SM2 data key pair. KMS recommends that you use ECC key pairs for signing, and use RSA and SM2 key pairs for either encryption or signing, but not both. However, KMS cannot enforce any restrictions on the use of data key pairs outside of KMS.

        GenerateDataKeyPairWithoutPlaintext returns a unique data key pair for each request. The bytes in the key are not related to the caller or KMS key that is used to encrypt the private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in RFC 5280.

        You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateDataKeyPairWithoutPlaintext (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
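        A minimal AWS SDK for Java v2 sketch (placeholder alias); note that the response carries no plaintext private key:

        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.DataKeyPairSpec;
        import software.amazon.awssdk.services.kms.model.GenerateDataKeyPairWithoutPlaintextRequest;
        import software.amazon.awssdk.services.kms.model.GenerateDataKeyPairWithoutPlaintextResponse;

        public class GenerateDataKeyPairWithoutPlaintextSketch {
            public static void main(String[] args) {
                try (KmsClient kms = KmsClient.create()) {
                    GenerateDataKeyPairWithoutPlaintextResponse pair =
                        kms.generateDataKeyPairWithoutPlaintext(GenerateDataKeyPairWithoutPlaintextRequest.builder()
                            .keyId("alias/ExampleAlias")           // placeholder
                            .keyPairSpec(DataKeyPairSpec.RSA_2048) // arbitrary example spec
                            .build());
                    byte[] publicKey = pair.publicKey().asByteArray();                          // usable immediately
                    byte[] encryptedPrivateKey = pair.privateKeyCiphertextBlob().asByteArray(); // Decrypt only when needed
                }
            }
        }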

        " }, "GenerateDataKeyWithoutPlaintext":{ "name":"GenerateDataKeyWithoutPlaintext", @@ -454,7 +455,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Returns a unique symmetric data key for use outside of KMS. This operation returns a data key that is encrypted under a symmetric encryption KMS key that you specify. The bytes in the key are random; they are not related to the caller or to the KMS key.

        GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation except that it does not return a plaintext copy of the data key.

        This operation is useful for systems that need to encrypt data at some point, but not immediately. When you need to encrypt the data, you call the Decrypt operation on the encrypted copy of the key.

        It's also useful in distributed systems with different levels of trust. For example, you might store encrypted data in containers. One component of your system creates new containers and stores an encrypted data key with each container. Then, a different component puts the data into the containers. That component first decrypts the data key, uses the plaintext data key to encrypt data, puts the encrypted data into the container, and then destroys the plaintext data key. In this system, the component that creates the containers never sees the plaintext data key.

        To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operations.

        To generate a data key, you must specify the symmetric encryption KMS key that is used to encrypt the data key. You cannot use an asymmetric KMS key or a key in a custom key store to generate a data key. To get the type of your KMS key, use the DescribeKey operation.

        You must also specify the length of the data key. Use either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter.

        To generate an SM4 data key (China Regions only), specify a KeySpec value of AES_128 or a NumberOfBytes value of 16. The symmetric encryption key used in China Regions to encrypt your data key is an SM4 encryption key.

        If the operation succeeds, you will find the encrypted copy of the data key in the CiphertextBlob field.

        You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateDataKeyWithoutPlaintext (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns a unique symmetric data key for use outside of KMS. This operation returns a data key that is encrypted under a symmetric encryption KMS key that you specify. The bytes in the key are random; they are not related to the caller or to the KMS key.

        GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation except that it does not return a plaintext copy of the data key.

        This operation is useful for systems that need to encrypt data at some point, but not immediately. When you need to encrypt the data, you call the Decrypt operation on the encrypted copy of the key.

        It's also useful in distributed systems with different levels of trust. For example, you might store encrypted data in containers. One component of your system creates new containers and stores an encrypted data key with each container. Then, a different component puts the data into the containers. That component first decrypts the data key, uses the plaintext data key to encrypt data, puts the encrypted data into the container, and then destroys the plaintext data key. In this system, the component that creates the containers never sees the plaintext data key.

        To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operations.

        To generate a data key, you must specify the symmetric encryption KMS key that is used to encrypt the data key. You cannot use an asymmetric KMS key or a key in a custom key store to generate a data key. To get the type of your KMS key, use the DescribeKey operation.

        You must also specify the length of the data key. Use either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter.

        To generate an SM4 data key (China Regions only), specify a KeySpec value of AES_128 or a NumberOfBytes value of 16. The symmetric encryption key used in China Regions to encrypt your data key is an SM4 encryption key.

        If the operation succeeds, you will find the encrypted copy of the data key in the CiphertextBlob field.

        You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateDataKeyWithoutPlaintext (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
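        For illustration, a minimal AWS SDK for Java v2 sketch of the container pattern described above (placeholder alias); the component that later encrypts data would call Decrypt on the stored key:

        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.DataKeySpec;
        import software.amazon.awssdk.services.kms.model.GenerateDataKeyWithoutPlaintextRequest;
        import software.amazon.awssdk.services.kms.model.GenerateDataKeyWithoutPlaintextResponse;

        public class GenerateDataKeyWithoutPlaintextSketch {
            public static void main(String[] args) {
                try (KmsClient kms = KmsClient.create()) {
                    GenerateDataKeyWithoutPlaintextResponse key =
                        kms.generateDataKeyWithoutPlaintext(GenerateDataKeyWithoutPlaintextRequest.builder()
                            .keyId("alias/ExampleAlias") // placeholder
                            .keySpec(DataKeySpec.AES_256)
                            .build());
                    byte[] encryptedKey = key.ciphertextBlob().asByteArray(); // store with the container; no plaintext returned
                }
            }
        }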

        " }, "GenerateMac":{ "name":"GenerateMac", @@ -474,7 +475,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Generates a hash-based message authentication code (HMAC) for a message using an HMAC KMS key and a MAC algorithm that the key supports. HMAC KMS keys and the HMAC algorithms that KMS uses conform to industry standards defined in RFC 2104.

        You can use the value that GenerateMac returns in the VerifyMac operation to demonstrate that the original message has not changed. Also, because a secret key is used to create the hash, you can verify that the party that generated the hash has the required secret key. You can also use the raw result to implement HMAC-based algorithms such as key derivation functions. This operation is part of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS in the Key Management Service Developer Guide.

        Best practices recommend that you limit the time during which any signing mechanism, including an HMAC, is effective. This deters an attack where the actor uses a signed message to establish validity repeatedly or long after the message is superseded. HMAC tags do not include a timestamp, but you can include a timestamp in the token or message to help you detect when it's time to refresh the HMAC.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateMac (key policy)

        Related operations: VerifyMac

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Generates a hash-based message authentication code (HMAC) for a message using an HMAC KMS key and a MAC algorithm that the key supports. HMAC KMS keys and the HMAC algorithms that KMS uses conform to industry standards defined in RFC 2104.

        You can use the value that GenerateMac returns in the VerifyMac operation to demonstrate that the original message has not changed. Also, because a secret key is used to create the hash, you can verify that the party that generated the hash has the required secret key. You can also use the raw result to implement HMAC-based algorithms such as key derivation functions. This operation is part of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS in the Key Management Service Developer Guide.

        Best practices recommend that you limit the time during which any signing mechanism, including an HMAC, is effective. This deters an attack where the actor uses a signed message to establish validity repeatedly or long after the message is superseded. HMAC tags do not include a timestamp, but you can include a timestamp in the token or message to help you detect when it's time to refresh the HMAC.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateMac (key policy)

        Related operations: VerifyMac

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
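        A minimal AWS SDK for Java v2 sketch of computing an HMAC tag (placeholder key ID; the algorithm is an arbitrary example the key would need to support):

        import software.amazon.awssdk.core.SdkBytes;
        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.GenerateMacRequest;
        import software.amazon.awssdk.services.kms.model.GenerateMacResponse;
        import software.amazon.awssdk.services.kms.model.MacAlgorithmSpec;

        public class GenerateMacSketch {
            public static void main(String[] args) {
                try (KmsClient kms = KmsClient.create()) {
                    GenerateMacResponse response = kms.generateMac(GenerateMacRequest.builder()
                            .keyId("1234abcd-12ab-34cd-56ef-1234567890ab") // placeholder HMAC KMS key
                            .macAlgorithm(MacAlgorithmSpec.HMAC_SHA_256)
                            .message(SdkBytes.fromUtf8String("message to authenticate"))
                            .build());
                    byte[] tag = response.mac().asByteArray(); // check later with VerifyMac
                }
            }
        }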

        " }, "GenerateRandom":{ "name":"GenerateRandom", @@ -491,7 +492,7 @@ {"shape":"CustomKeyStoreNotFoundException"}, {"shape":"CustomKeyStoreInvalidStateException"} ], - "documentation":"

        Returns a random byte string that is cryptographically secure.

        You must use the NumberOfBytes parameter to specify the length of the random byte string. There is no default value for string length.

        By default, the random byte string is generated in KMS. To generate the byte string in the CloudHSM cluster associated with a CloudHSM key store, use the CustomKeyStoreId parameter.

        GenerateRandom also supports Amazon Web Services Nitro Enclaves, which provide an isolated compute environment in Amazon EC2. To call GenerateRandom for a Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK. Use the Recipient parameter to provide the attestation document for the enclave. Instead of plaintext bytes, the response includes the plaintext bytes encrypted under the public key from the attestation document (CiphertextForRecipient). For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        For more information about entropy and random number generation, see Key Management Service Cryptographic Details.

        Cross-account use: Not applicable. GenerateRandom does not use any account-specific resources, such as KMS keys.

        Required permissions: kms:GenerateRandom (IAM policy)

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns a random byte string that is cryptographically secure.

        You must use the NumberOfBytes parameter to specify the length of the random byte string. There is no default value for string length.

        By default, the random byte string is generated in KMS. To generate the byte string in the CloudHSM cluster associated with a CloudHSM key store, use the CustomKeyStoreId parameter.

        GenerateRandom also supports Amazon Web Services Nitro Enclaves, which provide an isolated compute environment in Amazon EC2. To call GenerateRandom for a Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK. Use the Recipient parameter to provide the attestation document for the enclave. Instead of plaintext bytes, the response includes the plaintext bytes encrypted under the public key from the attestation document (CiphertextForRecipient). For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        For more information about entropy and random number generation, see Entropy and random number generation in the Key Management Service Developer Guide.

        Cross-account use: Not applicable. GenerateRandom does not use any account-specific resources, such as KMS keys.

        Required permissions: kms:GenerateRandom (IAM policy)

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
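        For illustration, a minimal AWS SDK for Java v2 sketch requesting 32 random bytes (an arbitrary example length):

        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.GenerateRandomRequest;

        public class GenerateRandomSketch {
            public static void main(String[] args) {
                try (KmsClient kms = KmsClient.create()) {
                    byte[] randomBytes = kms.generateRandom(GenerateRandomRequest.builder()
                            .numberOfBytes(32) // required; there is no default length
                            .build())
                        .plaintext()
                        .asByteArray();
                }
            }
        }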

        " }, "GetKeyPolicy":{ "name":"GetKeyPolicy", @@ -508,7 +509,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Gets a key policy attached to the specified KMS key.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:GetKeyPolicy (key policy)

        Related operations: PutKeyPolicy

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Gets a key policy attached to the specified KMS key.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:GetKeyPolicy (key policy)

        Related operations: PutKeyPolicy

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
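        A minimal AWS SDK for Java v2 sketch (placeholder key ID; "default" is the policy name KMS uses for key policies):

        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.GetKeyPolicyRequest;

        public class GetKeyPolicySketch {
            public static void main(String[] args) {
                try (KmsClient kms = KmsClient.create()) {
                    String policyJson = kms.getKeyPolicy(GetKeyPolicyRequest.builder()
                            .keyId("1234abcd-12ab-34cd-56ef-1234567890ab") // placeholder
                            .policyName("default")
                            .build())
                        .policy(); // the key policy document, as JSON
                }
            }
        }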

        " }, "GetKeyRotationStatus":{ "name":"GetKeyRotationStatus", @@ -526,7 +527,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

        Provides detailed information about the rotation status for a KMS key, including whether automatic rotation of the key material is enabled for the specified KMS key, the rotation period, and the next scheduled rotation date.

        Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key..

        You can enable (EnableKeyRotation) and disable automatic rotation (DisableKeyRotation) of the key material in customer managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys is not configurable. KMS always rotates the key material in Amazon Web Services managed KMS keys every year. The key rotation status for Amazon Web Services managed KMS keys is always true.

        You can perform on-demand (RotateKeyOnDemand) rotation of the key material in customer managed KMS keys, regardless of whether automatic key rotation is enabled. You can use GetKeyRotationStatus to identify the date and time that an in-progress on-demand rotation was initiated. You can use ListKeyRotations to view the details of completed rotations.

        In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years to every year. For details, see EnableKeyRotation.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        • Disabled: The key rotation status does not change when you disable a KMS key. However, while the KMS key is disabled, KMS does not rotate the key material. When you re-enable the KMS key, rotation resumes. If the key material in the re-enabled KMS key hasn't been rotated in one year, KMS rotates it immediately, and every year thereafter. If it's been less than a year since the key material in the re-enabled KMS key was rotated, the KMS key resumes its prior rotation schedule.

        • Pending deletion: While a KMS key is pending deletion, its key rotation status is false and KMS does not rotate the key material. If you cancel the deletion, the original key rotation status returns to true.

        Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

        Required permissions: kms:GetKeyRotationStatus (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Provides detailed information about the rotation status for a KMS key, including whether automatic rotation of the key material is enabled for the specified KMS key, the rotation period, and the next scheduled rotation date.

        Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

        You can enable (EnableKeyRotation) and disable automatic rotation (DisableKeyRotation) of the key material in customer managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys is not configurable. KMS always rotates the key material in Amazon Web Services managed KMS keys every year. The key rotation status for Amazon Web Services managed KMS keys is always true.

        You can perform on-demand (RotateKeyOnDemand) rotation of the key material in customer managed KMS keys, regardless of whether automatic key rotation is enabled. You can use GetKeyRotationStatus to identify the date and time that an in-progress on-demand rotation was initiated. You can use ListKeyRotations to view the details of completed rotations.

        In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years to every year. For details, see EnableKeyRotation.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        • Disabled: The key rotation status does not change when you disable a KMS key. However, while the KMS key is disabled, KMS does not rotate the key material. When you re-enable the KMS key, rotation resumes. If the key material in the re-enabled KMS key hasn't been rotated in one year, KMS rotates it immediately, and every year thereafter. If it's been less than a year since the key material in the re-enabled KMS key was rotated, the KMS key resumes its prior rotation schedule.

        • Pending deletion: While a KMS key is pending deletion, its key rotation status is false and KMS does not rotate the key material. If you cancel the deletion, the original key rotation status returns to true.

        Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

        Required permissions: kms:GetKeyRotationStatus (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
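        For illustration, a minimal AWS SDK for Java v2 sketch (placeholder key ID); the rotation-period accessor is assumed from the response fields this documentation describes:

        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.GetKeyRotationStatusRequest;
        import software.amazon.awssdk.services.kms.model.GetKeyRotationStatusResponse;

        public class GetKeyRotationStatusSketch {
            public static void main(String[] args) {
                try (KmsClient kms = KmsClient.create()) {
                    GetKeyRotationStatusResponse status = kms.getKeyRotationStatus(GetKeyRotationStatusRequest.builder()
                            .keyId("1234abcd-12ab-34cd-56ef-1234567890ab") // placeholder
                            .build());
                    boolean rotationEnabled = status.keyRotationEnabled();
                    Integer rotationPeriodInDays = status.rotationPeriodInDays(); // the configured period, if rotation is enabled
                }
            }
        }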

        " }, "GetParametersForImport":{ "name":"GetParametersForImport", @@ -544,7 +545,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Returns the public key and an import token you need to import or reimport key material for a KMS key.

        By default, KMS keys are created with key material that KMS generates. This operation supports Importing key material, an advanced feature that lets you generate and import the cryptographic key material for a KMS key. For more information about importing key material into KMS, see Importing key material in the Key Management Service Developer Guide.

        Before calling GetParametersForImport, use the CreateKey operation with an Origin value of EXTERNAL to create a KMS key with no key material. You can import key material for a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key. You can also import key material into a multi-Region key of any supported type. However, you can't import key material into a KMS key in a custom key store. You can also use GetParametersForImport to get a public key and import token to reimport the original key material into a KMS key whose key material expired or was deleted.

        GetParametersForImport returns the items that you need to import your key material.

        • The public key (or \"wrapping key\") of an RSA key pair that KMS generates.

          You will use this public key to encrypt (\"wrap\") your key material while it's in transit to KMS.

        • An import token that ensures that KMS can decrypt your key material and associate it with the correct KMS key.

        The public key and its import token are permanently linked and must be used together. Each public key and import token set is valid for 24 hours. The expiration date and time appear in the ParametersValidTo field in the GetParametersForImport response. You cannot use an expired public key or import token in an ImportKeyMaterial request. If your key and token expire, send another GetParametersForImport request.

        GetParametersForImport requires the following information:

        • The key ID of the KMS key for which you are importing the key material.

        • The key spec of the public key (\"wrapping key\") that you will use to encrypt your key material during import.

        • The wrapping algorithm that you will use with the public key to encrypt your key material.

        You can use the same or a different public key spec and wrapping algorithm each time you import or reimport the same key material.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:GetParametersForImport (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns the public key and an import token you need to import or reimport key material for a KMS key.

        By default, KMS keys are created with key material that KMS generates. This operation supports Importing key material, an advanced feature that lets you generate and import the cryptographic key material for a KMS key.

        Before calling GetParametersForImport, use the CreateKey operation with an Origin value of EXTERNAL to create a KMS key with no key material. You can import key material for a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key. You can also import key material into a multi-Region key of any supported type. However, you can't import key material into a KMS key in a custom key store. You can also use GetParametersForImport to get a public key and import token to reimport the original key material into a KMS key whose key material expired or was deleted.

        GetParametersForImport returns the items that you need to import your key material.

        • The public key (or \"wrapping key\") of an RSA key pair that KMS generates.

          You will use this public key to encrypt (\"wrap\") your key material while it's in transit to KMS.

        • An import token that ensures that KMS can decrypt your key material and associate it with the correct KMS key.

        The public key and its import token are permanently linked and must be used together. Each public key and import token set is valid for 24 hours. The expiration date and time appear in the ParametersValidTo field in the GetParametersForImport response. You cannot use an expired public key or import token in an ImportKeyMaterial request. If your key and token expire, send another GetParametersForImport request.

        GetParametersForImport requires the following information:

        • The key ID of the KMS key for which you are importing the key material.

        • The key spec of the public key (\"wrapping key\") that you will use to encrypt your key material during import.

        • The wrapping algorithm that you will use with the public key to encrypt your key material.

        You can use the same or a different public key spec and wrapping algorithm each time you import or reimport the same key material.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:GetParametersForImport (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
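
For illustration, here is a minimal sketch of this workflow with the AWS SDK for Java v2 KmsClient generated from this model: fetch the wrapping public key and import token, then wrap locally generated key material with RSA-OAEP. The key ID is a placeholder, and RSAES_OAEP_SHA_256 with an RSA_4096 wrapping key is just one valid pairing.

```java
import java.security.KeyFactory;
import java.security.PublicKey;
import java.security.spec.MGF1ParameterSpec;
import java.security.spec.X509EncodedKeySpec;
import javax.crypto.Cipher;
import javax.crypto.spec.OAEPParameterSpec;
import javax.crypto.spec.PSource;
import software.amazon.awssdk.services.kms.KmsClient;
import software.amazon.awssdk.services.kms.model.AlgorithmSpec;
import software.amazon.awssdk.services.kms.model.GetParametersForImportResponse;
import software.amazon.awssdk.services.kms.model.WrappingKeySpec;

public class WrapKeyMaterial {
    public static byte[] wrap(KmsClient kms, String keyId, byte[] keyMaterial) throws Exception {
        // Request a fresh wrapping key and import token; the pair expires after 24 hours.
        GetParametersForImportResponse params = kms.getParametersForImport(r -> r
            .keyId(keyId)
            .wrappingAlgorithm(AlgorithmSpec.RSAES_OAEP_SHA_256)
            .wrappingKeySpec(WrappingKeySpec.RSA_4096));

        // The response carries the public key as DER-encoded SubjectPublicKeyInfo bytes.
        PublicKey wrappingKey = KeyFactory.getInstance("RSA")
            .generatePublic(new X509EncodedKeySpec(params.publicKey().asByteArray()));

        // Encrypt ("wrap") the key material. MGF1 is pinned to SHA-256 explicitly because
        // some JCE providers default it to SHA-1, which would not match this algorithm choice.
        Cipher rsa = Cipher.getInstance("RSA/ECB/OAEPWithSHA-256AndMGF1Padding");
        rsa.init(Cipher.ENCRYPT_MODE, wrappingKey,
                 new OAEPParameterSpec("SHA-256", "MGF1", MGF1ParameterSpec.SHA256,
                                       PSource.PSpecified.DEFAULT));
        return rsa.doFinal(keyMaterial);
    }
}
```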

        " }, "GetPublicKey":{ "name":"GetPublicKey", @@ -566,7 +567,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Returns the public key of an asymmetric KMS key. Unlike the private key of an asymmetric KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey permission can download the public key of an asymmetric KMS key. You can share the public key to allow others to encrypt messages and verify signatures outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        You do not need to download the public key. Instead, you can use the public key within KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the public key within KMS, you benefit from the authentication, authorization, and logging that are part of every KMS operation. You also reduce the risk of encrypting data that cannot be decrypted. These features are not effective outside of KMS.

        To help you use the public key safely outside of KMS, GetPublicKey returns important information about the public key in the response, including:

        • KeySpec: The type of key material in the public key, such as RSA_4096 or ECC_NIST_P521.

        • KeyUsage: Whether the key is used for encryption, signing, or deriving a shared secret.

        • EncryptionAlgorithms or SigningAlgorithms: A list of the encryption algorithms or the signing algorithms for the key.

        Although KMS cannot enforce these restrictions on external operations, it is crucial that you use this information to prevent the public key from being used improperly. For example, you can prevent a public signing key from being used to encrypt data, or prevent a public key from being used with an encryption algorithm that is not supported by KMS. You can also avoid errors, such as using the wrong signing algorithm in a verification operation.

        To verify a signature outside of KMS with an SM2 public key (China Regions only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678 as the distinguishing ID. For more information, see Offline verification with SM2 key pairs.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GetPublicKey (key policy)

        Related operations: CreateKey

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns the public key of an asymmetric KMS key. Unlike the private key of an asymmetric KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey permission can download the public key of an asymmetric KMS key. You can share the public key to allow others to encrypt messages and verify signatures outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        You do not need to download the public key. Instead, you can use the public key within KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the public key within KMS, you benefit from the authentication, authorization, and logging that are part of every KMS operation. You also reduce the risk of encrypting data that cannot be decrypted. These features are not effective outside of KMS.

        To help you use the public key safely outside of KMS, GetPublicKey returns important information about the public key in the response, including:

        Although KMS cannot enforce these restrictions on external operations, it is crucial that you use this information to prevent the public key from being used improperly. For example, you can prevent a public signing key from being used to encrypt data, or prevent a public key from being used with an encryption algorithm that is not supported by KMS. You can also avoid errors, such as using the wrong signing algorithm in a verification operation.

        To verify a signature outside of KMS with an SM2 public key (China Regions only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678 as the distinguishing ID. For more information, see Offline verification with SM2 key pairs.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GetPublicKey (key policy)

        Related operations: CreateKey

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
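
As a sketch, the fields called out above map directly onto getters of the generated GetPublicKeyResponse (the alias below is a placeholder):

```java
import software.amazon.awssdk.services.kms.KmsClient;
import software.amazon.awssdk.services.kms.model.GetPublicKeyResponse;

public class DownloadPublicKey {
    public static void main(String[] args) {
        try (KmsClient kms = KmsClient.create()) {
            GetPublicKeyResponse pk = kms.getPublicKey(r -> r.keyId("alias/example-asymmetric-key"));
            // Record the restrictions that KMS cannot enforce once the key leaves the service.
            System.out.println("KeySpec:        " + pk.keySpec());
            System.out.println("KeyUsage:       " + pk.keyUsage());
            System.out.println("EncryptionAlgs: " + pk.encryptionAlgorithms());
            System.out.println("SigningAlgs:    " + pk.signingAlgorithms());
            // pk.publicKey() holds the DER-encoded SubjectPublicKeyInfo bytes.
        }
    }
}
```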

        " }, "ImportKeyMaterial":{ "name":"ImportKeyMaterial", @@ -588,7 +589,7 @@ {"shape":"ExpiredImportTokenException"}, {"shape":"InvalidImportTokenException"} ], - "documentation":"

        Imports or reimports key material into an existing KMS key that was created without key material. ImportKeyMaterial also sets the expiration model and expiration date of the imported key material.

        By default, KMS keys are created with key material that KMS generates. This operation supports Importing key material, an advanced feature that lets you generate and import the cryptographic key material for a KMS key. For more information about importing key material into KMS, see Importing key material in the Key Management Service Developer Guide.

        After you successfully import key material into a KMS key, you can reimport the same key material into that KMS key, but you cannot import different key material. You might reimport key material to replace key material that expired or key material that you deleted. You might also reimport key material to change the expiration model or expiration date of the key material.

        Each time you import key material into KMS, you can determine whether (ExpirationModel) and when (ValidTo) the key material expires. To change the expiration of your key material, you must import it again, either by calling ImportKeyMaterial or using the import features of the KMS console.

        Before calling ImportKeyMaterial:

        • Create or identify a KMS key with no key material. The KMS key must have an Origin value of EXTERNAL, which indicates that the KMS key is designed for imported key material.

          To create a new KMS key for imported key material, call the CreateKey operation with an Origin value of EXTERNAL. You can create a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key. You can also import key material into a multi-Region key of any supported type. However, you can't import key material into a KMS key in a custom key store.

        • Use the DescribeKey operation to verify that the KeyState of the KMS key is PendingImport, which indicates that the KMS key has no key material.

          If you are reimporting the same key material into an existing KMS key, you might need to call the DeleteImportedKeyMaterial operation to delete its existing key material.

        • Call the GetParametersForImport operation to get a public key and import token set for importing key material.

        • Use the public key in the GetParametersForImport response to encrypt your key material.

        Then, in an ImportKeyMaterial request, you submit your encrypted key material and import token. When calling this operation, you must specify the following values:

        • The key ID or key ARN of the KMS key to associate with the imported key material. Its Origin must be EXTERNAL and its KeyState must be PendingImport. You cannot perform this operation on a KMS key in a custom key store, or on a KMS key in a different Amazon Web Services account. To get the Origin and KeyState of a KMS key, call DescribeKey.

        • The encrypted key material.

        • The import token that GetParametersForImport returned. You must use a public key and token from the same GetParametersForImport response.

        • Whether the key material expires (ExpirationModel) and, if so, when (ValidTo). For help with this choice, see Setting an expiration time in the Key Management Service Developer Guide.

          If you set an expiration date, KMS deletes the key material from the KMS key on the specified date, making the KMS key unusable. To use the KMS key in cryptographic operations again, you must reimport the same key material. However, you can delete and reimport the key material at any time, including before the key material expires. Each time you reimport, you can eliminate or reset the expiration time.

        When this operation is successful, the key state of the KMS key changes from PendingImport to Enabled, and you can use the KMS key in cryptographic operations.

        If this operation fails, use the exception to help determine the problem. If the error is related to the key material, the import token, or wrapping key, use GetParametersForImport to get a new public key and import token for the KMS key and repeat the import procedure. For help, see How To Import Key Material in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ImportKeyMaterial (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Imports or reimports key material into an existing KMS key that was created without key material. You can also use this operation to set or update the expiration model and expiration date of the imported key material.

        By default, KMS creates KMS keys with key material that it generates. You can also generate and import your own key material. For more information about importing key material, see Importing key material.

        For asymmetric, HMAC, and multi-Region keys, you cannot change the key material after the initial import. You can import multiple key materials into single-Region symmetric encryption keys and rotate the key material on demand using RotateKeyOnDemand.

        After you import key material, you can reimport the same key material into that KMS key or, if the key supports on-demand rotation, import new key material. You can use the ImportType parameter to indicate whether you are importing new key material or reimporting previously imported key material. You might reimport key material to replace key material that expired or key material that you deleted. You might also reimport key material to change the expiration model or expiration date of the key material.

        Each time you import key material into KMS, you can determine whether (ExpirationModel) and when (ValidTo) the key material expires. To change the expiration of your key material, you must import it again, either by calling ImportKeyMaterial or using the import features of the KMS console.

        Before you call ImportKeyMaterial, complete these steps:

        • Create or identify a KMS key with EXTERNAL origin, which indicates that the KMS key is designed for imported key material.

          To create a new KMS key for imported key material, call the CreateKey operation with an Origin value of EXTERNAL. You can create a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, asymmetric key agreement key, or asymmetric signing KMS key. You can also import key material into a multi-Region key of any supported type. However, you can't import key material into a KMS key in a custom key store.

        • Call the GetParametersForImport operation to get a public key and import token set for importing key material.

        • Use the public key in the GetParametersForImport response to encrypt your key material.

        Then, in an ImportKeyMaterial request, you submit your encrypted key material and import token. When calling this operation, you must specify the following values:

        • The key ID or key ARN of the KMS key to associate with the imported key material. Its Origin must be EXTERNAL and its KeyState must be PendingImport. You cannot perform this operation on a KMS key in a custom key store, or on a KMS key in a different Amazon Web Services account. To get the Origin and KeyState of a KMS key, call DescribeKey.

        • The encrypted key material.

        • The import token that GetParametersForImport returned. You must use a public key and token from the same GetParametersForImport response.

        • Whether the key material expires (ExpirationModel) and, if so, when (ValidTo). For help with this choice, see Setting an expiration time in the Key Management Service Developer Guide.

          If you set an expiration date, KMS deletes the key material from the KMS key on the specified date, making the KMS key unusable. To use the KMS key in cryptographic operations again, you must reimport the same key material. However, you can delete and reimport the key material at any time, including before the key material expires. Each time you reimport, you can eliminate or reset the expiration time.

        When this operation is successful, the key state of the KMS key changes from PendingImport to Enabled, and you can use the KMS key in cryptographic operations. For single-Region symmetric encryption keys, you will need to import all of the key materials associated with the KMS key to change its state to Enabled. Use the ListKeyRotations operation to list the ID and import state of each key material associated with a KMS key.

        If this operation fails, use the exception to help determine the problem. If the error is related to the key material, the import token, or wrapping key, use GetParametersForImport to get a new public key and import token for the KMS key and repeat the import procedure. For help, see Create a KMS key with imported key material in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ImportKeyMaterial (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
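
A minimal sketch of the final import call, assuming importToken and wrappedMaterial come from the GetParametersForImport flow sketched earlier. ImportType is the parameter this model update introduces, so the value below assumes a client generated from this revision:

```java
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.services.kms.KmsClient;
import software.amazon.awssdk.services.kms.model.ExpirationModelType;
import software.amazon.awssdk.services.kms.model.ImportType;

public class ImportMaterial {
    public static void importMaterial(KmsClient kms, String keyId,
                                      SdkBytes importToken, byte[] wrappedMaterial) {
        kms.importKeyMaterial(r -> r
            .keyId(keyId)             // KMS key with Origin = EXTERNAL, KeyState = PendingImport
            .importToken(importToken) // must come from the same GetParametersForImport response
            .encryptedKeyMaterial(SdkBytes.fromByteArray(wrappedMaterial))
            .importType(ImportType.NEW_KEY_MATERIAL)
            .expirationModel(ExpirationModelType.KEY_MATERIAL_DOES_NOT_EXPIRE));
        // On success, the key state changes from PendingImport to Enabled.
    }
}
```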

        " }, "ListAliases":{ "name":"ListAliases", @@ -605,7 +606,7 @@ {"shape":"InvalidArnException"}, {"shape":"NotFoundException"} ], - "documentation":"

        Gets a list of aliases in the caller's Amazon Web Services account and region. For more information about aliases, see CreateAlias.

        By default, the ListAliases operation returns all aliases in the account and region. To get only the aliases associated with a particular KMS key, use the KeyId parameter.

        The ListAliases response can include aliases that you created and associated with your customer managed keys, and aliases that Amazon Web Services created and associated with Amazon Web Services managed keys in your account. You can recognize Amazon Web Services aliases because their names have the format aws/<service-name>, such as aws/dynamodb.

        The response might also include aliases that have no TargetKeyId field. These are predefined aliases that Amazon Web Services has created but has not yet associated with a KMS key. Aliases that Amazon Web Services creates in your account, including predefined aliases, do not count against your KMS aliases quota.

        Cross-account use: No. ListAliases does not return aliases in other Amazon Web Services accounts.

        Required permissions: kms:ListAliases (IAM policy)

        For details, see Controlling access to aliases in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Gets a list of aliases in the caller's Amazon Web Services account and region. For more information about aliases, see CreateAlias.

        By default, the ListAliases operation returns all aliases in the account and region. To get only the aliases associated with a particular KMS key, use the KeyId parameter.

        The ListAliases response can include aliases that you created and associated with your customer managed keys, and aliases that Amazon Web Services created and associated with Amazon Web Services managed keys in your account. You can recognize Amazon Web Services aliases because their names have the format aws/<service-name>, such as aws/dynamodb.

        The response might also include aliases that have no TargetKeyId field. These are predefined aliases that Amazon Web Services has created but has not yet associated with a KMS key. Aliases that Amazon Web Services creates in your account, including predefined aliases, do not count against your KMS aliases quota.

        Cross-account use: No. ListAliases does not return aliases in other Amazon Web Services accounts.

        Required permissions: kms:ListAliases (IAM policy)

        For details, see Controlling access to aliases in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
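
A short sketch using the generated paginator, which follows the pagination markers automatically:

```java
import software.amazon.awssdk.services.kms.KmsClient;

public class ShowAliases {
    public static void main(String[] args) {
        try (KmsClient kms = KmsClient.create()) {
            // targetKeyId() is null for predefined aliases not yet associated with a key.
            kms.listAliasesPaginator().aliases().forEach(a ->
                System.out.println(a.aliasName() + " -> " + a.targetKeyId()));
        }
    }
}
```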

        " }, "ListGrants":{ "name":"ListGrants", @@ -624,7 +625,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Gets a list of all grants for the specified KMS key.

        You must specify the KMS key in all requests. You can filter the grant list by grant ID or grantee principal.

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide. For examples of working with grants in several programming languages, see Programming grants.

        The GranteePrincipal field in the ListGrants response usually contains the user or role designated as the grantee principal in the grant. However, when the grantee principal in the grant is an Amazon Web Services service, the GranteePrincipal field contains the service principal, which might represent several different grantee principals.

        Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

        Required permissions: kms:ListGrants (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Gets a list of all grants for the specified KMS key.

        You must specify the KMS key in all requests. You can filter the grant list by grant ID or grantee principal.

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide. For examples of creating grants in several programming languages, see Use CreateGrant with an Amazon Web Services SDK or CLI.

        The GranteePrincipal field in the ListGrants response usually contains the user or role designated as the grantee principal in the grant. However, when the grantee principal in the grant is an Amazon Web Services service, the GranteePrincipal field contains the service principal, which might represent several different grantee principals.

        Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

        Required permissions: kms:ListGrants (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
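
A sketch of listing all grants for one key (the key ARN is a placeholder):

```java
import software.amazon.awssdk.services.kms.KmsClient;

public class ShowGrants {
    public static void main(String[] args) {
        // Placeholder key ARN; the full ARN is required for keys in other accounts.
        String keyArn = "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab";
        try (KmsClient kms = KmsClient.create()) {
            kms.listGrantsPaginator(r -> r.keyId(keyArn)).grants().forEach(g ->
                System.out.println(g.grantId() + " " + g.granteePrincipal() + " " + g.operations()));
        }
    }
}
```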

        " }, "ListKeyPolicies":{ "name":"ListKeyPolicies", @@ -641,7 +642,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Gets the names of the key policies that are attached to a KMS key. This operation is designed to get policy names that you can use in a GetKeyPolicy operation. However, the only valid policy name is default.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ListKeyPolicies (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Gets the names of the key policies that are attached to a KMS key. This operation is designed to get policy names that you can use in a GetKeyPolicy operation. However, the only valid policy name is default.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ListKeyPolicies (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
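
A minimal sketch; as noted above, the only name this returns in practice is default (the key ID is a placeholder):

```java
import software.amazon.awssdk.services.kms.KmsClient;

public class ShowKeyPolicyNames {
    public static void main(String[] args) {
        try (KmsClient kms = KmsClient.create()) {
            kms.listKeyPolicies(r -> r.keyId("1234abcd-12ab-34cd-56ef-1234567890ab"))
               .policyNames()
               .forEach(System.out::println); // prints "default"
        }
    }
}
```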

        " }, "ListKeyRotations":{ "name":"ListKeyRotations", @@ -659,7 +660,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

        Returns information about all completed key material rotations for the specified KMS key.

        You must specify the KMS key in all requests. You can refine the key rotations list by limiting the number of rotations returned.

        For detailed information about automatic and on-demand key rotations, see Rotating KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ListKeyRotations (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns information about the key materials associated with the specified KMS key. You can use the optional IncludeKeyMaterial parameter to control which key materials are included in the response.

        You must specify the KMS key in all requests. You can refine the key rotations list by limiting the number of rotations returned.

        For detailed information about automatic and on-demand key rotations, see Rotate KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ListKeyRotations (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
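
A sketch using the generated paginator (placeholder key ID); the optional IncludeKeyMaterial parameter added in this model revision is omitted here:

```java
import software.amazon.awssdk.services.kms.KmsClient;

public class ShowRotations {
    public static void main(String[] args) {
        try (KmsClient kms = KmsClient.create()) {
            kms.listKeyRotationsPaginator(r -> r.keyId("1234abcd-12ab-34cd-56ef-1234567890ab"))
               .rotations()
               .forEach(rot -> System.out.println(rot.rotationDate() + " " + rot.rotationType()));
        }
    }
}
```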

        " }, "ListKeys":{ "name":"ListKeys", @@ -674,7 +675,7 @@ {"shape":"KMSInternalException"}, {"shape":"InvalidMarkerException"} ], - "documentation":"

        Gets a list of all KMS keys in the caller's Amazon Web Services account and Region.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ListKeys (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Gets a list of all KMS keys in the caller's Amazon Web Services account and Region.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ListKeys (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
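
A minimal sketch that pages through every key in the account and Region:

```java
import software.amazon.awssdk.services.kms.KmsClient;

public class ShowKeys {
    public static void main(String[] args) {
        try (KmsClient kms = KmsClient.create()) {
            // The paginator issues as many ListKeys calls as needed behind the scenes.
            kms.listKeysPaginator().keys().forEach(k -> System.out.println(k.keyArn()));
        }
    }
}
```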

        " }, "ListResourceTags":{ "name":"ListResourceTags", @@ -690,7 +691,7 @@ {"shape":"InvalidArnException"}, {"shape":"InvalidMarkerException"} ], - "documentation":"

        Returns all tags on the specified KMS key.

        For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference. For information about using tags in KMS, see Tagging keys.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ListResourceTags (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns all tags on the specified KMS key.

        For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference. For information about using tags in KMS, see Tags in KMS.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ListResourceTags (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
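
A short sketch of dumping a key's tags (placeholder key ID):

```java
import software.amazon.awssdk.services.kms.KmsClient;

public class ShowTags {
    public static void main(String[] args) {
        try (KmsClient kms = KmsClient.create()) {
            kms.listResourceTagsPaginator(r -> r.keyId("1234abcd-12ab-34cd-56ef-1234567890ab"))
               .tags()
               .forEach(t -> System.out.println(t.tagKey() + " = " + t.tagValue()));
        }
    }
}
```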

        " }, "ListRetirableGrants":{ "name":"ListRetirableGrants", @@ -707,7 +708,7 @@ {"shape":"NotFoundException"}, {"shape":"KMSInternalException"} ], - "documentation":"

        Returns information about all grants in the Amazon Web Services account and Region that have the specified retiring principal.

        You can specify any principal in your Amazon Web Services account. The grants that are returned include grants for KMS keys in your Amazon Web Services account and other Amazon Web Services accounts. You might use this operation to determine which grants you may retire. To retire a grant, use the RetireGrant operation.

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide. For examples of working with grants in several programming languages, see Programming grants.

        Cross-account use: You must specify a principal in your Amazon Web Services account. This operation returns a list of grants where the retiring principal specified in the ListRetirableGrants request is the same retiring principal on the grant. This can include grants on KMS keys owned by other Amazon Web Services accounts, but you do not need kms:ListRetirableGrants permission (or any other additional permission) in any Amazon Web Services account other than your own.

        Required permissions: kms:ListRetirableGrants (IAM policy) in your Amazon Web Services account.

        KMS authorizes ListRetirableGrants requests by evaluating the caller account's kms:ListRetirableGrants permissions. The authorized resource in ListRetirableGrants calls is the retiring principal specified in the request. KMS does not evaluate the caller's permissions to verify their access to any KMS keys or grants that might be returned by the ListRetirableGrants call.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns information about all grants in the Amazon Web Services account and Region that have the specified retiring principal.

        You can specify any principal in your Amazon Web Services account. The grants that are returned include grants for KMS keys in your Amazon Web Services account and other Amazon Web Services accounts. You might use this operation to determine which grants you may retire. To retire a grant, use the RetireGrant operation.

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide. For examples of creating grants in several programming languages, see Use CreateGrant with an Amazon Web Services SDK or CLI.

        Cross-account use: You must specify a principal in your Amazon Web Services account. This operation returns a list of grants where the retiring principal specified in the ListRetirableGrants request is the same retiring principal on the grant. This can include grants on KMS keys owned by other Amazon Web Services accounts, but you do not need kms:ListRetirableGrants permission (or any other additional permission) in any Amazon Web Services account other than your own.

        Required permissions: kms:ListRetirableGrants (IAM policy) in your Amazon Web Services account.

        KMS authorizes ListRetirableGrants requests by evaluating the caller account's kms:ListRetirableGrants permissions. The authorized resource in ListRetirableGrants calls is the retiring principal specified in the request. KMS does not evaluate the caller's permissions to verify their access to any KMS keys or grants that might be returned by the ListRetirableGrants call.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
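
A sketch, assuming the retiring principal's ARN is known (placeholder values):

```java
import software.amazon.awssdk.services.kms.KmsClient;

public class ShowRetirableGrants {
    public static void main(String[] args) {
        String principal = "arn:aws:iam::111122223333:role/ExampleRole"; // placeholder retiring principal
        try (KmsClient kms = KmsClient.create()) {
            kms.listRetirableGrantsPaginator(r -> r.retiringPrincipal(principal))
               .grants()
               .forEach(g -> System.out.println(g.keyId() + " " + g.grantId()));
        }
    }
}
```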

        " }, "PutKeyPolicy":{ "name":"PutKeyPolicy", @@ -726,7 +727,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Attaches a key policy to the specified KMS key.

        For more information about key policies, see Key Policies in the Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide. For examples of adding a key policy in multiple programming languages, see Setting a key policy in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:PutKeyPolicy (key policy)

        Related operations: GetKeyPolicy

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Attaches a key policy to the specified KMS key.

        For more information about key policies, see Key Policies in the Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide. For examples of adding a key policy in multiple programming languages, see Use PutKeyPolicy with an Amazon Web Services SDK or CLI in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:PutKeyPolicy (key policy)

        Related operations: GetKeyPolicy

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
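
A minimal sketch, assuming policyJson is a complete key policy document:

```java
import software.amazon.awssdk.services.kms.KmsClient;

public class AttachKeyPolicy {
    public static void attach(KmsClient kms, String keyId, String policyJson) {
        // "default" is currently the only policy name KMS accepts.
        kms.putKeyPolicy(r -> r.keyId(keyId)
                               .policyName("default")
                               .policy(policyJson));
    }
}
```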

        " }, "ReEncrypt":{ "name":"ReEncrypt", @@ -749,7 +750,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Decrypts ciphertext and then reencrypts it entirely within KMS. You can use this operation to change the KMS key under which data is encrypted, such as when you manually rotate a KMS key or change the KMS key that protects a ciphertext. You can also use it to reencrypt ciphertext under the same KMS key, such as to change the encryption context of a ciphertext.

        The ReEncrypt operation can decrypt ciphertext that was encrypted by using a KMS key in a KMS operation, such as Encrypt or GenerateDataKey. It can also decrypt ciphertext that was encrypted by using the public key of an asymmetric KMS key outside of KMS. However, it cannot decrypt ciphertext produced by other libraries, such as the Amazon Web Services Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with KMS.

        When you use the ReEncrypt operation, you need to provide information for the decrypt operation and the subsequent encrypt operation.

        • If your ciphertext was encrypted under an asymmetric KMS key, you must use the SourceKeyId parameter to identify the KMS key that encrypted the ciphertext. You must also supply the encryption algorithm that was used. This information is required to decrypt the data.

        • If your ciphertext was encrypted under a symmetric encryption KMS key, the SourceKeyId parameter is optional. KMS can get this information from metadata that it adds to the symmetric ciphertext blob. This feature adds durability to your implementation by ensuring that authorized users can decrypt ciphertext decades after it was encrypted, even if they've lost track of the key ID. However, specifying the source KMS key is always recommended as a best practice. When you use the SourceKeyId parameter to specify a KMS key, KMS uses only the KMS key you specify. If the ciphertext was encrypted under a different KMS key, the ReEncrypt operation fails. This practice ensures that you use the KMS key that you intend.

        • To reencrypt the data, you must use the DestinationKeyId parameter to specify the KMS key that re-encrypts the data after it is decrypted. If the destination KMS key is an asymmetric KMS key, you must also provide the encryption algorithm. The algorithm that you choose must be compatible with the KMS key.

          When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.

          You are not required to supply the key ID and encryption algorithm when you decrypt with symmetric encryption KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. The source KMS key and destination KMS key can be in different Amazon Web Services accounts. Either or both KMS keys can be in a different account than the caller. To specify a KMS key in a different account, you must use its key ARN or alias ARN.

        Required permissions:

        To permit reencryption from or to a KMS key, include the \"kms:ReEncrypt*\" permission in your key policy. This permission is automatically included in the key policy when you use the console to create a KMS key. But you must include it manually when you create a KMS key programmatically or when you use the PutKeyPolicy operation to set a key policy.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Decrypts ciphertext and then reencrypts it entirely within KMS. You can use this operation to change the KMS key under which data is encrypted, such as when you manually rotate a KMS key or change the KMS key that protects a ciphertext. You can also use it to reencrypt ciphertext under the same KMS key, such as to change the encryption context of a ciphertext.

        The ReEncrypt operation can decrypt ciphertext that was encrypted by using a KMS key in a KMS operation, such as Encrypt or GenerateDataKey. It can also decrypt ciphertext that was encrypted by using the public key of an asymmetric KMS key outside of KMS. However, it cannot decrypt ciphertext produced by other libraries, such as the Amazon Web Services Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with KMS.

        When you use the ReEncrypt operation, you need to provide information for the decrypt operation and the subsequent encrypt operation.

        • If your ciphertext was encrypted under an asymmetric KMS key, you must use the SourceKeyId parameter to identify the KMS key that encrypted the ciphertext. You must also supply the encryption algorithm that was used. This information is required to decrypt the data.

        • If your ciphertext was encrypted under a symmetric encryption KMS key, the SourceKeyId parameter is optional. KMS can get this information from metadata that it adds to the symmetric ciphertext blob. This feature adds durability to your implementation by ensuring that authorized users can decrypt ciphertext decades after it was encrypted, even if they've lost track of the key ID. However, specifying the source KMS key is always recommended as a best practice. When you use the SourceKeyId parameter to specify a KMS key, KMS uses only the KMS key you specify. If the ciphertext was encrypted under a different KMS key, the ReEncrypt operation fails. This practice ensures that you use the KMS key that you intend.

        • To reencrypt the data, you must use the DestinationKeyId parameter to specify the KMS key that re-encrypts the data after it is decrypted. If the destination KMS key is an asymmetric KMS key, you must also provide the encryption algorithm. The algorithm that you choose must be compatible with the KMS key.

          When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.

          You are not required to supply the key ID and encryption algorithm when you decrypt with symmetric encryption KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. The source KMS key and destination KMS key can be in different Amazon Web Services accounts. Either or both KMS keys can be in a different account than the caller. To specify a KMS key in a different account, you must use its key ARN or alias ARN.

        Required permissions:

        To permit reencryption from or to a KMS key, include the \"kms:ReEncrypt*\" permission in your key policy. This permission is automatically included in the key policy when you use the console to create a KMS key. But you must include it manually when you create a KMS key programmatically or when you use the PutKeyPolicy operation to set a key policy.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
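
A sketch of moving symmetric ciphertext to a new key; SourceKeyId is deliberately omitted, which relies on the metadata embedded in symmetric ciphertext blobs (the destination alias is a placeholder):

```java
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.services.kms.KmsClient;
import software.amazon.awssdk.services.kms.model.ReEncryptResponse;

public class MoveCiphertext {
    public static SdkBytes reencrypt(KmsClient kms, SdkBytes ciphertext) {
        // Omitting sourceKeyId only works for symmetric ciphertext, where KMS reads
        // the source key from metadata in the blob; asymmetric ciphertext requires it.
        ReEncryptResponse resp = kms.reEncrypt(r -> r
            .ciphertextBlob(ciphertext)
            .destinationKeyId("alias/new-key"));
        return resp.ciphertextBlob();
    }
}
```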

        " }, "ReplicateKey":{ "name":"ReplicateKey", @@ -771,7 +772,7 @@ {"shape":"TagException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

        Replicates a multi-Region key into the specified Region. This operation creates a multi-Region replica key based on a multi-Region primary key in a different Region of the same Amazon Web Services partition. You can create multiple replicas of a primary key, but each must be in a different Region. To create a multi-Region primary key, use the CreateKey operation.

        This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

        A replica key is a fully-functional KMS key that can be used independently of its primary and peer replica keys. A primary key and its replica keys share properties that make them interoperable. They have the same key ID and key material. They also have the same key spec, key usage, key material origin, and automatic key rotation status. KMS automatically synchronizes these shared properties among related multi-Region keys. All other properties of a replica key can differ, including its key policy, tags, aliases, and Key states of KMS keys. KMS pricing and quotas for KMS keys apply to each primary key and replica key.

        When this operation completes, the new replica key has a transient key state of Creating. This key state changes to Enabled (or PendingImport) after a few seconds when the process of creating the new replica key is complete. While the key state is Creating, you can manage the key, but you cannot yet use it in cryptographic operations. If you are creating and using the replica key programmatically, retry on KMSInvalidStateException or call DescribeKey to check its KeyState value before using it. For details about the Creating key state, see Key states of KMS keys in the Key Management Service Developer Guide.

        You cannot create more than one replica of a primary key in any Region. If the Region already includes a replica of the key you're trying to replicate, ReplicateKey returns an AlreadyExistsException error. If the key state of the existing replica is PendingDeletion, you can cancel the scheduled key deletion (CancelKeyDeletion) or wait for the key to be deleted. The new replica key you create will have the same shared properties as the original replica key.

        The CloudTrail log of a ReplicateKey operation records a ReplicateKey operation in the primary key's Region and a CreateKey operation in the replica key's Region.

        If you replicate a multi-Region primary key with imported key material, the replica key is created with no key material. You must import the same key material that you imported into the primary key. For details, see Importing key material into multi-Region keys in the Key Management Service Developer Guide.

        To convert a replica key to a primary key, use the UpdatePrimaryRegion operation.

        ReplicateKey uses different default values for the KeyPolicy and Tags parameters than those used in the KMS console. For details, see the parameter descriptions.

        Cross-account use: No. You cannot use this operation to create a replica key in a different Amazon Web Services account.

        Required permissions:

        • kms:ReplicateKey on the primary key (in the primary key's Region). Include this permission in the primary key's key policy.

        • kms:CreateKey in an IAM policy in the replica Region.

        • To use the Tags parameter, kms:TagResource in an IAM policy in the replica Region.

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Replicates a multi-Region key into the specified Region. This operation creates a multi-Region replica key based on a multi-Region primary key in a different Region of the same Amazon Web Services partition. You can create multiple replicas of a primary key, but each must be in a different Region. To create a multi-Region primary key, use the CreateKey operation.

        This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

        A replica key is a fully-functional KMS key that can be used independently of its primary and peer replica keys. A primary key and its replica keys share properties that make them interoperable. They have the same key ID and key material. They also have the same key spec, key usage, key material origin, and automatic key rotation status. KMS automatically synchronizes these shared properties among related multi-Region keys. All other properties of a replica key can differ, including its key policy, tags, aliases, and key state. KMS pricing and quotas for KMS keys apply to each primary key and replica key.

        When this operation completes, the new replica key has a transient key state of Creating. This key state changes to Enabled (or PendingImport) after a few seconds when the process of creating the new replica key is complete. While the key state is Creating, you can manage the key, but you cannot yet use it in cryptographic operations. If you are creating and using the replica key programmatically, retry on KMSInvalidStateException or call DescribeKey to check its KeyState value before using it. For details about the Creating key state, see Key states of KMS keys in the Key Management Service Developer Guide.

        You cannot create more than one replica of a primary key in any Region. If the Region already includes a replica of the key you're trying to replicate, ReplicateKey returns an AlreadyExistsException error. If the key state of the existing replica is PendingDeletion, you can cancel the scheduled key deletion (CancelKeyDeletion) or wait for the key to be deleted. The new replica key you create will have the same shared properties as the original replica key.

        The CloudTrail log of a ReplicateKey operation records a ReplicateKey operation in the primary key's Region and a CreateKey operation in the replica key's Region.

        If you replicate a multi-Region primary key with imported key material, the replica key is created with no key material. You must import the same key material that you imported into the primary key.

        To convert a replica key to a primary key, use the UpdatePrimaryRegion operation.

        ReplicateKey uses different default values for the KeyPolicy and Tags parameters than those used in the KMS console. For details, see the parameter descriptions.

        Cross-account use: No. You cannot use this operation to create a replica key in a different Amazon Web Services account.

        Required permissions:

        • kms:ReplicateKey on the primary key (in the primary key's Region). Include this permission in the primary key's key policy.

        • kms:CreateKey in an IAM policy in the replica Region.

        • To use the Tags parameter, kms:TagResource in an IAM policy in the replica Region.

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
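
A sketch of replicating a primary key (placeholder ARN and Region):

```java
import software.amazon.awssdk.services.kms.KmsClient;
import software.amazon.awssdk.services.kms.model.ReplicateKeyResponse;

public class Replicate {
    public static void main(String[] args) {
        String primaryArn = "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab";
        try (KmsClient kms = KmsClient.create()) {
            ReplicateKeyResponse resp = kms.replicateKey(r -> r
                .keyId(primaryArn)            // must identify the multi-Region primary key
                .replicaRegion("us-west-2")); // placeholder target Region
            // The replica starts in the transient Creating state; call DescribeKey
            // (or retry on KMSInvalidStateException) before using it.
            System.out.println(resp.replicaKeyMetadata().arn());
        }
    }
}
```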

        " }, "RetireGrant":{ "name":"RetireGrant", @@ -790,7 +791,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Deletes a grant. Typically, you retire a grant when you no longer need its permissions. To identify the grant to retire, use a grant token, or both the grant ID and a key identifier (key ID or key ARN) of the KMS key. The CreateGrant operation returns both values.

        This operation can be called by the retiring principal for a grant, by the grantee principal if the grant allows the RetireGrant operation, and by the Amazon Web Services account in which the grant is created. It can also be called by principals to whom permission for retiring a grant is delegated. For details, see Retiring and revoking grants in the Key Management Service Developer Guide.

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide. For examples of working with grants in several programming languages, see Programming grants.

        Cross-account use: Yes. You can retire a grant on a KMS key in a different Amazon Web Services account.

        Required permissions: Permission to retire a grant is determined primarily by the grant. For details, see Retiring and revoking grants in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Deletes a grant. Typically, you retire a grant when you no longer need its permissions. To identify the grant to retire, use a grant token, or both the grant ID and a key identifier (key ID or key ARN) of the KMS key. The CreateGrant operation returns both values.

        This operation can be called by the retiring principal for a grant, by the grantee principal if the grant allows the RetireGrant operation, and by the Amazon Web Services account in which the grant is created. It can also be called by principals to whom permission for retiring a grant is delegated.

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide. For examples of creating grants in several programming languages, see Use CreateGrant with an Amazon Web Services SDK or CLI.

        Cross-account use: Yes. You can retire a grant on a KMS key in a different Amazon Web Services account.

        Required permissions: Permission to retire a grant is determined primarily by the grant. For details, see Retiring and revoking grants in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
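
A minimal sketch using the grant ID plus key ARN form of identification:

```java
import software.amazon.awssdk.services.kms.KmsClient;

public class Retire {
    public static void retire(KmsClient kms, String keyArn, String grantId) {
        // Alternatively, identify the grant with .grantToken(...) alone.
        kms.retireGrant(r -> r.keyId(keyArn).grantId(grantId));
    }
}
```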

        " }, "RevokeGrant":{ "name":"RevokeGrant", @@ -808,7 +809,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Deletes the specified grant. You revoke a grant to terminate the permissions that the grant allows. For more information, see Retiring and revoking grants in the Key Management Service Developer Guide.

        When you create, retire, or revoke a grant, there might be a brief delay, usually less than five minutes, until the grant is available throughout KMS. This state is known as eventual consistency. For details, see Eventual consistency in the Key Management Service Developer Guide.

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide. For examples of working with grants in several programming languages, see Programming grants.

        Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

        Required permissions: kms:RevokeGrant (key policy).

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Deletes the specified grant. You revoke a grant to terminate the permissions that the grant allows. For more information, see Retiring and revoking grants in the Key Management Service Developer Guide.

        When you create, retire, or revoke a grant, there might be a brief delay, usually less than five minutes, until the grant is available throughout KMS. This state is known as eventual consistency. For details, see Eventual consistency in the Key Management Service Developer Guide.

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide. For examples of creating grants in several programming languages, see Use CreateGrant with an Amazon Web Services SDK or CLI.

        Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

        Required permissions: kms:RevokeGrant (key policy).

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
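
A minimal sketch (placeholder identifiers):

```java
import software.amazon.awssdk.services.kms.KmsClient;

public class Revoke {
    public static void revoke(KmsClient kms, String keyArn, String grantId) {
        // For a key in another account, keyArn must be the full key ARN.
        kms.revokeGrant(r -> r.keyId(keyArn).grantId(grantId));
    }
}
```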

        " }, "RotateKeyOnDemand":{ "name":"RotateKeyOnDemand", @@ -829,7 +830,7 @@ {"shape":"LimitExceededException"}, {"shape":"ConflictException"} ], - "documentation":"

        Immediately initiates rotation of the key material of the specified symmetric encryption KMS key.

        You can perform on-demand rotation of the key material in customer managed KMS keys, regardless of whether or not automatic key rotation is enabled. On-demand rotations do not change existing automatic rotation schedules. For example, consider a KMS key that has automatic key rotation enabled with a rotation period of 730 days. If the key is scheduled to automatically rotate on April 14, 2024, and you perform an on-demand rotation on April 10, 2024, the key will automatically rotate, as scheduled, on April 14, 2024 and every 730 days thereafter.

        You can perform on-demand key rotation a maximum of 10 times per KMS key. You can use the KMS console to view the number of remaining on-demand rotations available for a KMS key.

        You can use GetKeyRotationStatus to identify any in progress on-demand rotations. You can use ListKeyRotations to identify the date that completed on-demand rotations were performed. You can monitor rotation of the key material for your KMS keys in CloudTrail and Amazon CloudWatch.

        On-demand key rotation is supported only on symmetric encryption KMS keys. You cannot perform on-demand rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To perform on-demand rotation of a set of related multi-Region keys, invoke the on-demand rotation on the primary key.

        You cannot initiate on-demand rotation of Amazon Web Services managed KMS keys. KMS always rotates the key material of Amazon Web Services managed keys every year. Rotation of Amazon Web Services owned KMS keys is managed by the Amazon Web Services service that owns the key.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:RotateKeyOnDemand (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Immediately initiates rotation of the key material of the specified symmetric encryption KMS key.

        You can perform on-demand rotation of the key material in customer managed KMS keys, regardless of whether or not automatic key rotation is enabled. On-demand rotations do not change existing automatic rotation schedules. For example, consider a KMS key that has automatic key rotation enabled with a rotation period of 730 days. If the key is scheduled to automatically rotate on April 14, 2024, and you perform an on-demand rotation on April 10, 2024, the key will automatically rotate, as scheduled, on April 14, 2024 and every 730 days thereafter.

        You can perform on-demand key rotation a maximum of 10 times per KMS key. You can use the KMS console to view the number of remaining on-demand rotations available for a KMS key.

        You can use GetKeyRotationStatus to identify any in-progress on-demand rotations. You can use ListKeyRotations to identify the date that completed on-demand rotations were performed. You can monitor rotation of the key material for your KMS keys in CloudTrail and Amazon CloudWatch.

        On-demand key rotation is supported only on symmetric encryption KMS keys. You cannot perform on-demand rotation of asymmetric KMS keys, HMAC KMS keys, multi-Region KMS keys with imported key material, or KMS keys in a custom key store. When you initiate on-demand key rotation on a symmetric encryption KMS key with imported key material, you must have already imported new key material and that key material's state should be PENDING_ROTATION. Use the ListKeyRotations operation to check the state of all key materials associated with a KMS key. To perform on-demand rotation of a set of related multi-Region keys, invoke the on-demand rotation on the primary key.

        You cannot initiate on-demand rotation of Amazon Web Services managed KMS keys. KMS always rotates the key material of Amazon Web Services managed keys every year. Rotation of Amazon Web Services owned KMS keys is managed by the Amazon Web Services service that owns the key.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:RotateKeyOnDemand (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "ScheduleKeyDeletion":{ "name":"ScheduleKeyDeletion", @@ -846,7 +847,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Schedules the deletion of a KMS key. By default, KMS applies a waiting period of 30 days, but you can specify a waiting period of 7-30 days. When this operation is successful, the key state of the KMS key changes to PendingDeletion and the key can't be used in any cryptographic operations. It remains in this state for the duration of the waiting period. Before the waiting period ends, you can use CancelKeyDeletion to cancel the deletion of the KMS key. After the waiting period ends, KMS deletes the KMS key, its key material, and all KMS data associated with it, including all aliases that refer to it.

        Deleting a KMS key is a destructive and potentially dangerous operation. When a KMS key is deleted, all data that was encrypted under the KMS key is unrecoverable. (The only exception is a multi-Region replica key, or an asymmetric or HMAC KMS key with imported key material.) To prevent the use of a KMS key without deleting it, use DisableKey.

        You can schedule the deletion of a multi-Region primary key and its replica keys at any time. However, KMS will not delete a multi-Region primary key with existing replica keys. If you schedule the deletion of a primary key with replicas, its key state changes to PendingReplicaDeletion and it cannot be replicated or used in cryptographic operations. This status can continue indefinitely. When the last of its replicas keys is deleted (not just scheduled), the key state of the primary key changes to PendingDeletion and its waiting period (PendingWindowInDays) begins. For details, see Deleting multi-Region keys in the Key Management Service Developer Guide.

        When KMS deletes a KMS key from an CloudHSM key store, it makes a best effort to delete the associated key material from the associated CloudHSM cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups. Deleting a KMS key from an external key store has no effect on the associated external key. However, for both types of custom key stores, deleting a KMS key is destructive and irreversible. You cannot decrypt ciphertext encrypted under the KMS key by using only its associated external key or CloudHSM key. Also, you cannot recreate a KMS key in an external key store by creating a new KMS key with the same key material.

        For more information about scheduling a KMS key for deletion, see Deleting KMS keys in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ScheduleKeyDeletion (key policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Schedules the deletion of a KMS key. By default, KMS applies a waiting period of 30 days, but you can specify a waiting period of 7-30 days. When this operation is successful, the key state of the KMS key changes to PendingDeletion and the key can't be used in any cryptographic operations. It remains in this state for the duration of the waiting period. Before the waiting period ends, you can use CancelKeyDeletion to cancel the deletion of the KMS key. After the waiting period ends, KMS deletes the KMS key, its key material, and all KMS data associated with it, including all aliases that refer to it.

        Deleting a KMS key is a destructive and potentially dangerous operation. When a KMS key is deleted, all data that was encrypted under the KMS key is unrecoverable. (The only exception is a multi-Region replica key, or an asymmetric or HMAC KMS key with imported key material.) To prevent the use of a KMS key without deleting it, use DisableKey.

        You can schedule the deletion of a multi-Region primary key and its replica keys at any time. However, KMS will not delete a multi-Region primary key with existing replica keys. If you schedule the deletion of a primary key with replicas, its key state changes to PendingReplicaDeletion and it cannot be replicated or used in cryptographic operations. This status can continue indefinitely. When the last of its replica keys is deleted (not just scheduled), the key state of the primary key changes to PendingDeletion and its waiting period (PendingWindowInDays) begins. For details, see Deleting multi-Region keys in the Key Management Service Developer Guide.

        When KMS deletes a KMS key from a CloudHSM key store, it makes a best effort to delete the associated key material from the associated CloudHSM cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups. Deleting a KMS key from an external key store has no effect on the associated external key. However, for both types of custom key stores, deleting a KMS key is destructive and irreversible. You cannot decrypt ciphertext encrypted under the KMS key by using only its associated external key or CloudHSM key. Also, you cannot recreate a KMS key in an external key store by creating a new KMS key with the same key material.

        For more information about scheduling a KMS key for deletion, see Deleting KMS keys in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ScheduleKeyDeletion (key policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "Sign":{ "name":"Sign", @@ -867,7 +868,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Creates a digital signature for a message or message digest by using the private key in an asymmetric signing KMS key. To verify the signature, use the Verify operation, or use the public key in the same asymmetric KMS key outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        Digital signatures are generated and verified by using asymmetric key pair, such as an RSA or ECC pair that is represented by an asymmetric KMS key. The key owner (or an authorized user) uses their private key to sign a message. Anyone with the public key can verify that the message was signed with that particular private key and that the message hasn't changed since it was signed.

        To use the Sign operation, provide the following information:

        • Use the KeyId parameter to identify an asymmetric KMS key with a KeyUsage value of SIGN_VERIFY. To get the KeyUsage value of a KMS key, use the DescribeKey operation. The caller must have kms:Sign permission on the KMS key.

        • Use the Message parameter to specify the message or message digest to sign. You can submit messages of up to 4096 bytes. To sign a larger message, generate a hash digest of the message, and then provide the hash digest in the Message parameter. To indicate whether the message is a full message or a digest, use the MessageType parameter.

        • Choose a signing algorithm that is compatible with the KMS key.

        When signing a message, be sure to record the KMS key and the signing algorithm. This information is required to verify the signature.

        Best practices recommend that you limit the time during which any signature is effective. This deters an attack where the actor uses a signed message to establish validity repeatedly or long after the message is superseded. Signatures do not include a timestamp, but you can include a timestamp in the signed message to help you detect when its time to refresh the signature.

        To verify the signature that this operation generates, use the Verify operation. Or use the GetPublicKey operation to download the public key and then use the public key to verify the signature outside of KMS.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:Sign (key policy)

        Related operations: Verify

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Creates a digital signature for a message or message digest by using the private key in an asymmetric signing KMS key. To verify the signature, use the Verify operation, or use the public key in the same asymmetric KMS key outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        Digital signatures are generated and verified by using an asymmetric key pair, such as an RSA, ECC, or ML-DSA pair that is represented by an asymmetric KMS key. The key owner (or an authorized user) uses their private key to sign a message. Anyone with the public key can verify that the message was signed with that particular private key and that the message hasn't changed since it was signed.

        To use the Sign operation, provide the following information:

        • Use the KeyId parameter to identify an asymmetric KMS key with a KeyUsage value of SIGN_VERIFY. To get the KeyUsage value of a KMS key, use the DescribeKey operation. The caller must have kms:Sign permission on the KMS key.

        • Use the Message parameter to specify the message or message digest to sign. You can submit messages of up to 4096 bytes. To sign a larger message, generate a hash digest of the message, and then provide the hash digest in the Message parameter. To indicate whether the message is a full message, a digest, or an ML-DSA EXTERNAL_MU, use the MessageType parameter.

        • Choose a signing algorithm that is compatible with the KMS key.

        When signing a message, be sure to record the KMS key and the signing algorithm. This information is required to verify the signature.

        Best practices recommend that you limit the time during which any signature is effective. This deters an attack where the actor uses a signed message to establish validity repeatedly or long after the message is superseded. Signatures do not include a timestamp, but you can include a timestamp in the signed message to help you detect when it's time to refresh the signature.

        To verify the signature that this operation generates, use the Verify operation. Or use the GetPublicKey operation to download the public key and then use the public key to verify the signature outside of KMS.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:Sign (key policy)

        Related operations: Verify

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "TagResource":{ "name":"TagResource", @@ -884,7 +885,7 @@ {"shape":"LimitExceededException"}, {"shape":"TagException"} ], - "documentation":"

        Adds or edits tags on a customer managed key.

        Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        Each tag consists of a tag key and a tag value, both of which are case-sensitive strings. The tag value can be an empty (null) string. To add a tag, specify a new tag key and a tag value. To edit a tag, specify an existing tag key and a new tag value.

        You can use this operation to tag a customer managed key, but you cannot tag an Amazon Web Services managed key, an Amazon Web Services owned key, a custom key store, or an alias.

        You can also add tags to a KMS key while creating it (CreateKey) or replicating it (ReplicateKey).

        For information about using tags in KMS, see Tagging keys. For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:TagResource (key policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Adds or edits tags on a customer managed key.

        Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        Each tag consists of a tag key and a tag value, both of which are case-sensitive strings. The tag value can be an empty (null) string. To add a tag, specify a new tag key and a tag value. To edit a tag, specify an existing tag key and a new tag value.

        You can use this operation to tag a customer managed key, but you cannot tag an Amazon Web Services managed key, an Amazon Web Services owned key, a custom key store, or an alias.

        You can also add tags to a KMS key while creating it (CreateKey) or replicating it (ReplicateKey).

        For information about using tags in KMS, see Tagging keys. For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:TagResource (key policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "UntagResource":{ "name":"UntagResource", @@ -900,7 +901,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"TagException"} ], - "documentation":"

        Deletes tags from a customer managed key. To delete a tag, specify the tag key and the KMS key.

        Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        When it succeeds, the UntagResource operation doesn't return any output. Also, if the specified tag key isn't found on the KMS key, it doesn't throw an exception or return a response. To confirm that the operation worked, use the ListResourceTags operation.

        For information about using tags in KMS, see Tagging keys. For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:UntagResource (key policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Deletes tags from a customer managed key. To delete a tag, specify the tag key and the KMS key.

        Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        When it succeeds, the UntagResource operation doesn't return any output. Also, if the specified tag key isn't found on the KMS key, it doesn't throw an exception or return a response. To confirm that the operation worked, use the ListResourceTags operation.

        For information about using tags in KMS, see Tagging keys. For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:UntagResource (key policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "UpdateAlias":{ "name":"UpdateAlias", @@ -916,7 +917,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Associates an existing KMS alias with a different KMS key. Each alias is associated with only one KMS key at a time, although a KMS key can have multiple aliases. The alias and the KMS key must be in the same Amazon Web Services account and Region.

        Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        The current and new KMS key must be the same type (both symmetric or both asymmetric or both HMAC), and they must have the same key usage. This restriction prevents errors in code that uses aliases. If you must assign an alias to a different type of KMS key, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

        You cannot use UpdateAlias to change an alias name. To change an alias name, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

        Because an alias is not a property of a KMS key, you can create, update, and delete the aliases of a KMS key without affecting the KMS key. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all KMS keys in the account, use the ListAliases operation.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions

        For details, see Controlling access to aliases in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Associates an existing KMS alias with a different KMS key. Each alias is associated with only one KMS key at a time, although a KMS key can have multiple aliases. The alias and the KMS key must be in the same Amazon Web Services account and Region.

        Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        The current and new KMS key must be the same type (both symmetric or both asymmetric or both HMAC), and they must have the same key usage. This restriction prevents errors in code that uses aliases. If you must assign an alias to a different type of KMS key, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

        You cannot use UpdateAlias to change an alias name. To change an alias name, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

        Because an alias is not a property of a KMS key, you can create, update, and delete the aliases of a KMS key without affecting the KMS key. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all KMS keys in the account, use the ListAliases operation.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions

        For details, see Controlling access to aliases in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "UpdateCustomKeyStore":{ "name":"UpdateCustomKeyStore", @@ -945,7 +946,7 @@ {"shape":"XksProxyInvalidResponseException"}, {"shape":"XksProxyInvalidConfigurationException"} ], - "documentation":"

        Changes the properties of a custom key store. You can use this operation to change the properties of an CloudHSM key store or an external key store.

        Use the required CustomKeyStoreId parameter to identify the custom key store. Use the remaining optional parameters to change its properties. This operation does not return any property values. To verify the updated property values, use the DescribeCustomKeyStores operation.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        When updating the properties of an external key store, verify that the updated settings connect your key store, via the external key store proxy, to the same external key manager as the previous settings, or to a backup or snapshot of the external key manager with the same cryptographic keys. If the updated connection settings fail, you can fix them and retry, although an extended delay might disrupt Amazon Web Services services. However, if KMS permanently loses its access to cryptographic keys, ciphertext encrypted under those keys is unrecoverable.

        For external key stores:

        Some external key managers provide a simpler method for updating an external key store. For details, see your external key manager documentation.

        When updating an external key store in the KMS console, you can upload a JSON-based proxy configuration file with the desired values. You cannot upload the proxy configuration file to the UpdateCustomKeyStore operation. However, you can use the file to help you determine the correct values for the UpdateCustomKeyStore parameters.

        For an CloudHSM key store, you can use this operation to change the custom key store friendly name (NewCustomKeyStoreName), to tell KMS about a change to the kmsuser crypto user password (KeyStorePassword), or to associate the custom key store with a different, but related, CloudHSM cluster (CloudHsmClusterId). To update any property of an CloudHSM key store, the ConnectionState of the CloudHSM key store must be DISCONNECTED.

        For an external key store, you can use this operation to change the custom key store friendly name (NewCustomKeyStoreName), or to tell KMS about a change to the external key store proxy authentication credentials (XksProxyAuthenticationCredential), connection method (XksProxyConnectivity), external proxy endpoint (XksProxyUriEndpoint) and path (XksProxyUriPath). For external key stores with an XksProxyConnectivity of VPC_ENDPOINT_SERVICE, you can also update the Amazon VPC endpoint service name (XksProxyVpcEndpointServiceName). To update most properties of an external key store, the ConnectionState of the external key store must be DISCONNECTED. However, you can update the CustomKeyStoreName, XksProxyAuthenticationCredential, and XksProxyUriPath of an external key store when it is in the CONNECTED or DISCONNECTED state.

        If your update requires a DISCONNECTED state, before using UpdateCustomKeyStore, use the DisconnectCustomKeyStore operation to disconnect the custom key store. After the UpdateCustomKeyStore operation completes, use the ConnectCustomKeyStore to reconnect the custom key store. To find the ConnectionState of the custom key store, use the DescribeCustomKeyStores operation.

        Before updating the custom key store, verify that the new values allow KMS to connect the custom key store to its backing key store. For example, before you change the XksProxyUriPath value, verify that the external key store proxy is reachable at the new path.

        If the operation succeeds, it returns a JSON object with no properties.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:UpdateCustomKeyStore (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Changes the properties of a custom key store. You can use this operation to change the properties of a CloudHSM key store or an external key store.

        Use the required CustomKeyStoreId parameter to identify the custom key store. Use the remaining optional parameters to change its properties. This operation does not return any property values. To verify the updated property values, use the DescribeCustomKeyStores operation.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        When updating the properties of an external key store, verify that the updated settings connect your key store, via the external key store proxy, to the same external key manager as the previous settings, or to a backup or snapshot of the external key manager with the same cryptographic keys. If the updated connection settings fail, you can fix them and retry, although an extended delay might disrupt Amazon Web Services services. However, if KMS permanently loses its access to cryptographic keys, ciphertext encrypted under those keys is unrecoverable.

        For external key stores:

        Some external key managers provide a simpler method for updating an external key store. For details, see your external key manager documentation.

        When updating an external key store in the KMS console, you can upload a JSON-based proxy configuration file with the desired values. You cannot upload the proxy configuration file to the UpdateCustomKeyStore operation. However, you can use the file to help you determine the correct values for the UpdateCustomKeyStore parameters.

        For a CloudHSM key store, you can use this operation to change the custom key store friendly name (NewCustomKeyStoreName), to tell KMS about a change to the kmsuser crypto user password (KeyStorePassword), or to associate the custom key store with a different, but related, CloudHSM cluster (CloudHsmClusterId). To update any property of a CloudHSM key store, the ConnectionState of the CloudHSM key store must be DISCONNECTED.

        For an external key store, you can use this operation to change the custom key store friendly name (NewCustomKeyStoreName), or to tell KMS about a change to the external key store proxy authentication credentials (XksProxyAuthenticationCredential), connection method (XksProxyConnectivity), external proxy endpoint (XksProxyUriEndpoint) and path (XksProxyUriPath). For external key stores with an XksProxyConnectivity of VPC_ENDPOINT_SERVICE, you can also update the Amazon VPC endpoint service name (XksProxyVpcEndpointServiceName). To update most properties of an external key store, the ConnectionState of the external key store must be DISCONNECTED. However, you can update the CustomKeyStoreName, XksProxyAuthenticationCredential, and XksProxyUriPath of an external key store when it is in the CONNECTED or DISCONNECTED state.

        If your update requires a DISCONNECTED state, before using UpdateCustomKeyStore, use the DisconnectCustomKeyStore operation to disconnect the custom key store. After the UpdateCustomKeyStore operation completes, use the ConnectCustomKeyStore operation to reconnect the custom key store. To find the ConnectionState of the custom key store, use the DescribeCustomKeyStores operation.

        Before updating the custom key store, verify that the new values allow KMS to connect the custom key store to its backing key store. For example, before you change the XksProxyUriPath value, verify that the external key store proxy is reachable at the new path.

        If the operation succeeds, it returns a JSON object with no properties.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:UpdateCustomKeyStore (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "UpdateKeyDescription":{ "name":"UpdateKeyDescription", @@ -961,7 +962,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Updates the description of a KMS key. To see the description of a KMS key, use DescribeKey.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:UpdateKeyDescription (key policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Updates the description of a KMS key. To see the description of a KMS key, use DescribeKey.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:UpdateKeyDescription (key policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "UpdatePrimaryRegion":{ "name":"UpdatePrimaryRegion", @@ -978,7 +979,7 @@ {"shape":"NotFoundException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

        Changes the primary key of a multi-Region key.

        This operation changes the replica key in the specified Region to a primary key and changes the former primary key to a replica key. For example, suppose you have a primary key in us-east-1 and a replica key in eu-west-2. If you run UpdatePrimaryRegion with a PrimaryRegion value of eu-west-2, the primary key is now the key in eu-west-2, and the key in us-east-1 becomes a replica key. For details, see Updating the primary Region in the Key Management Service Developer Guide.

        This operation supports multi-Region keys, an KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

        The primary key of a multi-Region key is the source for properties that are always shared by primary and replica keys, including the key material, key ID, key spec, key usage, key material origin, and automatic key rotation. It's the only key that can be replicated. You cannot delete the primary key until all replica keys are deleted.

        The key ID and primary Region that you specify uniquely identify the replica key that will become the primary key. The primary Region must already have a replica key. This operation does not create a KMS key in the specified Region. To find the replica keys, use the DescribeKey operation on the primary key or any replica key. To create a replica key, use the ReplicateKey operation.

        You can run this operation while using the affected multi-Region keys in cryptographic operations. This operation should not delay, interrupt, or cause failures in cryptographic operations.

        Even after this operation completes, the process of updating the primary Region might still be in progress for a few more seconds. Operations such as DescribeKey might display both the old and new primary keys as replicas. The old and new primary keys have a transient key state of Updating. The original key state is restored when the update is complete. While the key state is Updating, you can use the keys in cryptographic operations, but you cannot replicate the new primary key or perform certain management operations, such as enabling or disabling these keys. For details about the Updating key state, see Key states of KMS keys in the Key Management Service Developer Guide.

        This operation does not return any output. To verify that primary key is changed, use the DescribeKey operation.

        Cross-account use: No. You cannot use this operation in a different Amazon Web Services account.

        Required permissions:

        • kms:UpdatePrimaryRegion on the current primary key (in the primary key's Region). Include this permission primary key's key policy.

        • kms:UpdatePrimaryRegion on the current replica key (in the replica key's Region). Include this permission in the replica key's key policy.

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Changes the primary key of a multi-Region key.

        This operation changes the replica key in the specified Region to a primary key and changes the former primary key to a replica key. For example, suppose you have a primary key in us-east-1 and a replica key in eu-west-2. If you run UpdatePrimaryRegion with a PrimaryRegion value of eu-west-2, the primary key is now the key in eu-west-2, and the key in us-east-1 becomes a replica key. For details, see Change the primary key in a set of multi-Region keys in the Key Management Service Developer Guide.

        This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

        The primary key of a multi-Region key is the source for properties that are always shared by primary and replica keys, including the key material, key ID, key spec, key usage, key material origin, and automatic key rotation. It's the only key that can be replicated. You cannot delete the primary key until all replica keys are deleted.

        The key ID and primary Region that you specify uniquely identify the replica key that will become the primary key. The primary Region must already have a replica key. This operation does not create a KMS key in the specified Region. To find the replica keys, use the DescribeKey operation on the primary key or any replica key. To create a replica key, use the ReplicateKey operation.

        You can run this operation while using the affected multi-Region keys in cryptographic operations. This operation should not delay, interrupt, or cause failures in cryptographic operations.

        Even after this operation completes, the process of updating the primary Region might still be in progress for a few more seconds. Operations such as DescribeKey might display both the old and new primary keys as replicas. The old and new primary keys have a transient key state of Updating. The original key state is restored when the update is complete. While the key state is Updating, you can use the keys in cryptographic operations, but you cannot replicate the new primary key or perform certain management operations, such as enabling or disabling these keys. For details about the Updating key state, see Key states of KMS keys in the Key Management Service Developer Guide.

        This operation does not return any output. To verify that the primary key is changed, use the DescribeKey operation.

        Cross-account use: No. You cannot use this operation in a different Amazon Web Services account.

        Required permissions:

        • kms:UpdatePrimaryRegion on the current primary key (in the primary key's Region). Include this permission in the primary key's key policy.

        • kms:UpdatePrimaryRegion on the current replica key (in the replica key's Region). Include this permission in the replica key's key policy.

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "Verify":{ "name":"Verify", @@ -1000,7 +1001,7 @@ {"shape":"KMSInvalidSignatureException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Verifies a digital signature that was generated by the Sign operation.

        Verification confirms that an authorized user signed the message with the specified KMS key and signing algorithm, and the message hasn't changed since it was signed. If the signature is verified, the value of the SignatureValid field in the response is True. If the signature verification fails, the Verify operation fails with an KMSInvalidSignatureException exception.

        A digital signature is generated by using the private key in an asymmetric KMS key. The signature is verified by using the public key in the same asymmetric KMS key. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        To use the Verify operation, specify the same asymmetric KMS key, message, and signing algorithm that were used to produce the signature. The message type does not need to be the same as the one used for signing, but it must indicate whether the value of the Message parameter should be hashed as part of the verification process.

        You can also verify the digital signature by using the public key of the KMS key outside of KMS. Use the GetPublicKey operation to download the public key in the asymmetric KMS key and then use the public key to verify the signature outside of KMS. The advantage of using the Verify operation is that it is performed within KMS. As a result, it's easy to call, the operation is performed within the FIPS boundary, it is logged in CloudTrail, and you can use key policy and IAM policy to determine who is authorized to use the KMS key to verify signatures.

        To verify a signature outside of KMS with an SM2 public key (China Regions only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678 as the distinguishing ID. For more information, see Offline verification with SM2 key pairs.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:Verify (key policy)

        Related operations: Sign

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Verifies a digital signature that was generated by the Sign operation.

        Verification confirms that an authorized user signed the message with the specified KMS key and signing algorithm, and the message hasn't changed since it was signed. If the signature is verified, the value of the SignatureValid field in the response is True. If the signature verification fails, the Verify operation fails with a KMSInvalidSignatureException exception.

        A digital signature is generated by using the private key in an asymmetric KMS key. The signature is verified by using the public key in the same asymmetric KMS key. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        To use the Verify operation, specify the same asymmetric KMS key, message, and signing algorithm that were used to produce the signature. The message type does not need to be the same as the one used for signing, but it must indicate whether the value of the Message parameter should be hashed as part of the verification process.

        You can also verify the digital signature by using the public key of the KMS key outside of KMS. Use the GetPublicKey operation to download the public key in the asymmetric KMS key and then use the public key to verify the signature outside of KMS. The advantage of using the Verify operation is that it is performed within KMS. As a result, it's easy to call, the operation is performed within the FIPS boundary, it is logged in CloudTrail, and you can use key policy and IAM policy to determine who is authorized to use the KMS key to verify signatures.

        To verify a signature outside of KMS with an SM2 public key (China Regions only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678 as the distinguishing ID. For more information, see Offline verification with SM2 key pairs.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:Verify (key policy)

        Related operations: Sign

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "VerifyMac":{ "name":"VerifyMac", @@ -1021,7 +1022,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Verifies the hash-based message authentication code (HMAC) for a specified message, HMAC KMS key, and MAC algorithm. To verify the HMAC, VerifyMac computes an HMAC using the message, HMAC KMS key, and MAC algorithm that you specify, and compares the computed HMAC to the HMAC that you specify. If the HMACs are identical, the verification succeeds; otherwise, it fails. Verification indicates that the message hasn't changed since the HMAC was calculated, and the specified key was used to generate and verify the HMAC.

        HMAC KMS keys and the HMAC algorithms that KMS uses conform to industry standards defined in RFC 2104.

        This operation is part of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:VerifyMac (key policy)

        Related operations: GenerateMac

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Verifies the hash-based message authentication code (HMAC) for a specified message, HMAC KMS key, and MAC algorithm. To verify the HMAC, VerifyMac computes an HMAC using the message, HMAC KMS key, and MAC algorithm that you specify, and compares the computed HMAC to the HMAC that you specify. If the HMACs are identical, the verification succeeds; otherwise, it fails. Verification indicates that the message hasn't changed since the HMAC was calculated, and the specified key was used to generate and verify the HMAC.

        HMAC KMS keys and the HMAC algorithms that KMS uses conform to industry standards defined in RFC 2104.

        This operation is part of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:VerifyMac (key policy)

        Related operations: GenerateMac

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " } }, "shapes":{ @@ -1091,6 +1092,18 @@ "max":262144, "min":1 }, + "BackingKeyIdResponseType":{ + "type":"string", + "max":64, + "min":0, + "pattern":"^[a-f0-9]+$" + }, + "BackingKeyIdType":{ + "type":"string", + "max":64, + "min":64, + "pattern":"^[a-f0-9]+$" + }, "BooleanType":{"type":"boolean"}, "CancelKeyDeletionRequest":{ "type":"structure", @@ -1182,8 +1195,7 @@ }, "ConnectCustomKeyStoreResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "ConnectionErrorCodeType":{ "type":"string", @@ -1231,7 +1243,7 @@ }, "TargetKeyId":{ "shape":"KeyIdType", - "documentation":"

        Associates the alias with the specified customer managed key. The KMS key must be in the same Amazon Web Services Region.

        A valid key ID is required. If you supply a null or empty string value, this operation returns an error.

        For help finding the key ID and ARN, see Finding the Key ID and ARN in the Key Management Service Developer Guide .

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " + "documentation":"

        Associates the alias with the specified customer managed key. The KMS key must be in the same Amazon Web Services Region.

        A valid key ID is required. If you supply a null or empty string value, this operation returns an error.

        For help finding the key ID and ARN, see Find the key ID and key ARN in the Key Management Service Developer Guide.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " } } }, @@ -1253,7 +1265,7 @@ }, "KeyStorePassword":{ "shape":"KeyStorePasswordType", - "documentation":"

        Specifies the kmsuser password for an CloudHSM key store. This parameter is required for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM.

        Enter the password of the kmsuser crypto user (CU) account in the specified CloudHSM cluster. KMS logs into the cluster as this user to manage key material on your behalf.

        The password must be a string of 7 to 32 characters. Its value is case sensitive.

        This parameter tells KMS the kmsuser account password; it does not change the password in the CloudHSM cluster.

        " + "documentation":"

        Specifies the kmsuser password for a CloudHSM key store. This parameter is required for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM.

        Enter the password of the kmsuser crypto user (CU) account in the specified CloudHSM cluster. KMS logs into the cluster as this user to manage key material on your behalf.

        The password must be a string of 7 to 32 characters. Its value is case sensitive.

        This parameter tells KMS the kmsuser account password; it does not change the password in the CloudHSM cluster.

        " }, "CustomKeyStoreType":{ "shape":"CustomKeyStoreType", @@ -1273,11 +1285,11 @@ }, "XksProxyAuthenticationCredential":{ "shape":"XksProxyAuthenticationCredentialType", - "documentation":"

        Specifies an authentication credential for the external key store proxy (XKS proxy). This parameter is required for all custom key stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.

        The XksProxyAuthenticationCredential has two required elements: RawSecretAccessKey, a secret key, and AccessKeyId, a unique identifier for the RawSecretAccessKey. For character requirements, see XksProxyAuthenticationCredentialType.

        KMS uses this authentication credential to sign requests to the external key store proxy on your behalf. This credential is unrelated to Identity and Access Management (IAM) and Amazon Web Services credentials.

        This parameter doesn't set or change the authentication credentials on the XKS proxy. It just tells KMS the credential that you established on your external key store proxy. If you rotate your proxy authentication credential, use the UpdateCustomKeyStore operation to provide the new credential to KMS.

        " + "documentation":"

        Specifies an authentication credential for the external key store proxy (XKS proxy). This parameter is required for all custom key stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.

        The XksProxyAuthenticationCredential has two required elements: RawSecretAccessKey, a secret key, and AccessKeyId, a unique identifier for the RawSecretAccessKey. For character requirements, see XksProxyAuthenticationCredentialType.

        KMS uses this authentication credential to sign requests to the external key store proxy on your behalf. This credential is unrelated to Identity and Access Management (IAM) and Amazon Web Services credentials.

        This parameter doesn't set or change the authentication credentials on the XKS proxy. It just tells KMS the credential that you established on your external key store proxy. If you rotate your proxy authentication credential, use the UpdateCustomKeyStore operation to provide the new credential to KMS.

        " }, "XksProxyConnectivity":{ "shape":"XksProxyConnectivityType", - "documentation":"

        Indicates how KMS communicates with the external key store proxy. This parameter is required for custom key stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.

        If the external key store proxy uses a public endpoint, specify PUBLIC_ENDPOINT. If the external key store proxy uses a Amazon VPC endpoint service for communication with KMS, specify VPC_ENDPOINT_SERVICE. For help making this choice, see Choosing a connectivity option in the Key Management Service Developer Guide.

        An Amazon VPC endpoint service keeps your communication with KMS in a private address space entirely within Amazon Web Services, but it requires more configuration, including establishing a Amazon VPC with multiple subnets, a VPC endpoint service, a network load balancer, and a verified private DNS name. A public endpoint is simpler to set up, but it might be slower and might not fulfill your security requirements. You might consider testing with a public endpoint, and then establishing a VPC endpoint service for production tasks. Note that this choice does not determine the location of the external key store proxy. Even if you choose a VPC endpoint service, the proxy can be hosted within the VPC or outside of Amazon Web Services such as in your corporate data center.

        " + "documentation":"

        Indicates how KMS communicates with the external key store proxy. This parameter is required for custom key stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.

        If the external key store proxy uses a public endpoint, specify PUBLIC_ENDPOINT. If the external key store proxy uses an Amazon VPC endpoint service for communication with KMS, specify VPC_ENDPOINT_SERVICE. For help making this choice, see Choosing a connectivity option in the Key Management Service Developer Guide.

        An Amazon VPC endpoint service keeps your communication with KMS in a private address space entirely within Amazon Web Services, but it requires more configuration, including establishing an Amazon VPC with multiple subnets, a VPC endpoint service, a network load balancer, and a verified private DNS name. A public endpoint is simpler to set up, but it might be slower and might not fulfill your security requirements. You might consider testing with a public endpoint, and then establishing a VPC endpoint service for production tasks. Note that this choice does not determine the location of the external key store proxy. Even if you choose a VPC endpoint service, the proxy can be hosted within the VPC or outside of Amazon Web Services, such as in your corporate data center.

        " } } }, @@ -1308,7 +1320,7 @@ }, "RetiringPrincipal":{ "shape":"PrincipalIdType", - "documentation":"

        The principal that has permission to use the RetireGrant operation to retire the grant.

        To specify the principal, use the Amazon Resource Name (ARN) of an Amazon Web Services principal. Valid principals include Amazon Web Services accounts, IAM users, IAM roles, federated users, and assumed role users. For help with the ARN syntax for a principal, see IAM ARNs in the Identity and Access Management User Guide .

        The grant determines the retiring principal. Other principals might have permission to retire the grant or revoke the grant. For details, see RevokeGrant and Retiring and revoking grants in the Key Management Service Developer Guide.

        " + "documentation":"

        The principal that has permission to use the RetireGrant operation to retire the grant.

        To specify the principal, use the Amazon Resource Name (ARN) of an Amazon Web Services principal. Valid principals include Amazon Web Services accounts, IAM users, IAM roles, federated users, and assumed role users. For help with the ARN syntax for a principal, see IAM ARNs in the Identity and Access Management User Guide.

        The grant determines the retiring principal. Other principals might have permission to retire the grant or revoke the grant. For details, see RevokeGrant and Retiring and revoking grants in the Key Management Service Developer Guide.

        " }, "Operations":{ "shape":"GrantOperationList", @@ -1320,7 +1332,7 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "Name":{ "shape":"GrantNameType", @@ -1328,7 +1340,7 @@ }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.
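
        For illustration, a minimal AWS SDK for Java v2 sketch of a dry-run CreateGrant call. It assumes the generated builder exposes this member as dryRun(Boolean) and that DryRunOperationException (modeled elsewhere in this file) signals that the validated request would otherwise have succeeded.

        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.CreateGrantRequest;
        import software.amazon.awssdk.services.kms.model.DryRunOperationException;
        import software.amazon.awssdk.services.kms.model.GrantOperation;

        public class CreateGrantDryRun {
            public static void main(String[] args) {
                String keyArn = args[0];     // placeholder inputs
                String granteeArn = args[1];
                try (KmsClient kms = KmsClient.create()) {
                    try {
                        kms.createGrant(CreateGrantRequest.builder()
                                .keyId(keyArn)
                                .granteePrincipal(granteeArn)
                                .operations(GrantOperation.DECRYPT)
                                .dryRun(true) // validate only; no grant is created
                                .build());
                    } catch (DryRunOperationException e) {
                        // Assumed behavior: thrown when DryRun is set and the
                        // request would have succeeded.
                        System.out.println("CreateGrant would succeed");
                    }
                }
            }
        }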

        " } } }, @@ -1337,7 +1349,7 @@ "members":{ "GrantToken":{ "shape":"GrantTokenType", - "documentation":"

        The grant token.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        The grant token.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "GrantId":{ "shape":"GrantIdType", @@ -1350,7 +1362,7 @@ "members":{ "Policy":{ "shape":"PolicyType", - "documentation":"

        The key policy to attach to the KMS key.

        If you provide a key policy, it must meet the following criteria:

        • The key policy must allow the calling principal to make a subsequent PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key becomes unmanageable. For more information, see Default key policy in the Key Management Service Developer Guide. (To omit this condition, set BypassPolicyLockoutSafetyCheck to true.)

        • Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services principal, you might need to enforce a delay before including the new principal in a key policy because the new principal might not be immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services Identity and Access Management User Guide.

        If you do not provide a key policy, KMS attaches a default key policy to the KMS key. For more information, see Default key policy in the Key Management Service Developer Guide.

        The key policy size quota is 32 kilobytes (32768 bytes).

        For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide .

        " + "documentation":"

        The key policy to attach to the KMS key.

        If you provide a key policy, it must meet the following criteria:

        • The key policy must allow the calling principal to make a subsequent PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key becomes unmanageable. For more information, see Default key policy in the Key Management Service Developer Guide. (To omit this condition, set BypassPolicyLockoutSafetyCheck to true.)

        • Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services principal, you might need to enforce a delay before including the new principal in a key policy because the new principal might not be immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services Identity and Access Management User Guide.

        If either of the required Resource or Action elements is missing from a key policy statement, the policy statement has no effect. When a key policy statement is missing one of these elements, the KMS console correctly reports an error, but the CreateKey and PutKeyPolicy API requests succeed, even though the policy statement is ineffective.

        For more information on required key policy elements, see Elements in a key policy in the Key Management Service Developer Guide.

        If you do not provide a key policy, KMS attaches a default key policy to the KMS key. For more information, see Default key policy in the Key Management Service Developer Guide.

        If the key policy exceeds the length constraint, KMS returns a LimitExceededException.

        For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide.

        " }, "Description":{ "shape":"DescriptionType", @@ -1358,7 +1370,7 @@ }, "KeyUsage":{ "shape":"KeyUsageType", - "documentation":"

        Determines the cryptographic operations for which you can use the KMS key. The default value is ENCRYPT_DECRYPT. This parameter is optional when you are creating a symmetric encryption KMS key; otherwise, it is required. You can't change the KeyUsage value after the KMS key is created.

        Select only one valid value.

        • For symmetric encryption KMS keys, omit the parameter or specify ENCRYPT_DECRYPT.

        • For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC.

        • For asymmetric KMS keys with RSA key pairs, specify ENCRYPT_DECRYPT or SIGN_VERIFY.

        • For asymmetric KMS keys with NIST-recommended elliptic curve key pairs, specify SIGN_VERIFY or KEY_AGREEMENT.

        • For asymmetric KMS keys with ECC_SECG_P256K1 key pairs specify SIGN_VERIFY.

        • For asymmetric KMS keys with SM2 key pairs (China Regions only), specify ENCRYPT_DECRYPT, SIGN_VERIFY, or KEY_AGREEMENT.

        " + "documentation":"

        Determines the cryptographic operations for which you can use the KMS key. The default value is ENCRYPT_DECRYPT. This parameter is optional when you are creating a symmetric encryption KMS key; otherwise, it is required. You can't change the KeyUsage value after the KMS key is created.

        Select only one valid value.

        • For symmetric encryption KMS keys, omit the parameter or specify ENCRYPT_DECRYPT.

        • For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC.

        • For asymmetric KMS keys with RSA key pairs, specify ENCRYPT_DECRYPT or SIGN_VERIFY.

        • For asymmetric KMS keys with NIST-recommended elliptic curve key pairs, specify SIGN_VERIFY or KEY_AGREEMENT.

        • For asymmetric KMS keys with ECC_SECG_P256K1 key pairs, specify SIGN_VERIFY.

        • For asymmetric KMS keys with ML-DSA key pairs, specify SIGN_VERIFY.

        • For asymmetric KMS keys with SM2 key pairs (China Regions only), specify ENCRYPT_DECRYPT, SIGN_VERIFY, or KEY_AGREEMENT.

        " }, "CustomerMasterKeySpec":{ "shape":"CustomerMasterKeySpec", @@ -1368,7 +1380,7 @@ }, "KeySpec":{ "shape":"KeySpec", - "documentation":"

        Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit AES-GCM key that is used for encryption and decryption, except in China Regions, where it creates a 128-bit symmetric key that uses SM4 encryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the Key Management Service Developer Guide .

        The KeySpec determines whether the KMS key contains a symmetric key or an asymmetric key pair. It also determines the algorithms that the KMS key supports. You can't change the KeySpec after the KMS key is created. To further restrict the algorithms that can be used with the KMS key, use a condition key in its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm or kms:Signing Algorithm in the Key Management Service Developer Guide .

        Amazon Web Services services that are integrated with KMS use symmetric encryption KMS keys to protect your data. These services do not support asymmetric KMS keys or HMAC KMS keys.

        KMS supports the following key specs for KMS keys:

        • Symmetric encryption key (default)

          • SYMMETRIC_DEFAULT

        • HMAC keys (symmetric)

          • HMAC_224

          • HMAC_256

          • HMAC_384

          • HMAC_512

        • Asymmetric RSA key pairs (encryption and decryption -or- signing and verification)

          • RSA_2048

          • RSA_3072

          • RSA_4096

        • Asymmetric NIST-recommended elliptic curve key pairs (signing and verification -or- deriving shared secrets)

          • ECC_NIST_P256 (secp256r1)

          • ECC_NIST_P384 (secp384r1)

          • ECC_NIST_P521 (secp521r1)

        • Other asymmetric elliptic curve key pairs (signing and verification)

          • ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies.

        • SM2 key pairs (encryption and decryption -or- signing and verification -or- deriving shared secrets)

          • SM2 (China Regions only)

        " + "documentation":"

        Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit AES-GCM key that is used for encryption and decryption, except in China Regions, where it creates a 128-bit symmetric key that uses SM4 encryption. For a detailed description of all supported key specs, see Key spec reference in the Key Management Service Developer Guide.

        The KeySpec determines whether the KMS key contains a symmetric key or an asymmetric key pair. It also determines the algorithms that the KMS key supports. You can't change the KeySpec after the KMS key is created. To further restrict the algorithms that can be used with the KMS key, use a condition key in its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm, kms:KeyAgreementAlgorithm, or kms:SigningAlgorithm in the Key Management Service Developer Guide.

        Amazon Web Services services that are integrated with KMS use symmetric encryption KMS keys to protect your data. These services do not support asymmetric KMS keys or HMAC KMS keys.

        KMS supports the following key specs for KMS keys:

        • Symmetric encryption key (default)

          • SYMMETRIC_DEFAULT

        • HMAC keys (symmetric)

          • HMAC_224

          • HMAC_256

          • HMAC_384

          • HMAC_512

        • Asymmetric RSA key pairs (encryption and decryption -or- signing and verification)

          • RSA_2048

          • RSA_3072

          • RSA_4096

        • Asymmetric NIST-recommended elliptic curve key pairs (signing and verification -or- deriving shared secrets)

          • ECC_NIST_P256 (secp256r1)

          • ECC_NIST_P384 (secp384r1)

          • ECC_NIST_P521 (secp521r1)

        • Other asymmetric elliptic curve key pairs (signing and verification)

          • ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies.

        • Asymmetric ML-DSA key pairs (signing and verification)

          • ML_DSA_44

          • ML_DSA_65

          • ML_DSA_87

        • SM2 key pairs (encryption and decryption -or- signing and verification -or- deriving shared secrets)

          • SM2 (China Regions only)
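
        For illustration, a minimal AWS SDK for Java v2 sketch of creating a key with an explicit, non-default KeySpec; the spec and usage values are examples, and error handling is omitted.

        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.CreateKeyRequest;
        import software.amazon.awssdk.services.kms.model.CreateKeyResponse;
        import software.amazon.awssdk.services.kms.model.KeySpec;
        import software.amazon.awssdk.services.kms.model.KeyUsageType;

        public class CreateSigningKey {
            public static void main(String[] args) {
                try (KmsClient kms = KmsClient.create()) {
                    // Non-default key specs require an explicit, matching KeyUsage.
                    CreateKeyResponse key = kms.createKey(CreateKeyRequest.builder()
                            .keySpec(KeySpec.ECC_NIST_P256)
                            .keyUsage(KeyUsageType.SIGN_VERIFY)
                            .description("Example asymmetric signing key")
                            .build());
                    System.out.println(key.keyMetadata().arn());
                }
            }
        }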

        " }, "Origin":{ "shape":"OriginType", @@ -1376,7 +1388,7 @@ }, "CustomKeyStoreId":{ "shape":"CustomKeyStoreIdType", - "documentation":"

        Creates the KMS key in the specified custom key store. The ConnectionState of the custom key store must be CONNECTED. To find the CustomKeyStoreID and ConnectionState use the DescribeCustomKeyStores operation.

        This parameter is valid only for symmetric encryption KMS keys in a single Region. You cannot create any other type of KMS key in a custom key store.

        When you create a KMS key in an CloudHSM key store, KMS generates a non-exportable 256-bit symmetric key in its associated CloudHSM cluster and associates it with the KMS key. When you create a KMS key in an external key store, you must use the XksKeyId parameter to specify an external key that serves as key material for the KMS key.

        " + "documentation":"

        Creates the KMS key in the specified custom key store. The ConnectionState of the custom key store must be CONNECTED. To find the CustomKeyStoreID and ConnectionState, use the DescribeCustomKeyStores operation.

        This parameter is valid only for symmetric encryption KMS keys in a single Region. You cannot create any other type of KMS key in a custom key store.

        When you create a KMS key in a CloudHSM key store, KMS generates a non-exportable 256-bit symmetric key in its associated CloudHSM cluster and associates it with the KMS key. When you create a KMS key in an external key store, you must use the XksKeyId parameter to specify an external key that serves as key material for the KMS key.

        " }, "BypassPolicyLockoutSafetyCheck":{ "shape":"BooleanType", @@ -1384,7 +1396,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

        Assigns one or more tags to the KMS key. Use this parameter to tag the KMS key when it is created. To tag an existing KMS key, use the TagResource operation.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        To use this parameter, you must have kms:TagResource permission in an IAM policy.

        Each tag consists of a tag key and a tag value. Both the tag key and the tag value are required, but the tag value can be an empty (null) string. You cannot have more than one tag on a KMS key with the same tag key. If you specify an existing tag key with a different tag value, KMS replaces the current tag value with the specified one.

        When you add tags to an Amazon Web Services resource, Amazon Web Services generates a cost allocation report with usage and costs aggregated by tags. Tags can also be used to control access to a KMS key. For details, see Tagging Keys.

        " + "documentation":"

        Assigns one or more tags to the KMS key. Use this parameter to tag the KMS key when it is created. To tag an existing KMS key, use the TagResource operation.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        To use this parameter, you must have kms:TagResource permission in an IAM policy.

        Each tag consists of a tag key and a tag value. Both the tag key and the tag value are required, but the tag value can be an empty (null) string. You cannot have more than one tag on a KMS key with the same tag key. If you specify an existing tag key with a different tag value, KMS replaces the current tag value with the specified one.

        When you add tags to an Amazon Web Services resource, Amazon Web Services generates a cost allocation report with usage and costs aggregated by tags. Tags can also be used to control access to a KMS key. For details, see Tags in KMS.

        " }, "MultiRegion":{ "shape":"NullableBooleanType", @@ -1392,7 +1404,7 @@ }, "XksKeyId":{ "shape":"XksKeyIdType", - "documentation":"

        Identifies the external key that serves as key material for the KMS key in an external key store. Specify the ID that the external key store proxy uses to refer to the external key. For help, see the documentation for your external key store proxy.

        This parameter is required for a KMS key with an Origin value of EXTERNAL_KEY_STORE. It is not valid for KMS keys with any other Origin value.

        The external key must be an existing 256-bit AES symmetric encryption key hosted outside of Amazon Web Services in an external key manager associated with the external key store specified by the CustomKeyStoreId parameter. This key must be enabled and configured to perform encryption and decryption. Each KMS key in an external key store must use a different external key. For details, see Requirements for a KMS key in an external key store in the Key Management Service Developer Guide.

        Each KMS key in an external key store is associated two backing keys. One is key material that KMS generates. The other is the external key specified by this parameter. When you use the KMS key in an external key store to encrypt data, the encryption operation is performed first by KMS using the KMS key material, and then by the external key manager using the specified external key, a process known as double encryption. For details, see Double encryption in the Key Management Service Developer Guide.

        " + "documentation":"

        Identifies the external key that serves as key material for the KMS key in an external key store. Specify the ID that the external key store proxy uses to refer to the external key. For help, see the documentation for your external key store proxy.

        This parameter is required for a KMS key with an Origin value of EXTERNAL_KEY_STORE. It is not valid for KMS keys with any other Origin value.

        The external key must be an existing 256-bit AES symmetric encryption key hosted outside of Amazon Web Services in an external key manager associated with the external key store specified by the CustomKeyStoreId parameter. This key must be enabled and configured to perform encryption and decryption. Each KMS key in an external key store must use a different external key. For details, see Requirements for a KMS key in an external key store in the Key Management Service Developer Guide.

        Each KMS key in an external key store is associated with two backing keys. One is key material that KMS generates. The other is the external key specified by this parameter. When you use the KMS key in an external key store to encrypt data, the encryption operation is performed first by KMS using the KMS key material, and then by the external key manager using the specified external key, a process known as double encryption. For details, see Double encryption in the Key Management Service Developer Guide.

        " } } }, @@ -1551,11 +1563,11 @@ }, "EncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

        Specifies the encryption context to use when decrypting the data. An encryption context is valid only for cryptographic operations with a symmetric encryption KMS key. The standard asymmetric encryption algorithms and HMAC algorithms that KMS uses do not support an encryption context.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " + "documentation":"

        Specifies the encryption context to use when decrypting the data. An encryption context is valid only for cryptographic operations with a symmetric encryption KMS key. The standard asymmetric encryption algorithms and HMAC algorithms that KMS uses do not support an encryption context.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.
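
        For illustration, a minimal AWS SDK for Java v2 sketch of passing the token returned by CreateGrant to bridge the eventual-consistency window; the key and principal ARNs are placeholders, and the ciphertext here is an empty placeholder that a real call would replace.

        import software.amazon.awssdk.core.SdkBytes;
        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.CreateGrantRequest;
        import software.amazon.awssdk.services.kms.model.DecryptRequest;
        import software.amazon.awssdk.services.kms.model.GrantOperation;

        public class GrantTokenExample {
            public static void main(String[] args) {
                String keyArn = args[0];
                String granteeArn = args[1];
                SdkBytes ciphertext = SdkBytes.fromByteArray(new byte[0]); // placeholder
                try (KmsClient kms = KmsClient.create()) {
                    String token = kms.createGrant(CreateGrantRequest.builder()
                            .keyId(keyArn)
                            .granteePrincipal(granteeArn)
                            .operations(GrantOperation.DECRYPT)
                            .build())
                            .grantToken();
                    // The token grants the permission before the grant itself
                    // achieves eventual consistency.
                    kms.decrypt(DecryptRequest.builder()
                            .ciphertextBlob(ciphertext)
                            .grantTokens(token)
                            .build());
                }
            }
        }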

        " }, "KeyId":{ "shape":"KeyIdType", @@ -1571,7 +1583,7 @@ }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -1593,6 +1605,10 @@ "CiphertextForRecipient":{ "shape":"CiphertextType", "documentation":"

        The plaintext data encrypted with the public key in the attestation document.

        This field is included in the response only when the Recipient parameter in the request includes a valid attestation document from an Amazon Web Services Nitro enclave. For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        " + }, + "KeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        The identifier of the key material used to decrypt the ciphertext. This field is present only when the operation uses a symmetric encryption KMS key. This field is omitted if the request includes the Recipient parameter.

        " } } }, @@ -1618,8 +1634,7 @@ }, "DeleteCustomKeyStoreResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteImportedKeyMaterialRequest":{ "type":"structure", @@ -1628,6 +1643,23 @@ "KeyId":{ "shape":"KeyIdType", "documentation":"

        Identifies the KMS key from which you are deleting imported key material. The Origin of the KMS key must be EXTERNAL.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " + }, + "KeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        Identifies the imported key material you are deleting.

        If no KeyMaterialId is specified, KMS deletes the current key material.

        To get the list of key material IDs associated with a KMS key, use ListKeyRotations.
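
        For illustration, a sketch of deleting a specific batch of imported key material; the keyMaterialId(...) builder method is assumed from the new KeyMaterialId member above and the SDK's usual member-to-accessor naming, and the inputs are placeholders.

        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.DeleteImportedKeyMaterialRequest;

        public class DeleteSpecificKeyMaterial {
            public static void main(String[] args) {
                String keyArn = args[0];
                String keyMaterialId = args[1]; // e.g. obtained from ListKeyRotations
                try (KmsClient kms = KmsClient.create()) {
                    // Omitting keyMaterialId(...) deletes the current key material.
                    kms.deleteImportedKeyMaterial(DeleteImportedKeyMaterialRequest.builder()
                            .keyId(keyArn)
                            .keyMaterialId(keyMaterialId) // assumed accessor name
                            .build());
                }
            }
        }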

        " + } + } + }, + "DeleteImportedKeyMaterialResponse":{ + "type":"structure", + "members":{ + "KeyId":{ + "shape":"KeyIdType", + "documentation":"

        The Amazon Resource Name (key ARN) of the KMS key from which the key material was deleted.

        " + }, + "KeyMaterialId":{ + "shape":"BackingKeyIdResponseType", + "documentation":"

        Identifies the deleted key material.

        " } } }, @@ -1662,11 +1694,11 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " }, "Recipient":{ "shape":"RecipientInfo", @@ -1747,7 +1779,7 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " } } }, @@ -1781,7 +1813,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

        Identifies a symmetric encryption KMS key. You cannot enable or disable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " + "documentation":"

        Identifies a symmetric encryption KMS key. You cannot enable or disable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " } } }, @@ -1805,8 +1837,7 @@ }, "DisconnectCustomKeyStoreResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DryRunOperationException":{ "type":"structure", @@ -1832,7 +1863,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

        Identifies a symmetric encryption KMS key. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " + "documentation":"

        Identifies a symmetric encryption KMS key. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " }, "RotationPeriodInDays":{ "shape":"RotationPeriodInDaysType", @@ -1857,11 +1888,11 @@ }, "EncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

        Specifies the encryption context that will be used to encrypt the data. An encryption context is valid only for cryptographic operations with a symmetric encryption KMS key. The standard asymmetric encryption algorithms and HMAC algorithms that KMS uses do not support an encryption context.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " + "documentation":"

        Specifies the encryption context that will be used to encrypt the data. An encryption context is valid only for cryptographic operations with a symmetric encryption KMS key. The standard asymmetric encryption algorithms and HMAC algorithms that KMS uses do not support an encryption context.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.
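
        For illustration, a minimal AWS SDK for Java v2 sketch supplying the same encryption context to both Encrypt and Decrypt; the context key-value pair and key ARN are arbitrary examples.

        import java.util.Map;
        import software.amazon.awssdk.core.SdkBytes;
        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.DecryptRequest;
        import software.amazon.awssdk.services.kms.model.EncryptRequest;
        import software.amazon.awssdk.services.kms.model.EncryptResponse;

        public class EncryptionContextRoundTrip {
            public static void main(String[] args) {
                String keyArn = args[0]; // placeholder symmetric encryption key
                Map<String, String> ctx = Map.of("department", "finance");
                try (KmsClient kms = KmsClient.create()) {
                    EncryptResponse enc = kms.encrypt(EncryptRequest.builder()
                            .keyId(keyArn)
                            .plaintext(SdkBytes.fromUtf8String("hello"))
                            .encryptionContext(ctx)
                            .build());
                    String out = kms.decrypt(DecryptRequest.builder()
                            .ciphertextBlob(enc.ciphertextBlob())
                            .encryptionContext(ctx) // must match exactly; case-sensitive
                            .build())
                            .plaintext().asUtf8String();
                    System.out.println(out);
                }
            }
        }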

        " }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "EncryptionAlgorithm":{ "shape":"EncryptionAlgorithmSpec", @@ -1869,7 +1900,7 @@ }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -1935,7 +1966,7 @@ "members":{ "EncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

        Specifies the encryption context that will be used when encrypting the private key in the data key pair.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " + "documentation":"

        Specifies the encryption context that will be used when encrypting the private key in the data key pair.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " }, "KeyId":{ "shape":"KeyIdType", @@ -1943,11 +1974,11 @@ }, "KeyPairSpec":{ "shape":"DataKeyPairSpec", - "documentation":"

        Determines the type of data key pair that is generated.

        The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions.

        " + "documentation":"

        Determines the type of data key pair that is generated.

        The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), the rule that permits you to use ECC KMS keys only to sign and verify, and the rule that permits you to use ML-DSA key pairs only to sign and verify are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions.

        " }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "Recipient":{ "shape":"RecipientInfo", @@ -1955,7 +1986,7 @@ }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -1985,6 +2016,10 @@ "CiphertextForRecipient":{ "shape":"CiphertextType", "documentation":"

        The plaintext private data key encrypted with the public key from the Nitro enclave. This ciphertext can be decrypted only by using a private key in the Nitro enclave.

        This field is included in the response only when the Recipient parameter in the request includes a valid attestation document from an Amazon Web Services Nitro enclave. For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        " + }, + "KeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        The identifier of the key material used to encrypt the private key.

        " } } }, @@ -1997,7 +2032,7 @@ "members":{ "EncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

        Specifies the encryption context that will be used when encrypting the private key in the data key pair.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " + "documentation":"

        Specifies the encryption context that will be used when encrypting the private key in the data key pair.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " }, "KeyId":{ "shape":"KeyIdType", @@ -2005,15 +2040,15 @@ }, "KeyPairSpec":{ "shape":"DataKeyPairSpec", - "documentation":"

        Determines the type of data key pair that is generated.

        The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions.

        " + "documentation":"

        Determines the type of data key pair that is generated.

        The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), the rule that permits you to use ECC KMS keys only to sign and verify, and the rule that permits you to use ML-DSA key pairs only to sign and verify are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions.

        " }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -2035,6 +2070,10 @@ "KeyPairSpec":{ "shape":"DataKeyPairSpec", "documentation":"

        The type of data key pair that was generated.

        " + }, + "KeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        The identifier of the key material used to encrypt the private key.

        " } } }, @@ -2048,7 +2087,7 @@ }, "EncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

        Specifies the encryption context that will be used when encrypting the data key.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " + "documentation":"

        Specifies the encryption context that will be used when encrypting the data key.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " }, "NumberOfBytes":{ "shape":"NumberOfBytesType", @@ -2060,7 +2099,7 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "Recipient":{ "shape":"RecipientInfo", @@ -2068,7 +2107,7 @@ }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -2090,6 +2129,10 @@ "CiphertextForRecipient":{ "shape":"CiphertextType", "documentation":"

        The plaintext data key encrypted with the public key from the Nitro enclave. This ciphertext can be decrypted only by using a private key in the Nitro enclave.

        This field is included in the response only when the Recipient parameter in the request includes a valid attestation document from an Amazon Web Services Nitro enclave. For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        " + }, + "KeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        The identifier of the key material used to encrypt the data key. This field is omitted if the request includes the Recipient parameter.
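
        For illustration, a sketch of reading this new response field; the keyMaterialId() accessor is assumed from the codegen's member-to-accessor convention for the KeyMaterialId member above, and the key ARN is a placeholder.

        import software.amazon.awssdk.services.kms.KmsClient;
        import software.amazon.awssdk.services.kms.model.DataKeySpec;
        import software.amazon.awssdk.services.kms.model.GenerateDataKeyRequest;
        import software.amazon.awssdk.services.kms.model.GenerateDataKeyResponse;

        public class ReadKeyMaterialId {
            public static void main(String[] args) {
                String keyArn = args[0];
                try (KmsClient kms = KmsClient.create()) {
                    GenerateDataKeyResponse dk = kms.generateDataKey(GenerateDataKeyRequest.builder()
                            .keyId(keyArn)
                            .keySpec(DataKeySpec.AES_256)
                            .build());
                    // Assumed accessor for the new member; absent when the
                    // request includes the Recipient parameter.
                    System.out.println(dk.keyMaterialId());
                }
            }
        }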

        " } } }, @@ -2103,7 +2146,7 @@ }, "EncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

        Specifies the encryption context that will be used when encrypting the data key.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " + "documentation":"

        Specifies the encryption context that will be used when encrypting the data key.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " }, "KeySpec":{ "shape":"DataKeySpec", @@ -2115,11 +2158,11 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -2133,6 +2176,10 @@ "KeyId":{ "shape":"KeyIdType", "documentation":"

        The Amazon Resource Name (key ARN) of the KMS key that encrypted the data key.

        " + }, + "KeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        The identifier of the key material used to encrypt the data key.

        " } } }, @@ -2158,11 +2205,11 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -2271,7 +2318,7 @@ }, "OnDemandRotationStartDate":{ "shape":"DateType", - "documentation":"

        Identifies the date and time that an in progress on-demand rotation was initiated.

        The KMS API follows an eventual consistency model due to the distributed nature of the system. As a result, there might be a slight delay between initiating on-demand key rotation and the rotation's completion. Once the on-demand rotation is complete, use ListKeyRotations to view the details of the on-demand rotation.

        " + "documentation":"

        Identifies the date and time that an in-progress on-demand rotation was initiated.

        KMS uses a background process to perform rotations. As a result, there might be a slight delay between initiating on-demand key rotation and the rotation's completion. Once the on-demand rotation is complete, KMS removes this field from the response. You can use ListKeyRotations to view the details of the completed on-demand rotation.

        " } } }, @@ -2289,7 +2336,7 @@ }, "WrappingAlgorithm":{ "shape":"AlgorithmSpec", - "documentation":"

        The algorithm you will use with the RSA public key (PublicKey) in the response to protect your key material during import. For more information, see Select a wrapping algorithm in the Key Management Service Developer Guide.

        For RSA_AES wrapping algorithms, you encrypt your key material with an AES key that you generate, then encrypt your AES key with the RSA public key from KMS. For RSAES wrapping algorithms, you encrypt your key material directly with the RSA public key from KMS.

        The wrapping algorithms that you can use depend on the type of key material that you are importing. To import an RSA private key, you must use an RSA_AES wrapping algorithm.

        • RSA_AES_KEY_WRAP_SHA_256 — Supported for wrapping RSA and ECC key material.

        • RSA_AES_KEY_WRAP_SHA_1 — Supported for wrapping RSA and ECC key material.

        • RSAES_OAEP_SHA_256 — Supported for all types of key material, except RSA key material (private key).

          You cannot use the RSAES_OAEP_SHA_256 wrapping algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key material.

        • RSAES_OAEP_SHA_1 — Supported for all types of key material, except RSA key material (private key).

          You cannot use the RSAES_OAEP_SHA_1 wrapping algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key material.

        • RSAES_PKCS1_V1_5 (Deprecated) — As of October 10, 2023, KMS does not support the RSAES_PKCS1_V1_5 wrapping algorithm.

        " + "documentation":"

        The algorithm you will use with the RSA public key (PublicKey) in the response to protect your key material during import. For more information, see Select a wrapping algorithm in the Key Management Service Developer Guide.

        For RSA_AES wrapping algorithms, you encrypt your key material with an AES key that you generate, then encrypt your AES key with the RSA public key from KMS. For RSAES wrapping algorithms, you encrypt your key material directly with the RSA public key from KMS.

        The wrapping algorithms that you can use depend on the type of key material that you are importing. To import an RSA private key, you must use an RSA_AES wrapping algorithm.

        • RSA_AES_KEY_WRAP_SHA_256 — Supported for wrapping RSA and ECC key material.

        • RSA_AES_KEY_WRAP_SHA_1 — Supported for wrapping RSA and ECC key material.

        • RSAES_OAEP_SHA_256 — Supported for all types of key material, except RSA key material (private key).

          You cannot use the RSAES_OAEP_SHA_256 wrapping algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key material.

        • RSAES_OAEP_SHA_1 — Supported for all types of key material, except RSA key material (private key).

          You cannot use the RSAES_OAEP_SHA_1 wrapping algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key material.

        • RSAES_PKCS1_V1_5 (Deprecated) — As of October 10, 2023, KMS does not support the RSAES_PKCS1_V1_5 wrapping algorithm.

        " }, "WrappingKeySpec":{ "shape":"WrappingKeySpec", @@ -2328,7 +2375,7 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " } } }, @@ -2376,14 +2423,14 @@ "members":{ "EncryptionContextSubset":{ "shape":"EncryptionContextType", - "documentation":"

        A list of key-value pairs that must be included in the encryption context of the cryptographic operation request. The grant allows the cryptographic operation only when the encryption context in the request includes the key-value pairs specified in this constraint, although it can include additional key-value pairs.

        " + "documentation":"

        A list of key-value pairs that must be included in the encryption context of the cryptographic operation request. The grant allows the cryptographic operation only when the encryption context in the request includes the key-value pairs specified in this constraint, although it can include additional key-value pairs.

        " }, "EncryptionContextEquals":{ "shape":"EncryptionContextType", - "documentation":"

        A list of key-value pairs that must match the encryption context in the cryptographic operation request. The grant allows the operation only when the encryption context in the request is the same as the encryption context specified in this constraint.

        " + "documentation":"

        A list of key-value pairs that must match the encryption context in the cryptographic operation request. The grant allows the operation only when the encryption context in the request is the same as the encryption context specified in this constraint.

        " } }, - "documentation":"

        Use this structure to allow cryptographic operations in the grant only when the operation request includes the specified encryption context.

        KMS applies the grant constraints only to cryptographic operations that support an encryption context, that is, all cryptographic operations with a symmetric KMS key. Grant constraints are not applied to operations that do not support an encryption context, such as cryptographic operations with asymmetric KMS keys and management operations, such as DescribeKey or RetireGrant.

        In a cryptographic operation, the encryption context in the decryption operation must be an exact, case-sensitive match for the keys and values in the encryption context of the encryption operation. Only the order of the pairs can vary.

        However, in a grant constraint, the key in each key-value pair is not case sensitive, but the value is case sensitive.

        To avoid confusion, do not use multiple encryption context pairs that differ only by case. To require a fully case-sensitive encryption context, use the kms:EncryptionContext: and kms:EncryptionContextKeys conditions in an IAM or key policy. For details, see kms:EncryptionContext: in the Key Management Service Developer Guide .

        " + "documentation":"

        Use this structure to allow cryptographic operations in the grant only when the operation request includes the specified encryption context.

        KMS applies the grant constraints only to cryptographic operations that support an encryption context, that is, all cryptographic operations with a symmetric KMS key. Grant constraints are not applied to operations that do not support an encryption context, such as cryptographic operations with asymmetric KMS keys and management operations, such as DescribeKey or RetireGrant.

        In a cryptographic operation, the encryption context in the decryption operation must be an exact, case-sensitive match for the keys and values in the encryption context of the encryption operation. Only the order of the pairs can vary.

        However, in a grant constraint, the key in each key-value pair is not case sensitive, but the value is case sensitive.

        To avoid confusion, do not use multiple encryption context pairs that differ only by case. To require a fully case-sensitive encryption context, use the kms:EncryptionContext: and kms:EncryptionContextKeys conditions in an IAM or key policy. For details, see kms:EncryptionContext:context-key in the Key Management Service Developer Guide.

        " }, "GrantIdType":{ "type":"string", @@ -2489,7 +2536,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

        The identifier of the KMS key that will be associated with the imported key material. This must be the same KMS key specified in the KeyID parameter of the corresponding GetParametersForImport request. The Origin of the KMS key must be EXTERNAL and its KeyState must be PendingImport.

        The KMS key can be a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key, including a multi-Region key of any supported type. You cannot perform this operation on a KMS key in a custom key store, or on a KMS key in a different Amazon Web Services account.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " + "documentation":"

        The identifier of the KMS key that will be associated with the imported key material. This must be the same KMS key specified in the KeyID parameter of the corresponding GetParametersForImport request. The Origin of the KMS key must be EXTERNAL and its KeyState must be PendingImport.

        The KMS key can be a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key, including a multi-Region key of any supported type. You cannot perform this operation on a KMS key in a custom key store, or on a KMS key in a different Amazon Web Services account.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " }, "ImportToken":{ "shape":"CiphertextType", @@ -2505,15 +2552,56 @@ }, "ExpirationModel":{ "shape":"ExpirationModelType", - "documentation":"

        Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES. For help with this choice, see Setting an expiration time in the Key Management Service Developer Guide.

        When the value of ExpirationModel is KEY_MATERIAL_EXPIRES, you must specify a value for the ValidTo parameter. When value is KEY_MATERIAL_DOES_NOT_EXPIRE, you must omit the ValidTo parameter.

        You cannot change the ExpirationModel or ValidTo values for the current import after the request completes. To change either value, you must reimport the key material.

        " + "documentation":"

        Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES. For help with this choice, see Setting an expiration time in the Key Management Service Developer Guide.

        When the value of ExpirationModel is KEY_MATERIAL_EXPIRES, you must specify a value for the ValidTo parameter. When value is KEY_MATERIAL_DOES_NOT_EXPIRE, you must omit the ValidTo parameter.

        You cannot change the ExpirationModel or ValidTo values for the current import after the request completes. To change either value, you must reimport the key material.

        " + }, + "ImportType":{ + "shape":"ImportType", + "documentation":"

        Indicates whether the key material being imported was previously associated with this KMS key. This parameter is optional and only usable with symmetric encryption keys. The default is EXISTING_KEY_MATERIAL. If no key material has ever been imported into the KMS key, and this parameter is omitted, the parameter defaults to NEW_KEY_MATERIAL.

        " + }, + "KeyMaterialDescription":{ + "shape":"KeyMaterialDescriptionType", + "documentation":"

        Description for the key material being imported. This parameter is optional and only usable with symmetric encryption keys. If you do not specify a key material description, KMS retains the value you specified when you last imported the same key material into this KMS key.

        " + }, + "KeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        Identifies the key material being imported. This parameter is optional and only usable with symmetric encryption keys. You cannot specify a key material ID with ImportType set to NEW_KEY_MATERIAL. Whenever you import key material into a symmetric encryption key, KMS assigns a unique identifier to the key material based on the KMS key ID and the imported key material. When you re-import key material with a specified key material ID, KMS:

        • Computes the identifier for the key material

        • Matches the computed identifier against the specified key material ID

        • Verifies that the key material ID is already associated with the KMS key

        To get the list of key material IDs associated with a KMS key, use ListKeyRotations.

        " } } }, "ImportKeyMaterialResponse":{ "type":"structure", "members":{ + "KeyId":{ + "shape":"KeyIdType", + "documentation":"

        The Amazon Resource Name (key ARN) of the KMS key into which key material was imported.

        " + }, + "KeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        Identifies the imported key material.

        " + } } }, + "ImportState":{ + "type":"string", + "enum":[ + "IMPORTED", + "PENDING_IMPORT" + ] + }, + "ImportType":{ + "type":"string", + "enum":[ + "NEW_KEY_MATERIAL", + "EXISTING_KEY_MATERIAL" + ] + }, + "IncludeKeyMaterial":{ + "type":"string", + "enum":[ + "ALL_KEY_MATERIAL", + "ROTATIONS_ONLY" + ] + }, "IncorrectKeyException":{ "type":"structure", "members":{ @@ -2527,7 +2615,7 @@ "members":{ "message":{"shape":"ErrorMessageType"} }, - "documentation":"

        The request was rejected because the key material in the request is, expired, invalid, or is not the same key material that was previously imported into this KMS key.

        ", + "documentation":"

        The request was rejected because the key material in the request is expired, invalid, or does not meet expectations. For example, it is not the same key material that was previously imported, or KMS expected new key material but the key material being imported is already associated with the KMS key.

        ", "exception":true }, "IncorrectTrustAnchorException":{ @@ -2677,6 +2765,20 @@ "CUSTOMER" ] }, + "KeyMaterialDescriptionType":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^[a-zA-Z0-9:/_\\s.-]+$" + }, + "KeyMaterialState":{ + "type":"string", + "enum":[ + "NON_CURRENT", + "CURRENT", + "PENDING_ROTATION" + ] + }, "KeyMetadata":{ "type":"structure", "required":["KeyId"], @@ -2707,7 +2809,7 @@ }, "KeyUsage":{ "shape":"KeyUsageType", - "documentation":"

        The cryptographic operations for which you can use the KMS key.

        " + "documentation":"

        The cryptographic operations for which you can use the KMS key.

        " }, "KeyState":{ "shape":"KeyState", @@ -2719,7 +2821,7 @@ }, "ValidTo":{ "shape":"DateType", - "documentation":"

        The time at which the imported key material expires. When the key material expires, KMS deletes the key material and the KMS key becomes unusable. This value is present only for KMS keys whose Origin is EXTERNAL and whose ExpirationModel is KEY_MATERIAL_EXPIRES, otherwise this value is omitted.

        " + "documentation":"

        The earliest time at which any imported key material permanently associated with this KMS key expires. When key material expires, KMS deletes the key material and the KMS key becomes unusable. This value is present only for KMS keys whose Origin is EXTERNAL and whose ExpirationModel is KEY_MATERIAL_EXPIRES; otherwise this value is omitted.

        " }, "Origin":{ "shape":"OriginType", @@ -2727,11 +2829,11 @@ }, "CustomKeyStoreId":{ "shape":"CustomKeyStoreIdType", - "documentation":"

        A unique identifier for the custom key store that contains the KMS key. This field is present only when the KMS key is created in a custom key store.

        " + "documentation":"

        A unique identifier for the custom key store that contains the KMS key. This field is present only when the KMS key is created in a custom key store.

        " }, "CloudHsmClusterId":{ "shape":"CloudHsmClusterIdType", - "documentation":"

        The cluster ID of the CloudHSM cluster that contains the key material for the KMS key. When you create a KMS key in an CloudHSM custom key store, KMS creates the key material for the KMS key in the associated CloudHSM cluster. This field is present only when the KMS key is created in an CloudHSM key store.

        " + "documentation":"

        The cluster ID of the CloudHSM cluster that contains the key material for the KMS key. When you create a KMS key in a CloudHSM custom key store, KMS creates the key material for the KMS key in the associated CloudHSM cluster. This field is present only when the KMS key is created in a CloudHSM key store.

        " }, "ExpirationModel":{ "shape":"ExpirationModelType", @@ -2782,6 +2884,10 @@ "XksKeyConfiguration":{ "shape":"XksKeyConfigurationType", "documentation":"

        Information about the external key that is associated with a KMS key in an external key store.

        For more information, see External key in the Key Management Service Developer Guide.

        " + }, + "CurrentKeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        Identifies the current key material. This value is present for symmetric encryption keys with AWS_KMS origin and single-Region, symmetric encryption keys with EXTERNAL origin. These KMS keys support automatic or on-demand key rotation and can have multiple key materials associated with them. KMS uses the current key material for both encryption and decryption, and the non-current key material for decryption operations only.

        " } }, "documentation":"

        Contains metadata about a KMS key.

        This data type is used as a response element for the CreateKey, DescribeKey, and ReplicateKey operations.

        " @@ -2801,7 +2907,10 @@ "HMAC_256", "HMAC_384", "HMAC_512", - "SM2" + "SM2", + "ML_DSA_44", + "ML_DSA_65", + "ML_DSA_87" ] }, "KeyState":{ @@ -2846,7 +2955,7 @@ "members":{ "message":{"shape":"ErrorMessageType"} }, - "documentation":"

        The request was rejected because a quota was exceeded. For more information, see Quotas in the Key Management Service Developer Guide.

        ", + "documentation":"

        The request was rejected because a length constraint or quota was exceeded. For more information, see Quotas in the Key Management Service Developer Guide.

        ", "exception":true }, "LimitType":{ @@ -2974,6 +3083,10 @@ "shape":"KeyIdType", "documentation":"

        Gets the key rotations for the specified KMS key.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " }, + "IncludeKeyMaterial":{ + "shape":"IncludeKeyMaterial", + "documentation":"

        Use this optional parameter to control which key materials associated with this key are listed in the response. The default value of this parameter is ROTATIONS_ONLY. If you omit this parameter, KMS returns information on the key materials created by automatic or on-demand key rotation. When you specify a value of ALL_KEY_MATERIAL, KMS adds the first key material and any imported key material pending rotation to the response. This parameter can only be used with KMS keys that support automatic or on-demand key rotation.

        " + }, "Limit":{ "shape":"LimitType", "documentation":"

        Use this parameter to specify the maximum number of items to return. When this value is present, KMS does not return more than the specified number of items, but it might return fewer.

        This value is optional. If you include a value, it must be between 1 and 1000, inclusive. If you do not include a value, it defaults to 100.

        " @@ -2989,7 +3102,7 @@ "members":{ "Rotations":{ "shape":"RotationsList", - "documentation":"

        A list of completed key material rotations.

        " + "documentation":"

        A list of completed key material rotations. When the optional input parameter IncludeKeyMaterial is specified with a value of ALL_KEY_MATERIAL, this list includes the first key material and any imported key material pending rotation.

        " }, "NextMarker":{ "shape":"MarkerType", @@ -3115,7 +3228,8 @@ "type":"string", "enum":[ "RAW", - "DIGEST" + "DIGEST", + "EXTERNAL_MU" ] }, "MultiRegionConfiguration":{ @@ -3239,7 +3353,7 @@ }, "Policy":{ "shape":"PolicyType", - "documentation":"

        The key policy to attach to the KMS key.

        The key policy must meet the following criteria:

        • The key policy must allow the calling principal to make a subsequent PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key becomes unmanageable. For more information, see Default key policy in the Key Management Service Developer Guide. (To omit this condition, set BypassPolicyLockoutSafetyCheck to true.)

        • Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services principal, you might need to enforce a delay before including the new principal in a key policy because the new principal might not be immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services Identity and Access Management User Guide.

        A key policy document can include only the following characters:

        • Printable ASCII characters from the space character (\\u0020) through the end of the ASCII character range.

        • Printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF).

        • The tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) special characters

        For information about key policies, see Key policies in KMS in the Key Management Service Developer Guide.For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide .

        " + "documentation":"

        The key policy to attach to the KMS key.

        The key policy must meet the following criteria:

        • The key policy must allow the calling principal to make a subsequent PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key becomes unmanageable. For more information, see Default key policy in the Key Management Service Developer Guide. (To omit this condition, set BypassPolicyLockoutSafetyCheck to true.)

        • Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services principal, you might need to enforce a delay before including the new principal in a key policy because the new principal might not be immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services Identity and Access Management User Guide.

        If either of the required Resource or Action elements is missing from a key policy statement, the policy statement has no effect. When a key policy statement is missing one of these elements, the KMS console correctly reports an error, but the PutKeyPolicy API request succeeds, even though the policy statement is ineffective.

        For more information on required key policy elements, see Elements in a key policy in the Key Management Service Developer Guide.

        A key policy document can include only the following characters:

        • Printable ASCII characters from the space character (\\u0020) through the end of the ASCII character range.

        • Printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF).

        • The tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) special characters

        If the key policy exceeds the length constraint, KMS returns a LimitExceededException.

        For information about key policies, see Key policies in KMS in the Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide.

        " }, "BypassPolicyLockoutSafetyCheck":{ "shape":"BooleanType", @@ -3260,7 +3374,7 @@ }, "SourceEncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

        Specifies the encryption context to use to decrypt the ciphertext. Enter the same encryption context that was used to encrypt the ciphertext.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " + "documentation":"

        Specifies the encryption context to use to decrypt the ciphertext. Enter the same encryption context that was used to encrypt the ciphertext.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " }, "SourceKeyId":{ "shape":"KeyIdType", @@ -3272,7 +3386,7 @@ }, "DestinationEncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

        Specifies that encryption context to use when the reencrypting the data.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        A destination encryption context is valid only when the destination KMS key is a symmetric encryption KMS key. The standard ciphertext format for asymmetric KMS keys does not include fields for metadata.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " + "documentation":"

        Specifies the encryption context to use when reencrypting the data.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        A destination encryption context is valid only when the destination KMS key is a symmetric encryption KMS key. The standard ciphertext format for asymmetric KMS keys does not include fields for metadata.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " }, "SourceEncryptionAlgorithm":{ "shape":"EncryptionAlgorithmSpec", @@ -3284,11 +3398,11 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -3314,6 +3428,14 @@ "DestinationEncryptionAlgorithm":{ "shape":"EncryptionAlgorithmSpec", "documentation":"

        The encryption algorithm that was used to reencrypt the data.

        " + }, + "SourceKeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        The identifier of the key material used to originally encrypt the data. This field is present only when the original encryption used a symmetric encryption KMS key.

        " + }, + "DestinationKeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        The identifier of the key material used to reencrypt the data. This field is present only when data is reencrypted using a symmetric encryption KMS key.

        " } } }, @@ -3350,11 +3472,11 @@ }, "ReplicaRegion":{ "shape":"RegionType", - "documentation":"

        The Region ID of the Amazon Web Services Region for this replica key.

        Enter the Region ID, such as us-east-1 or ap-southeast-2. For a list of Amazon Web Services Regions in which KMS is supported, see KMS service endpoints in the Amazon Web Services General Reference.

        HMAC KMS keys are not supported in all Amazon Web Services Regions. If you try to replicate an HMAC KMS key in an Amazon Web Services Region in which HMAC keys are not supported, the ReplicateKey operation returns an UnsupportedOperationException. For a list of Regions in which HMAC KMS keys are supported, see HMAC keys in KMS in the Key Management Service Developer Guide.

        The replica must be in a different Amazon Web Services Region than its primary key and other replicas of that primary key, but in the same Amazon Web Services partition. KMS must be available in the replica Region. If the Region is not enabled by default, the Amazon Web Services account must be enabled in the Region. For information about Amazon Web Services partitions, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference. For information about enabling and disabling Regions, see Enabling a Region and Disabling a Region in the Amazon Web Services General Reference.

        " + "documentation":"

        The Region ID of the Amazon Web Services Region for this replica key.

        Enter the Region ID, such as us-east-1 or ap-southeast-2. For a list of Amazon Web Services Regions in which KMS is supported, see KMS service endpoints in the Amazon Web Services General Reference.

        The replica must be in a different Amazon Web Services Region than its primary key and other replicas of that primary key, but in the same Amazon Web Services partition. KMS must be available in the replica Region. If the Region is not enabled by default, the Amazon Web Services account must be enabled in the Region. For information about Amazon Web Services partitions, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference. For information about enabling and disabling Regions, see Enabling a Region and Disabling a Region in the Amazon Web Services General Reference.

        " }, "Policy":{ "shape":"PolicyType", - "documentation":"

        The key policy to attach to the KMS key. This parameter is optional. If you do not provide a key policy, KMS attaches the default key policy to the KMS key.

        The key policy is not a shared property of multi-Region keys. You can specify the same key policy or a different key policy for each key in a set of related multi-Region keys. KMS does not synchronize this property.

        If you provide a key policy, it must meet the following criteria:

        • The key policy must allow the calling principal to make a subsequent PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key becomes unmanageable. For more information, see Default key policy in the Key Management Service Developer Guide. (To omit this condition, set BypassPolicyLockoutSafetyCheck to true.)

        • Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services principal, you might need to enforce a delay before including the new principal in a key policy because the new principal might not be immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services Identity and Access Management User Guide.

        A key policy document can include only the following characters:

        • Printable ASCII characters from the space character (\\u0020) through the end of the ASCII character range.

        • Printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF).

        • The tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) special characters

        For information about key policies, see Key policies in KMS in the Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide .

        " + "documentation":"

        The key policy to attach to the KMS key. This parameter is optional. If you do not provide a key policy, KMS attaches the default key policy to the KMS key.

        The key policy is not a shared property of multi-Region keys. You can specify the same key policy or a different key policy for each key in a set of related multi-Region keys. KMS does not synchronize this property.

        If you provide a key policy, it must meet the following criteria:

        • The key policy must allow the calling principal to make a subsequent PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key becomes unmanageable. For more information, see Default key policy in the Key Management Service Developer Guide. (To omit this condition, set BypassPolicyLockoutSafetyCheck to true.)

        • Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services principal, you might need to enforce a delay before including the new principal in a key policy because the new principal might not be immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services Identity and Access Management User Guide.

        A key policy document can include only the following characters:

        • Printable ASCII characters from the space character (\\u0020) through the end of the ASCII character range.

        • Printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF).

        • The tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) special characters

        For information about key policies, see Key policies in KMS in the Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide.

        " }, "BypassPolicyLockoutSafetyCheck":{ "shape":"BooleanType", @@ -3366,7 +3488,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

        Assigns one or more tags to the replica key. Use this parameter to tag the KMS key when it is created. To tag an existing KMS key, use the TagResource operation.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        To use this parameter, you must have kms:TagResource permission in an IAM policy.

        Tags are not a shared property of multi-Region keys. You can specify the same tags or different tags for each key in a set of related multi-Region keys. KMS does not synchronize this property.

        Each tag consists of a tag key and a tag value. Both the tag key and the tag value are required, but the tag value can be an empty (null) string. You cannot have more than one tag on a KMS key with the same tag key. If you specify an existing tag key with a different tag value, KMS replaces the current tag value with the specified one.

        When you add tags to an Amazon Web Services resource, Amazon Web Services generates a cost allocation report with usage and costs aggregated by tags. Tags can also be used to control access to a KMS key. For details, see Tagging Keys.

        " + "documentation":"

        Assigns one or more tags to the replica key. Use this parameter to tag the KMS key when it is created. To tag an existing KMS key, use the TagResource operation.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        To use this parameter, you must have kms:TagResource permission in an IAM policy.

        Tags are not a shared property of multi-Region keys. You can specify the same tags or different tags for each key in a set of related multi-Region keys. KMS does not synchronize this property.

        Each tag consists of a tag key and a tag value. Both the tag key and the tag value are required, but the tag value can be an empty (null) string. You cannot have more than one tag on a KMS key with the same tag key. If you specify an existing tag key with a different tag value, KMS replaces the current tag value with the specified one.

        When you add tags to an Amazon Web Services resource, Amazon Web Services generates a cost allocation report with usage and costs aggregated by tags. Tags can also be used to control access to a KMS key. For details, see Tags in KMS.

        " } } }, @@ -3404,7 +3526,7 @@ }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -3425,7 +3547,7 @@ }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -3435,7 +3557,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

        Identifies a symmetric encryption KMS key. You cannot perform on-demand rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To perform on-demand rotation of a set of related multi-Region keys, invoke the on-demand rotation on the primary key.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " + "documentation":"

        Identifies a symmetric encryption KMS key. You cannot perform on-demand rotation of asymmetric KMS keys, HMAC KMS keys, multi-Region KMS keys with imported key material, or KMS keys in a custom key store. To perform on-demand rotation of a set of related multi-Region keys, invoke the on-demand rotation on the primary key.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " } } }, @@ -3471,16 +3593,40 @@ "shape":"KeyIdType", "documentation":"

        Unique identifier of the key.

        " }, + "KeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        Unique identifier of the key material.

        " + }, + "KeyMaterialDescription":{ + "shape":"KeyMaterialDescriptionType", + "documentation":"

        User-specified description of the key material. This field is only present for symmetric encryption KMS keys with EXTERNAL origin.

        " + }, + "ImportState":{ + "shape":"ImportState", + "documentation":"

        Indicates if the key material is currently imported into KMS. It has two possible values: IMPORTED or PENDING_IMPORT. This field is only present for symmetric encryption KMS keys with EXTERNAL origin.

        " + }, + "KeyMaterialState":{ + "shape":"KeyMaterialState", + "documentation":"

        There are three possible values for this field: CURRENT, NON_CURRENT, and PENDING_ROTATION. KMS uses CURRENT key material for both encryption and decryption and NON_CURRENT key material only for decryption. PENDING_ROTATION identifies key material that has been imported for on-demand key rotation but whose rotation hasn't completed. Key material in the PENDING_ROTATION state is not permanently associated with the KMS key. You can delete this key material and import different key material in its place. The PENDING_ROTATION value is used only for symmetric encryption keys with imported key material. The other values, CURRENT and NON_CURRENT, are used for all KMS keys that support automatic or on-demand key rotation.

        " + }, + "ExpirationModel":{ + "shape":"ExpirationModelType", + "documentation":"

        Indicates if the key material is configured to automatically expire. There are two possible values for this field: KEY_MATERIAL_EXPIRES and KEY_MATERIAL_DOES_NOT_EXPIRE. For any key material that expires, the expiration date and time is indicated in ValidTo. This field is only present for symmetric encryption KMS keys with EXTERNAL origin.

        " + }, + "ValidTo":{ + "shape":"DateType", + "documentation":"

        Date and time at which the key material expires. This field is only present for symmetric encryption KMS keys with EXTERNAL origin in rotation list entries with an ExpirationModel value of KEY_MATERIAL_EXPIRES.

        " + }, "RotationDate":{ "shape":"DateType", - "documentation":"

        Date and time that the key material rotation completed. Formatted as Unix time.

        " + "documentation":"

        Date and time that the key material rotation completed. Formatted as Unix time. This field is not present for the first key material or an imported key material in PENDING_ROTATION state.

        " }, "RotationType":{ "shape":"RotationType", - "documentation":"

        Identifies whether the key material rotation was a scheduled automatic rotation or an on-demand rotation.

        " + "documentation":"

        Identifies whether the key material rotation was a scheduled automatic rotation or an on-demand rotation. This field is not present for the first key material or an imported key material in PENDING_ROTATION state.

        " } }, - "documentation":"

        Contains information about completed key material rotations.

        " + "documentation":"

        Each entry contains information about one of the key materials associated with a KMS key.

        " }, "ScheduleKeyDeletionRequest":{ "type":"structure", @@ -3535,11 +3681,11 @@ }, "MessageType":{ "shape":"MessageType", - "documentation":"

        Tells KMS whether the value of the Message parameter should be hashed as part of the signing algorithm. Use RAW for unhashed messages; use DIGEST for message digests, which are already hashed.

        When the value of MessageType is RAW, KMS uses the standard signing algorithm, which begins with a hash function. When the value is DIGEST, KMS skips the hashing step in the signing algorithm.

        Use the DIGEST value only when the value of the Message parameter is a message digest. If you use the DIGEST value with an unhashed message, the security of the signing operation can be compromised.

        When the value of MessageTypeis DIGEST, the length of the Message value must match the length of hashed messages for the specified signing algorithm.

        You can submit a message digest and omit the MessageType or specify RAW so the digest is hashed again while signing. However, this can cause verification failures when verifying with a system that assumes a single hash.

        The hashing algorithm in that Sign uses is based on the SigningAlgorithm value.

        • Signing algorithms that end in SHA_256 use the SHA_256 hashing algorithm.

        • Signing algorithms that end in SHA_384 use the SHA_384 hashing algorithm.

        • Signing algorithms that end in SHA_512 use the SHA_512 hashing algorithm.

        • SM2DSA uses the SM3 hashing algorithm. For details, see Offline verification with SM2 key pairs.

        " + "documentation":"

        Tells KMS whether the value of the Message parameter should be hashed as part of the signing algorithm. Use RAW for unhashed messages; use DIGEST for message digests, which are already hashed; use EXTERNAL_MU for the 64-byte representative μ used in ML-DSA signing as defined in NIST FIPS 204 Section 6.2.

        When the value of MessageType is RAW, KMS uses the standard signing algorithm, which begins with a hash function. When the value is DIGEST, KMS skips the hashing step in the signing algorithm. When the value is EXTERNAL_MU, KMS skips the concatenated hashing of the public key hash and the message done in the ML-DSA signing algorithm.

        Use the DIGEST or EXTERNAL_MU value only when the value of the Message parameter is a message digest. If you use the DIGEST value with an unhashed message, the security of the signing operation can be compromised.

        When the value of MessageType is DIGEST, the length of the Message value must match the length of hashed messages for the specified signing algorithm.

        When the value of MessageType is EXTERNAL_MU, the length of the Message value must be 64 bytes.

        You can submit a message digest and omit the MessageType or specify RAW so the digest is hashed again while signing. However, this can cause verification failures when verifying with a system that assumes a single hash.

        The hashing algorithm that Sign uses is based on the SigningAlgorithm value.

        • Signing algorithms that end in SHA_256 use the SHA_256 hashing algorithm.

        • Signing algorithms that end in SHA_384 use the SHA_384 hashing algorithm.

        • Signing algorithms that end in SHA_512 use the SHA_512 hashing algorithm.

        • Signing algorithms that end in SHAKE_256 use the SHAKE_256 hashing algorithm.

        • SM2DSA uses the SM3 hashing algorithm. For details, see Offline verification with SM2 key pairs.

        " }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "SigningAlgorithm":{ "shape":"SigningAlgorithmSpec", @@ -3547,7 +3693,7 @@ }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -3580,7 +3726,8 @@ "ECDSA_SHA_256", "ECDSA_SHA_384", "ECDSA_SHA_512", - "SM2DSA" + "SM2DSA", + "ML_DSA_SHAKE_256" ] }, "SigningAlgorithmSpecList":{ @@ -3739,8 +3886,7 @@ }, "UpdateCustomKeyStoreResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateKeyDescriptionRequest":{ "type":"structure", @@ -3803,11 +3949,11 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -3847,7 +3993,7 @@ }, "MessageType":{ "shape":"MessageType", - "documentation":"

        Tells KMS whether the value of the Message parameter should be hashed as part of the signing algorithm. Use RAW for unhashed messages; use DIGEST for message digests, which are already hashed.

        When the value of MessageType is RAW, KMS uses the standard signing algorithm, which begins with a hash function. When the value is DIGEST, KMS skips the hashing step in the signing algorithm.

        Use the DIGEST value only when the value of the Message parameter is a message digest. If you use the DIGEST value with an unhashed message, the security of the verification operation can be compromised.

        When the value of MessageTypeis DIGEST, the length of the Message value must match the length of hashed messages for the specified signing algorithm.

        You can submit a message digest and omit the MessageType or specify RAW so the digest is hashed again while signing. However, if the signed message is hashed once while signing, but twice while verifying, verification fails, even when the message hasn't changed.

        The hashing algorithm in that Verify uses is based on the SigningAlgorithm value.

        • Signing algorithms that end in SHA_256 use the SHA_256 hashing algorithm.

        • Signing algorithms that end in SHA_384 use the SHA_384 hashing algorithm.

        • Signing algorithms that end in SHA_512 use the SHA_512 hashing algorithm.

        • SM2DSA uses the SM3 hashing algorithm. For details, see Offline verification with SM2 key pairs.

        " + "documentation":"

        Tells KMS whether the value of the Message parameter should be hashed as part of the signing algorithm. Use RAW for unhashed messages; use DIGEST for message digests, which are already hashed; use EXTERNAL_MU for the 64-byte representative μ used in ML-DSA signing as defined in NIST FIPS 204 Section 6.2.

        When the value of MessageType is RAW, KMS uses the standard signing algorithm, which begins with a hash function. When the value is DIGEST, KMS skips the hashing step in the signing algorithm. When the value is EXTERNAL_MU, KMS skips the concatenated hashing of the public key hash and the message done in the ML-DSA signing algorithm.

        Use the DIGEST or EXTERNAL_MU value only when the value of the Message parameter is a message digest. If you use the DIGEST value with an unhashed message, the security of the signing operation can be compromised.

        When the value of MessageType is DIGEST, the length of the Message value must match the length of hashed messages for the specified signing algorithm.

        When the value of MessageType is EXTERNAL_MU, the length of the Message value must be 64 bytes.

        You can submit a message digest and omit the MessageType or specify RAW so the digest is hashed again while signing. However, if the signed message is hashed once while signing, but twice while verifying, verification fails, even when the message hasn't changed.

        The hashing algorithm that Verify uses is based on the SigningAlgorithm value.

        • Signing algorithms that end in SHA_256 use the SHA_256 hashing algorithm.

        • Signing algorithms that end in SHA_384 use the SHA_384 hashing algorithm.

        • Signing algorithms that end in SHA_512 use the SHA_512 hashing algorithm.

        • Signing algorithms that end in SHAKE_256 use the SHAKE_256 hashing algorithm.

        • SM2DSA uses the SM3 hashing algorithm. For details, see Offline verification with SM2 key pairs.

        " }, "Signature":{ "shape":"CiphertextType", @@ -3859,11 +4005,11 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, diff --git a/services/lakeformation/pom.xml b/services/lakeformation/pom.xml index a6a4f085109d..1a62d9f46e14 100644 --- a/services/lakeformation/pom.xml +++ b/services/lakeformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT lakeformation AWS Java SDK :: Services :: LakeFormation diff --git a/services/lambda/pom.xml b/services/lambda/pom.xml index 357a86f1b32d..cbe4a1ec2e12 100644 --- a/services/lambda/pom.xml +++ b/services/lambda/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT lambda AWS Java SDK :: Services :: AWS Lambda diff --git a/services/launchwizard/pom.xml b/services/launchwizard/pom.xml index 4b70cd34f870..51082bf325b8 100644 --- a/services/launchwizard/pom.xml +++ b/services/launchwizard/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT launchwizard AWS Java SDK :: Services :: Launch Wizard diff --git a/services/lexmodelbuilding/pom.xml b/services/lexmodelbuilding/pom.xml index 9f410ab2d164..b41ed0b96823 100644 --- a/services/lexmodelbuilding/pom.xml +++ b/services/lexmodelbuilding/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT lexmodelbuilding AWS Java SDK :: Services :: Amazon Lex Model Building diff --git a/services/lexmodelsv2/pom.xml b/services/lexmodelsv2/pom.xml index 8a5131d0a697..d421d1eb2ad1 100644 --- a/services/lexmodelsv2/pom.xml +++ b/services/lexmodelsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT lexmodelsv2 AWS Java SDK :: Services :: Lex Models V2 diff --git a/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json b/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json index 871fecce8049..2b3c3e2babb6 100644 --- a/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json +++ b/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json @@ -5737,8 +5737,7 @@ }, "CreateUploadUrlRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "CreateUploadUrlResponse":{ "type":"structure", @@ -6494,8 +6493,7 @@ }, "DeleteUtterancesResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeBotAliasRequest":{ "type":"structure", @@ -7772,7 +7770,7 @@ }, "Description":{ "type":"string", - "max":200, + "max":2000, "min":0 }, "DescriptiveBotBuilderSpecification":{ @@ -10880,6 +10878,17 @@ "min":0 }, "NextToken":{"type":"string"}, + "NluImprovementSpecification":{ + "type":"structure", + "required":["enabled"], + "members":{ + "enabled":{ + "shape":"Enabled", + "documentation":"

        Specifies whether the assisted NLU (natural language understanding) feature is enabled.

        " + } + }, + "documentation":"

        Specifies whether the assisted NLU feature is turned on or off.

        " + }, "NonEmptyString":{ "type":"string", "min":1 @@ -11501,6 +11510,10 @@ "slotResolutionImprovement":{ "shape":"SlotResolutionImprovementSpecification", "documentation":"

        An object containing specifications for the assisted slot resolution feature.

        " + }, + "nluImprovement":{ + "shape":"NluImprovementSpecification", + "documentation":"

        An object containing specifications for the assisted NLU feature.

        " } }, "documentation":"

        Contains specifications about the Amazon Lex runtime generative AI capabilities from Amazon Bedrock that you can turn on for your bot.
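
Once this model is run through the generator, the new member should surface as an ordinary builder. A minimal sketch, assuming the standard codegen naming for the shapes above (the enclosing runtime-settings class is not shown in this hunk):

    import software.amazon.awssdk.services.lexmodelsv2.model.NluImprovementSpecification;

    public class LexNluImprovementSketch {
        public static void main(String[] args) {
            // Turn on the assisted NLU feature; "enabled" is the only (required) member.
            NluImprovementSpecification nluImprovement = NluImprovementSpecification.builder()
                    .enabled(true)
                    .build();
            // The resulting object would be set on the runtime generative AI settings via its
            // nluImprovement(...) builder method, alongside slotResolutionImprovement.
            System.out.println(nluImprovement);
        }
    }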

        " @@ -12942,8 +12955,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -13650,8 +13662,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateBotAliasRequest":{ "type":"structure", diff --git a/services/lexruntime/pom.xml b/services/lexruntime/pom.xml index facb3c956cf1..ba1690b91e6e 100644 --- a/services/lexruntime/pom.xml +++ b/services/lexruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT lexruntime AWS Java SDK :: Services :: Amazon Lex Runtime diff --git a/services/lexruntimev2/pom.xml b/services/lexruntimev2/pom.xml index df2d831dc26a..2e47a25d4583 100644 --- a/services/lexruntimev2/pom.xml +++ b/services/lexruntimev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT lexruntimev2 AWS Java SDK :: Services :: Lex Runtime V2 diff --git a/services/licensemanager/pom.xml b/services/licensemanager/pom.xml index 69f3999f6153..5e76c8d05bce 100644 --- a/services/licensemanager/pom.xml +++ b/services/licensemanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT licensemanager AWS Java SDK :: Services :: License Manager diff --git a/services/licensemanagerlinuxsubscriptions/pom.xml b/services/licensemanagerlinuxsubscriptions/pom.xml index 33a3c70bdaac..8ac55e4308d1 100644 --- a/services/licensemanagerlinuxsubscriptions/pom.xml +++ b/services/licensemanagerlinuxsubscriptions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT licensemanagerlinuxsubscriptions AWS Java SDK :: Services :: License Manager Linux Subscriptions diff --git a/services/licensemanagerusersubscriptions/pom.xml b/services/licensemanagerusersubscriptions/pom.xml index 2b9ac042e026..31c6162867cf 100644 --- a/services/licensemanagerusersubscriptions/pom.xml +++ b/services/licensemanagerusersubscriptions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT licensemanagerusersubscriptions AWS Java SDK :: Services :: License Manager User Subscriptions diff --git a/services/lightsail/pom.xml b/services/lightsail/pom.xml index c9efe0530075..788150c00c6f 100644 --- a/services/lightsail/pom.xml +++ b/services/lightsail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT lightsail AWS Java SDK :: Services :: Amazon Lightsail diff --git a/services/location/pom.xml b/services/location/pom.xml index 295247e3f50f..bf9d16e5239c 100644 --- a/services/location/pom.xml +++ b/services/location/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT location AWS Java SDK :: Services :: Location diff --git a/services/lookoutequipment/pom.xml b/services/lookoutequipment/pom.xml index 2cd985d981d9..973e2abea786 100644 --- a/services/lookoutequipment/pom.xml +++ b/services/lookoutequipment/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT lookoutequipment AWS Java SDK :: Services :: Lookout Equipment diff --git a/services/lookoutmetrics/pom.xml b/services/lookoutmetrics/pom.xml index 78a2210e9af0..2ca805d9708e 100644 --- a/services/lookoutmetrics/pom.xml +++ b/services/lookoutmetrics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT lookoutmetrics AWS Java SDK :: Services :: Lookout Metrics diff --git 
a/services/lookoutvision/pom.xml b/services/lookoutvision/pom.xml index 13c4a21b3d05..7d3c9a2a9bb4 100644 --- a/services/lookoutvision/pom.xml +++ b/services/lookoutvision/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT lookoutvision AWS Java SDK :: Services :: Lookout Vision diff --git a/services/m2/pom.xml b/services/m2/pom.xml index 77dea431db65..1156f61edd89 100644 --- a/services/m2/pom.xml +++ b/services/m2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT m2 AWS Java SDK :: Services :: M2 diff --git a/services/machinelearning/pom.xml b/services/machinelearning/pom.xml index 65e7134fdc51..84447e9ef442 100644 --- a/services/machinelearning/pom.xml +++ b/services/machinelearning/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT machinelearning AWS Java SDK :: Services :: Amazon Machine Learning diff --git a/services/macie2/pom.xml b/services/macie2/pom.xml index cdfb840ba492..802766d770c4 100644 --- a/services/macie2/pom.xml +++ b/services/macie2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT macie2 AWS Java SDK :: Services :: Macie2 diff --git a/services/mailmanager/pom.xml b/services/mailmanager/pom.xml index 5973890128b4..608e35533019 100644 --- a/services/mailmanager/pom.xml +++ b/services/mailmanager/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT mailmanager AWS Java SDK :: Services :: Mail Manager diff --git a/services/managedblockchain/pom.xml b/services/managedblockchain/pom.xml index a6be8821fb52..8cdbac4ad7c6 100644 --- a/services/managedblockchain/pom.xml +++ b/services/managedblockchain/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT managedblockchain AWS Java SDK :: Services :: ManagedBlockchain diff --git a/services/managedblockchainquery/pom.xml b/services/managedblockchainquery/pom.xml index f5d0071368d6..6d4535fa7d30 100644 --- a/services/managedblockchainquery/pom.xml +++ b/services/managedblockchainquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT managedblockchainquery AWS Java SDK :: Services :: Managed Blockchain Query diff --git a/services/marketplaceagreement/pom.xml b/services/marketplaceagreement/pom.xml index 0fe73eafcb66..8ad5a934691a 100644 --- a/services/marketplaceagreement/pom.xml +++ b/services/marketplaceagreement/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT marketplaceagreement AWS Java SDK :: Services :: Marketplace Agreement diff --git a/services/marketplacecatalog/pom.xml b/services/marketplacecatalog/pom.xml index 50dfc6f708fe..1700c00d972f 100644 --- a/services/marketplacecatalog/pom.xml +++ b/services/marketplacecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT marketplacecatalog AWS Java SDK :: Services :: Marketplace Catalog diff --git a/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json b/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json index c7dd96bfef1b..10e7ca80a538 100644 --- a/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json +++ b/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"catalog.marketplace", "jsonVersion":"1.1", "protocol":"rest-json", + 
"protocols":["rest-json"], "serviceAbbreviation":"AWS Marketplace Catalog", "serviceFullName":"AWS Marketplace Catalog Service", "serviceId":"Marketplace Catalog", "signatureVersion":"v4", "signingName":"aws-marketplace", - "uid":"marketplace-catalog-2018-09-17" + "uid":"marketplace-catalog-2018-09-17", + "auth":["aws.auth#sigv4"] }, "operations":{ "BatchDescribeEntities":{ @@ -200,7 +202,7 @@ {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

        Allows you to request changes for your entities. Within a single ChangeSet, you can't start the same change type against the same entity multiple times. Additionally, when a ChangeSet is running, all the entities targeted by the different changes are locked until the change set has completed (either succeeded, cancelled, or failed). If you try to start a change set containing a change against an entity that is already locked, you will receive a ResourceInUseException error.

        For example, you can't start the ChangeSet described in the example later in this topic because it contains two changes to run the same change type (AddRevisions) against the same entity (entity-id@1).

        For more information about working with change sets, see Working with change sets. For information about change types for single-AMI products, see Working with single-AMI products. Also, for more information about change types available for container-based products, see Working with container products.

        " + "documentation":"

        Allows you to request changes for your entities. Within a single ChangeSet, you can't start the same change type against the same entity multiple times. Additionally, when a ChangeSet is running, all the entities targeted by the different changes are locked until the change set has completed (either succeeded, cancelled, or failed). If you try to start a change set containing a change against an entity that is already locked, you will receive a ResourceInUseException error.

        For example, you can't start the ChangeSet described in the example later in this topic because it contains two changes to run the same change type (AddRevisions) against the same entity (entity-id@1).

        For more information about working with change sets, see Working with change sets. For information about change types for single-AMI products, see Working with single-AMI products. Also, for more information about change types available for container-based products, see Working with container products.

        To download \"DetailsDocument\" shapes, see Python and Java shapes on GitHub.
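
For context, a hedged sketch of a StartChangeSet call that uses DetailsDocument in the AWS SDK for Java v2; the change type, entity type, and details payload below are hypothetical:

    import software.amazon.awssdk.core.document.Document;
    import software.amazon.awssdk.services.marketplacecatalog.MarketplaceCatalogClient;
    import software.amazon.awssdk.services.marketplacecatalog.model.Change;
    import software.amazon.awssdk.services.marketplacecatalog.model.Entity;
    import software.amazon.awssdk.services.marketplacecatalog.model.StartChangeSetRequest;
    import software.amazon.awssdk.services.marketplacecatalog.model.StartChangeSetResponse;

    public class StartChangeSetSketch {
        public static void main(String[] args) {
            try (MarketplaceCatalogClient catalog = MarketplaceCatalogClient.create()) {
                // DetailsDocument is a JSON document type, so the payload is built as a
                // Document rather than an escaped string.
                Document details = Document.mapBuilder()
                        .putString("Description", "Updated product description")  // hypothetical details
                        .build();

                StartChangeSetResponse response = catalog.startChangeSet(StartChangeSetRequest.builder()
                        .catalog("AWSMarketplace")
                        .changeSet(Change.builder()
                                .changeType("UpdateInformation")   // hypothetical change type
                                .entity(Entity.builder()
                                        .type("SaaSProduct@1.0")   // hypothetical entity type
                                        .identifier("entity-id@1")
                                        .build())
                                .detailsDocument(details)
                                .build())
                        .build());
                System.out.println("Change set ARN: " + response.changeSetArn());
            }
        }
    }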

        " }, "TagResource":{ "name":"TagResource", @@ -524,7 +526,7 @@ }, "DetailsDocument":{ "shape":"JsonDocumentType", - "documentation":"

        Alternative field that accepts a JSON value instead of a string for ChangeType details. You can use either Details or DetailsDocument, but not both.

        " + "documentation":"

        Alternative field that accepts a JSON value instead of a string for ChangeType details. You can use either Details or DetailsDocument, but not both.

        To download the \"DetailsDocument\" shapes, see the Python and Java shapes on GitHub.

        " }, "ChangeName":{ "shape":"ChangeName", @@ -618,7 +620,7 @@ }, "DetailsDocument":{ "shape":"JsonDocumentType", - "documentation":"

        The JSON value of the details specific to the change type of the requested change.

        " + "documentation":"

        The JSON value of the details specific to the change type of the requested change.

        To download the \"DetailsDocument\" shapes, see the Python and Java shapes on GitHub.

        " }, "ErrorDetailList":{ "shape":"ErrorDetailList", @@ -976,8 +978,7 @@ }, "DeleteResourcePolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeChangeSetRequest":{ "type":"structure", @@ -1091,7 +1092,7 @@ }, "DetailsDocument":{ "shape":"JsonDocumentType", - "documentation":"

        The JSON value of the details specific to the entity.

        " + "documentation":"

        The JSON value of the details specific to the entity.

        To download \"DetailsDocument\" shapes, see the Python and Java shapes on GitHub.

        " } } }, @@ -1227,7 +1228,8 @@ "ResaleAuthorizationSummary":{ "shape":"ResaleAuthorizationSummary", "documentation":"

        An object that contains summary information about the Resale Authorization.

        " - } + }, + "MachineLearningProductSummary":{"shape":"MachineLearningProductSummary"} }, "documentation":"

        This object is a container for common summary information about the entity. The summary doesn't contain the whole entity structure, but it does contain information common across all entities.

        " }, @@ -1267,7 +1269,8 @@ "ResaleAuthorizationFilters":{ "shape":"ResaleAuthorizationFilters", "documentation":"

        A filter for Resale Authorizations.

        " - } + }, + "MachineLearningProductFilters":{"shape":"MachineLearningProductFilters"} }, "documentation":"

        Object containing all the filter fields per entity type.

        ", "union":true @@ -1298,7 +1301,8 @@ "ResaleAuthorizationSort":{ "shape":"ResaleAuthorizationSort", "documentation":"

        A sort for Resale Authorizations.

        " - } + }, + "MachineLearningProductSort":{"shape":"MachineLearningProductSort"} }, "documentation":"

        Object containing all the sort fields per entity type.

        ", "union":true @@ -1429,8 +1433,7 @@ }, "JsonDocumentType":{ "type":"structure", - "members":{ - }, + "members":{}, "document":true }, "ListChangeSetsMaxResultInteger":{ @@ -1565,6 +1568,169 @@ } } }, + "MachineLearningProductEntityIdFilter":{ + "type":"structure", + "members":{ + "ValueList":{ + "shape":"MachineLearningProductEntityIdFilterValueList", + "documentation":"

        A list of entity IDs to filter by. The operation returns machine learning products with entity IDs that match the values in this list.

        " + } + }, + "documentation":"

        The filter for machine learning product entity IDs.

        " + }, + "MachineLearningProductEntityIdFilterValueList":{ + "type":"list", + "member":{"shape":"MachineLearningProductEntityIdString"}, + "documentation":"

        A list of entity ID values to filter by. You can include up to 10 entity IDs in this list.

        ", + "max":10, + "min":1 + }, + "MachineLearningProductEntityIdString":{ + "type":"string", + "documentation":"

        The entity ID of a machine learning product. This string uniquely identifies the product.

        ", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9][.a-zA-Z0-9/-]+[a-zA-Z0-9]$" + }, + "MachineLearningProductFilters":{ + "type":"structure", + "members":{ + "EntityId":{ + "shape":"MachineLearningProductEntityIdFilter", + "documentation":"

        Filter machine learning products by their entity IDs.

        " + }, + "LastModifiedDate":{ + "shape":"MachineLearningProductLastModifiedDateFilter", + "documentation":"

        Filter machine learning products by their last modified date.

        " + }, + "ProductTitle":{ + "shape":"MachineLearningProductTitleFilter", + "documentation":"

        Filter machine learning products by their product titles.

        " + }, + "Visibility":{ + "shape":"MachineLearningProductVisibilityFilter", + "documentation":"

        Filter machine learning products by their visibility status.

        " + } + }, + "documentation":"

        The filters that you can use with the ListEntities operation to filter machine learning products. You can filter by EntityId, LastModifiedDate, ProductTitle, and Visibility.
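
Once generated, these filters plug into the existing ListEntities request. A minimal sketch, assuming the usual codegen naming for the new shapes (the entity type string is an assumption):

    import software.amazon.awssdk.services.marketplacecatalog.MarketplaceCatalogClient;
    import software.amazon.awssdk.services.marketplacecatalog.model.EntityTypeFilters;
    import software.amazon.awssdk.services.marketplacecatalog.model.ListEntitiesRequest;
    import software.amazon.awssdk.services.marketplacecatalog.model.ListEntitiesResponse;
    import software.amazon.awssdk.services.marketplacecatalog.model.MachineLearningProductFilters;
    import software.amazon.awssdk.services.marketplacecatalog.model.MachineLearningProductVisibilityFilter;

    public class ListMlProductsSketch {
        public static void main(String[] args) {
            try (MarketplaceCatalogClient catalog = MarketplaceCatalogClient.create()) {
                // List publicly visible machine learning products.
                ListEntitiesResponse response = catalog.listEntities(ListEntitiesRequest.builder()
                        .catalog("AWSMarketplace")
                        .entityType("MachineLearningProduct")           // assumed entity type name
                        .entityTypeFilters(EntityTypeFilters.builder()  // union: set exactly one member
                                .machineLearningProductFilters(MachineLearningProductFilters.builder()
                                        .visibility(MachineLearningProductVisibilityFilter.builder()
                                                .valueListWithStrings("Public")
                                                .build())
                                        .build())
                                .build())
                        .build());
                response.entitySummaryList().forEach(s ->
                        System.out.println(s.entityId() + " " + s.name()));
            }
        }
    }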

        " + }, + "MachineLearningProductLastModifiedDateFilter":{ + "type":"structure", + "members":{ + "DateRange":{ + "shape":"MachineLearningProductLastModifiedDateFilterDateRange", + "documentation":"

        A date range to filter by. The operation returns machine learning products with last modified dates that fall within this range.

        " + } + }, + "documentation":"

        The filter for machine learning product last modified date.

        " + }, + "MachineLearningProductLastModifiedDateFilterDateRange":{ + "type":"structure", + "members":{ + "AfterValue":{ + "shape":"DateTimeISO8601", + "documentation":"

        The start date (inclusive) of the date range. The operation returns machine learning products with last modified dates on or after this date.

        " + }, + "BeforeValue":{ + "shape":"DateTimeISO8601", + "documentation":"

        The end date (inclusive) of the date range. The operation returns machine learning products with last modified dates on or before this date.

        " + } + }, + "documentation":"

        A date range for filtering machine learning products by their last modified date.

        " + }, + "MachineLearningProductSort":{ + "type":"structure", + "members":{ + "SortBy":{ + "shape":"MachineLearningProductSortBy", + "documentation":"

        The field to sort by. Valid values: EntityId, LastModifiedDate, ProductTitle, and Visibility.

        " + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

        The sort order. Valid values are ASC (ascending) and DESC (descending).

        " + } + }, + "documentation":"

        The sort options for machine learning products.

        " + }, + "MachineLearningProductSortBy":{ + "type":"string", + "documentation":"

        The fields that you can sort machine learning products by.

        ", + "enum":[ + "EntityId", + "LastModifiedDate", + "ProductTitle", + "Visibility" + ] + }, + "MachineLearningProductSummary":{ + "type":"structure", + "members":{ + "ProductTitle":{ + "shape":"MachineLearningProductTitleString", + "documentation":"

        The title of the machine learning product.

        " + }, + "Visibility":{ + "shape":"MachineLearningProductVisibilityString", + "documentation":"

        The visibility status of the machine learning product. Valid values are Limited, Public, Restricted, and Draft.

        " + } + }, + "documentation":"

        A summary of a machine learning product.

        " + }, + "MachineLearningProductTitleFilter":{ + "type":"structure", + "members":{ + "ValueList":{ + "shape":"MachineLearningProductTitleFilterValueList", + "documentation":"

        A list of product titles to filter by. The operation returns machine learning products with titles that exactly match the values in this list.

        " + }, + "WildCardValue":{ + "shape":"MachineLearningProductTitleString", + "documentation":"

        A wildcard value to filter product titles. The operation returns machine learning products with titles that match this wildcard pattern.

        " + } + }, + "documentation":"

        The filter for machine learning product titles.

        " + }, + "MachineLearningProductTitleFilterValueList":{ + "type":"list", + "member":{"shape":"MachineLearningProductTitleString"}, + "documentation":"

        A list of product title values to filter by. You can include up to 10 product titles in this list.

        ", + "max":10, + "min":1 + }, + "MachineLearningProductTitleString":{ + "type":"string", + "documentation":"

        The title of a machine learning product.

        ", + "max":255, + "min":1, + "pattern":"^(.)+$" + }, + "MachineLearningProductVisibilityFilter":{ + "type":"structure", + "members":{ + "ValueList":{ + "shape":"MachineLearningProductVisibilityFilterValueList", + "documentation":"

        A list of visibility values to filter by. The operation returns machine learning products whose visibility status matches one of the values in this list.

        " + } + }, + "documentation":"

        The filter for machine learning product visibility status.

        " + }, + "MachineLearningProductVisibilityFilterValueList":{ + "type":"list", + "member":{"shape":"MachineLearningProductVisibilityString"}, + "documentation":"

        A list of visibility status values to filter by. You can include up to 10 visibility status values in this list.

        ", + "max":10, + "min":1 + }, + "MachineLearningProductVisibilityString":{ + "type":"string", + "documentation":"

        The visibility status of a machine learning product. Valid values are:

        • Limited - The product is available to a limited set of buyers.

        • Public - The product is publicly available to all buyers.

        • Restricted - The product has restricted availability.

        • Draft - The product is in draft state and not yet available to buyers.

        ", + "enum":[ + "Limited", + "Public", + "Restricted", + "Draft" + ] + }, "NextToken":{ "type":"string", "max":2048, @@ -1956,8 +2122,7 @@ }, "PutResourcePolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "RequestedChangeList":{ "type":"list", @@ -2836,8 +3001,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -2874,8 +3038,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "ValidationException":{ "type":"structure", @@ -2900,5 +3063,5 @@ "pattern":"^[a-zA-Z]+$" } }, - "documentation":"

        Catalog API actions allow you to manage your entities through list, describe, and update capabilities. An entity can be a product or an offer on AWS Marketplace.

        You can automate your entity update process by integrating the AWS Marketplace Catalog API with your AWS Marketplace product build or deployment pipelines. You can also create your own applications on top of the Catalog API to manage your products on AWS Marketplace.

        " + "documentation":"

        Catalog API actions allow you to manage your entities through list, describe, and update capabilities. An entity can be a product or an offer on AWS Marketplace.

        You can automate your entity update process by integrating the AWS Marketplace Catalog API with your AWS Marketplace product build or deployment pipelines. You can also create your own applications on top of the Catalog API to manage your products on AWS Marketplace.

        " } diff --git a/services/marketplacecommerceanalytics/pom.xml b/services/marketplacecommerceanalytics/pom.xml index 82617f73f43c..f76ad32636e6 100644 --- a/services/marketplacecommerceanalytics/pom.xml +++ b/services/marketplacecommerceanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT marketplacecommerceanalytics AWS Java SDK :: Services :: AWS Marketplace Commerce Analytics diff --git a/services/marketplacedeployment/pom.xml b/services/marketplacedeployment/pom.xml index 4550c92d4423..43ed012f1305 100644 --- a/services/marketplacedeployment/pom.xml +++ b/services/marketplacedeployment/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT marketplacedeployment AWS Java SDK :: Services :: Marketplace Deployment diff --git a/services/marketplaceentitlement/pom.xml b/services/marketplaceentitlement/pom.xml index f83da88b8214..ccba605de770 100644 --- a/services/marketplaceentitlement/pom.xml +++ b/services/marketplaceentitlement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT marketplaceentitlement AWS Java SDK :: Services :: AWS Marketplace Entitlement diff --git a/services/marketplacemetering/pom.xml b/services/marketplacemetering/pom.xml index fcc422146e84..5fe419f56769 100644 --- a/services/marketplacemetering/pom.xml +++ b/services/marketplacemetering/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT marketplacemetering AWS Java SDK :: Services :: AWS Marketplace Metering Service diff --git a/services/marketplacereporting/pom.xml b/services/marketplacereporting/pom.xml index 3661b057cb5b..ee2013c3eb10 100644 --- a/services/marketplacereporting/pom.xml +++ b/services/marketplacereporting/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT marketplacereporting AWS Java SDK :: Services :: Marketplace Reporting diff --git a/services/mediaconnect/pom.xml b/services/mediaconnect/pom.xml index 7ece9f6a9a8a..b47b2774861a 100644 --- a/services/mediaconnect/pom.xml +++ b/services/mediaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT mediaconnect AWS Java SDK :: Services :: MediaConnect diff --git a/services/mediaconnect/src/main/resources/codegen-resources/service-2.json b/services/mediaconnect/src/main/resources/codegen-resources/service-2.json index 26b20431cfbf..3dec52ad2919 100644 --- a/services/mediaconnect/src/main/resources/codegen-resources/service-2.json +++ b/services/mediaconnect/src/main/resources/codegen-resources/service-2.json @@ -26,8 +26,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -46,8 +46,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -65,8 +65,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, 
{"shape":"ServiceUnavailableException"} ], @@ -85,8 +85,8 @@ {"shape":"BadRequestException"}, {"shape":"AddFlowOutputs420Exception"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -104,8 +104,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -123,8 +123,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -144,8 +144,8 @@ {"shape":"CreateBridge420Exception"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"ServiceUnavailableException"} ], "documentation":"

        Creates a new bridge. The request must include one source.

        " @@ -162,8 +162,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"ServiceUnavailableException"}, {"shape":"CreateFlow420Exception"} ], @@ -182,9 +182,9 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, - {"shape":"ForbiddenException"}, {"shape":"CreateGateway420Exception"}, + {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"ServiceUnavailableException"} ], "documentation":"

        Creates a new gateway. The request must include at least one network (up to four).

        " @@ -202,8 +202,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -222,8 +222,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -243,8 +243,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -264,8 +264,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -285,8 +285,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -304,8 +304,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -323,8 +323,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -342,8 +342,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -362,8 +362,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -382,8 +382,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -438,8 +438,8 @@ {"shape":"BadRequestException"}, {"shape":"GrantFlowEntitlements420Exception"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -595,8 +595,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - 
{"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -615,8 +615,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -636,8 +636,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -656,8 +656,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -676,8 +676,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -696,8 +696,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -716,8 +716,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -736,8 +736,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -756,8 +756,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -775,8 +775,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -826,8 +826,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -847,8 +847,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -868,8 +868,8 @@ 
{"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -889,8 +889,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -909,8 +909,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -929,8 +929,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -949,8 +949,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -969,8 +969,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -989,8 +989,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -1010,8 +1010,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -2913,7 +2913,7 @@ "members":{ "ChannelOrder":{ "shape":"String", - "documentation":"

        The format of the audio channel.

        ", + "documentation":"

        The format of the audio channel.

        ", "locationName":"channelOrder" }, "Colorimetry":{ @@ -4379,6 +4379,11 @@ "shape":"OutputStatus", "documentation":"

        An indication of whether the output is transmitting data or not.

        ", "locationName":"outputStatus" + }, + "PeerIpAddress":{ + "shape":"String", + "documentation":"

        The IP address of the device that is currently receiving content from this output.

        • For outputs that use protocols where you specify the destination (such as SRT Caller or Zixi Push), this value matches the configured destination address.

        • For outputs that use listener protocols (such as SRT Listener), this value shows the address of the connected receiver.

        • Peer IP addresses aren't available for entitlements, managed MediaLive outputs, NDI outputs, and CDI/ST2110 outputs.

        • The peer IP address might not be visible for flows that haven't been started yet, or flows that were started before May 2025. In these cases, restart your flow to see the peer IP address.

        ", + "locationName":"peerIpAddress" } }, "documentation":"

        The settings for an output.

        " @@ -5115,6 +5120,11 @@ "shape":"GatewayBridgeSource", "documentation":"

        The source configuration for cloud flows receiving a stream from a bridge.

        ", "locationName":"gatewayBridgeSource" + }, + "PeerIpAddress":{ + "shape":"String", + "documentation":"

        The IP address of the device that is currently sending content to this source.

        • For sources that use protocols where you specify the origin (such as SRT Caller), this value matches the configured origin address.

        • For sources that use listener protocols (such as SRT Listener or RTP), this value shows the address of the connected sender.

        • Peer IP addresses aren't available for entitlements and CDI/ST2110 sources.

        • The peer IP address might not be visible for flows that haven't been started yet, or flows that were started before May 2025. In these cases, restart your flow to see the peer IP address.
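
A small sketch of reading the new field with the AWS SDK for Java v2 once it is generated (the flow ARN is a placeholder); the same peerIpAddress accessor appears on both sources and outputs:

    import software.amazon.awssdk.services.mediaconnect.MediaConnectClient;
    import software.amazon.awssdk.services.mediaconnect.model.DescribeFlowRequest;
    import software.amazon.awssdk.services.mediaconnect.model.Flow;

    public class PeerIpSketch {
        public static void main(String[] args) {
            try (MediaConnectClient mediaConnect = MediaConnectClient.create()) {
                Flow flow = mediaConnect.describeFlow(DescribeFlowRequest.builder()
                        .flowArn("arn:aws:mediaconnect:us-east-1:111122223333:flow:EXAMPLE:MyFlow") // placeholder
                        .build())
                        .flow();

                // peerIpAddress() may be null for flows that have not been (re)started recently.
                flow.sources().forEach(s ->
                        System.out.println("source " + s.name() + " peer: " + s.peerIpAddress()));
                flow.outputs().forEach(o ->
                        System.out.println("output " + o.name() + " peer: " + o.peerIpAddress()));
            }
        }
    }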

        ", + "locationName":"peerIpAddress" } }, "documentation":"

        The settings for the source of the flow.

        " diff --git a/services/mediaconvert/pom.xml b/services/mediaconvert/pom.xml index e346bfd4de87..3f4adf4843fb 100644 --- a/services/mediaconvert/pom.xml +++ b/services/mediaconvert/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 mediaconvert diff --git a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json index fb8376a3618c..b1a76351ce05 100644 --- a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json +++ b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json @@ -2115,7 +2115,7 @@ "SelectorType": { "shape": "AudioSelectorType", "locationName": "selectorType", - "documentation": "Specifies the type of the audio selector." + "documentation": "Specify how MediaConvert selects audio content within your input. The default is Track. PID: Select audio by specifying the Packet Identifier (PID) values for MPEG Transport Stream inputs. Use this when you know the exact PID values of your audio streams. Track: Default. Select audio by track number. This is the most common option and works with most input container formats. Language code: Select audio by language using ISO 639-2 or ISO 639-3 three-letter language codes. Use this when your source has embedded language metadata and you want to select tracks based on their language. HLS rendition group: Select audio from an HLS rendition group. Use this when your input is an HLS package with multiple audio renditions and you want to select specific rendition groups. All PCM: Select all uncompressed PCM audio tracks from your input automatically. This is useful when you want to include all PCM audio tracks without specifying individual track numbers." }, "Tracks": { "shape": "__listOf__integerMin1Max2147483647", @@ -2138,7 +2138,7 @@ }, "AudioSelectorType": { "type": "string", - "documentation": "Specifies the type of the audio selector.", + "documentation": "Specify how MediaConvert selects audio content within your input. The default is Track. PID: Select audio by specifying the Packet Identifier (PID) values for MPEG Transport Stream inputs. Use this when you know the exact PID values of your audio streams. Track: Default. Select audio by track number. This is the most common option and works with most input container formats. Language code: Select audio by language using ISO 639-2 or ISO 639-3 three-letter language codes. Use this when your source has embedded language metadata and you want to select tracks based on their language. HLS rendition group: Select audio from an HLS rendition group. Use this when your input is an HLS package with multiple audio renditions and you want to select specific rendition groups. All PCM: Select all uncompressed PCM audio tracks from your input automatically. This is useful when you want to include all PCM audio tracks without specifying individual track numbers.", "enum": [ "PID", "TRACK", @@ -8640,7 +8640,7 @@ "FollowSource": { "shape": "__integerMin1Max150", "locationName": "followSource", - "documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. 
If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs." + "documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs." }, "Inputs": { "shape": "__listOfInputTemplate", @@ -10202,6 +10202,14 @@ }, "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value MP3." }, + "Mp4C2paManifest": { + "type": "string", + "documentation": "When enabled, a C2PA compliant manifest will be generated, signed and embedded in the output. For more information on C2PA, see https://c2pa.org/specifications/specifications/2.1/index.html", + "enum": [ + "INCLUDE", + "EXCLUDE" + ] + }, "Mp4CslgAtom": { "type": "string", "documentation": "When enabled, file composition times will start at zero, composition times in the 'ctts' (composition time to sample) box for B-frames will be negative, and a 'cslg' (composition shift least greatest) box will be included per 14496-1 amendment 1. This improves compatibility with Apple players and tools.", @@ -10234,6 +10242,16 @@ "locationName": "audioDuration", "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. 
When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." }, + "C2paManifest": { + "shape": "Mp4C2paManifest", + "locationName": "c2paManifest", + "documentation": "When enabled, a C2PA compliant manifest will be generated, signed and embedded in the output. For more information on C2PA, see https://c2pa.org/specifications/specifications/2.1/index.html" + }, + "CertificateSecret": { + "shape": "__stringMin1Max2048PatternArnAZSecretsmanagerWD12SecretAZAZ09", + "locationName": "certificateSecret", + "documentation": "Specify the name or ARN of the AWS Secrets Manager secret that contains your C2PA public certificate chain in PEM format. Provide a valid secret name or ARN. Note that your MediaConvert service role must allow access to this secret. The public certificate chain is added to the COSE header (x5chain) for signature validation. Include the signer's certificate and all intermediate certificates. Do not include the root certificate. For details on COSE, see: https://opensource.contentauthenticity.org/docs/manifest/signing-manifests" + }, "CslgAtom": { "shape": "Mp4CslgAtom", "locationName": "cslgAtom", @@ -10258,6 +10276,11 @@ "shape": "__string", "locationName": "mp4MajorBrand", "documentation": "Overrides the \"Major Brand\" field in the output file. Usually not necessary to specify." + }, + "SigningKmsKey": { + "shape": "__stringMin1PatternArnAwsUsGovCnKmsAZ26EastWestCentralNorthSouthEastWest1912D12KeyAFAF098AFAF094AFAF094AFAF094AFAF0912MrkAFAF0932", + "locationName": "signingKmsKey", + "documentation": "Specify the ID or ARN of the AWS KMS key used to sign the C2PA manifest in your MP4 output. Provide a valid KMS key ARN. Note that your MediaConvert service role must allow access to this key." } }, "documentation": "These settings relate to your MP4 output container. You can create audio only outputs with this container. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/supported-codecs-containers-audio-only.html#output-codecs-and-containers-supported-for-audio-only." 
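
The new C2PA members combine with the existing MP4 container settings. A hedged sketch of wiring them up in the AWS SDK for Java v2 once generated (both ARNs are placeholders, and the MediaConvert service role must be able to use the KMS key and read the secret):

    import software.amazon.awssdk.services.mediaconvert.model.ContainerSettings;
    import software.amazon.awssdk.services.mediaconvert.model.ContainerType;
    import software.amazon.awssdk.services.mediaconvert.model.Mp4C2paManifest;
    import software.amazon.awssdk.services.mediaconvert.model.Mp4Settings;

    public class Mp4C2paSketch {
        public static void main(String[] args) {
            ContainerSettings container = ContainerSettings.builder()
                    .container(ContainerType.MP4)
                    .mp4Settings(Mp4Settings.builder()
                            .c2paManifest(Mp4C2paManifest.INCLUDE)
                            // Secret holding the PEM certificate chain (signer + intermediates, no root):
                            .certificateSecret("arn:aws:secretsmanager:us-east-1:111122223333:secret:c2pa-chain-EXAMPLE") // placeholder
                            // KMS key that signs the manifest:
                            .signingKmsKey("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID") // placeholder
                            .build())
                    .build();
            System.out.println(container);
        }
    }
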
@@ -15485,6 +15508,11 @@ "max": 50, "pattern": "^[a-zA-Z0-9_\\/_+=.@-]*$" }, + "__stringMin1PatternArnAwsUsGovCnKmsAZ26EastWestCentralNorthSouthEastWest1912D12KeyAFAF098AFAF094AFAF094AFAF094AFAF0912MrkAFAF0932": { + "type": "string", + "min": 1, + "pattern": "^(arn:aws(-us-gov|-cn)?:kms:[a-z-]{2,6}-(east|west|central|((north|south)(east|west)?))-[1-9]{1,2}:\\d{12}:key/)?[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}|mrk-[a-fA-F0-9]{32}$" + }, "__stringMin24Max512PatternAZaZ0902": { "type": "string", "min": 24, diff --git a/services/medialive/pom.xml b/services/medialive/pom.xml index 3ff15c64477d..9bc2c62f166d 100644 --- a/services/medialive/pom.xml +++ b/services/medialive/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 medialive diff --git a/services/mediapackage/pom.xml b/services/mediapackage/pom.xml index 795751c49c9e..147932581f3b 100644 --- a/services/mediapackage/pom.xml +++ b/services/mediapackage/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 mediapackage diff --git a/services/mediapackagev2/pom.xml b/services/mediapackagev2/pom.xml index 7fd044fcfa76..892bb6066b43 100644 --- a/services/mediapackagev2/pom.xml +++ b/services/mediapackagev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT mediapackagev2 AWS Java SDK :: Services :: Media Package V2 diff --git a/services/mediapackagevod/pom.xml b/services/mediapackagevod/pom.xml index 5cba21447042..ec3fc7501b0f 100644 --- a/services/mediapackagevod/pom.xml +++ b/services/mediapackagevod/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT mediapackagevod AWS Java SDK :: Services :: MediaPackage Vod diff --git a/services/mediastore/pom.xml b/services/mediastore/pom.xml index ac7e1ac5d672..d56be460e8b9 100644 --- a/services/mediastore/pom.xml +++ b/services/mediastore/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 mediastore diff --git a/services/mediastoredata/pom.xml b/services/mediastoredata/pom.xml index 7e992601c714..2a0e3305d4d8 100644 --- a/services/mediastoredata/pom.xml +++ b/services/mediastoredata/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 mediastoredata diff --git a/services/mediatailor/pom.xml b/services/mediatailor/pom.xml index e92f4b9ab4a1..1f4ac06a2ee0 100644 --- a/services/mediatailor/pom.xml +++ b/services/mediatailor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT mediatailor AWS Java SDK :: Services :: MediaTailor diff --git a/services/medicalimaging/pom.xml b/services/medicalimaging/pom.xml index 9664f853840e..6536ddb6d344 100644 --- a/services/medicalimaging/pom.xml +++ b/services/medicalimaging/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT medicalimaging AWS Java SDK :: Services :: Medical Imaging diff --git a/services/memorydb/pom.xml b/services/memorydb/pom.xml index 6f0d0c95d0f3..9528badac1cb 100644 --- a/services/memorydb/pom.xml +++ b/services/memorydb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT memorydb AWS Java SDK :: Services :: Memory DB diff --git a/services/mgn/pom.xml b/services/mgn/pom.xml index f7e8125eedc3..da75d2331371 100644 --- a/services/mgn/pom.xml +++ b/services/mgn/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT mgn AWS Java SDK :: Services :: Mgn diff --git a/services/migrationhub/pom.xml b/services/migrationhub/pom.xml index 576bc4e26be9..b7b3bf0013e4 100644 --- a/services/migrationhub/pom.xml +++ b/services/migrationhub/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 migrationhub diff --git a/services/migrationhubconfig/pom.xml b/services/migrationhubconfig/pom.xml index 326edf418b16..e347f851fdc1 100644 --- a/services/migrationhubconfig/pom.xml +++ b/services/migrationhubconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT migrationhubconfig AWS Java SDK :: Services :: MigrationHub Config diff --git a/services/migrationhuborchestrator/pom.xml b/services/migrationhuborchestrator/pom.xml index c49a27da1809..e54c191d5079 100644 --- a/services/migrationhuborchestrator/pom.xml +++ b/services/migrationhuborchestrator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT migrationhuborchestrator AWS Java SDK :: Services :: Migration Hub Orchestrator diff --git a/services/migrationhubrefactorspaces/pom.xml b/services/migrationhubrefactorspaces/pom.xml index 839e378813e9..11de0245667a 100644 --- a/services/migrationhubrefactorspaces/pom.xml +++ b/services/migrationhubrefactorspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT migrationhubrefactorspaces AWS Java SDK :: Services :: Migration Hub Refactor Spaces diff --git a/services/migrationhubstrategy/pom.xml b/services/migrationhubstrategy/pom.xml index 91a50da10a33..af1a348b422b 100644 --- a/services/migrationhubstrategy/pom.xml +++ b/services/migrationhubstrategy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT migrationhubstrategy AWS Java SDK :: Services :: Migration Hub Strategy diff --git a/services/mq/pom.xml b/services/mq/pom.xml index e30fc131c7cb..506b5faa74b3 100644 --- a/services/mq/pom.xml +++ b/services/mq/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 mq diff --git a/services/mturk/pom.xml b/services/mturk/pom.xml index 71ee713b2169..5611de68e57d 100644 --- a/services/mturk/pom.xml +++ b/services/mturk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT mturk AWS Java SDK :: Services :: Amazon Mechanical Turk Requester diff --git a/services/mwaa/pom.xml b/services/mwaa/pom.xml index 93ffd5a1a1aa..154b672d1849 100644 --- a/services/mwaa/pom.xml +++ b/services/mwaa/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT mwaa AWS Java SDK :: Services :: MWAA diff --git a/services/mwaa/src/main/resources/codegen-resources/service-2.json b/services/mwaa/src/main/resources/codegen-resources/service-2.json index f3e7465469c2..c96ab1c6500a 100644 --- a/services/mwaa/src/main/resources/codegen-resources/service-2.json +++ b/services/mwaa/src/main/resources/codegen-resources/service-2.json @@ -825,6 +825,10 @@ "Source":{ "shape":"UpdateSource", "documentation":"

        The source of the last update to the environment. Includes internal processes by Amazon MWAA, such as an environment maintenance update.

        " + }, + "WorkerReplacementStrategy":{ + "shape":"WorkerReplacementStrategy", + "documentation":"

        The worker replacement strategy used in the last update of the environment.

        " } }, "documentation":"

        Describes the status of the last update on the environment, and any errors that were encountered.

        " @@ -1375,18 +1379,50 @@ "shape":"IamRoleArn", "documentation":"

        The Amazon Resource Name (ARN) of the execution role in IAM that allows MWAA to access Amazon Web Services resources in your environment. For example, arn:aws:iam::123456789:role/my-execution-role. For more information, see Amazon MWAA Execution role.

        " }, + "AirflowConfigurationOptions":{ + "shape":"AirflowConfigurationOptions", + "documentation":"

        A list of key-value pairs containing the Apache Airflow configuration options you want to attach to your environment. For more information, see Apache Airflow configuration options.

        " + }, "AirflowVersion":{ "shape":"AirflowVersion", "documentation":"

        The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA.

        Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment.

        Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, 2.10.1, and 2.10.3.

        " }, - "SourceBucketArn":{ - "shape":"S3BucketArn", - "documentation":"

        The Amazon Resource Name (ARN) of the Amazon S3 bucket where your DAG code and supporting files are stored. For example, arn:aws:s3:::my-airflow-bucket-unique-name. For more information, see Create an Amazon S3 bucket for Amazon MWAA.

        " - }, "DagS3Path":{ "shape":"RelativePath", "documentation":"

        The relative path to the DAGs folder on your Amazon S3 bucket. For example, dags. For more information, see Adding or updating DAGs.

        " }, + "EnvironmentClass":{ + "shape":"EnvironmentClass", + "documentation":"

        The environment class type. Valid values: mw1.micro, mw1.small, mw1.medium, mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment class.

        " + }, + "LoggingConfiguration":{ + "shape":"LoggingConfigurationInput", + "documentation":"

        The Apache Airflow log types to send to CloudWatch Logs.

        " + }, + "MaxWorkers":{ + "shape":"MaxWorkers", + "documentation":"

        The maximum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. For example, 20. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers, leaving the one worker that is included with your environment, or the number you specify in MinWorkers.

        " + }, + "MinWorkers":{ + "shape":"MinWorkers", + "documentation":"

        The minimum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers, leaving the worker count you specify in the MinWorkers field. For example, 2.

        " + }, + "MaxWebservers":{ + "shape":"MaxWebservers", + "documentation":"

        The maximum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using the Apache Airflow REST API or the Apache Airflow CLI. For example, in scenarios where your workload requires network calls to the Apache Airflow REST API with a high transaction-per-second (TPS) rate, Amazon MWAA will increase the number of web servers up to the number set in MaxWebservers. As TPS rates decrease, Amazon MWAA disposes of the additional web servers and scales down to the number set in MinWebservers.

        Valid values: For environments larger than mw1.micro, accepts values from 2 to 5. Defaults to 2 for all environment sizes except mw1.micro, which defaults to 1.

        " + }, + "MinWebservers":{ + "shape":"MinWebservers", + "documentation":"

        The minimum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using the Apache Airflow REST API or the Apache Airflow CLI. As the transaction-per-second rate and the network load decrease, Amazon MWAA disposes of the additional web servers and scales down to the number set in MinWebservers.

        Valid values: For environments larger than mw1.micro, accepts values from 2 to 5. Defaults to 2 for all environment sizes except mw1.micro, which defaults to 1.

        " + }, + "WorkerReplacementStrategy":{ + "shape":"WorkerReplacementStrategy", + "documentation":"

        The worker replacement strategy to use when updating the environment.

        You can select one of the following strategies:

        • Forced - Stops and replaces Apache Airflow workers without waiting for tasks to complete before an update.

        • Graceful - Allows Apache Airflow workers to complete running tasks for up to 12 hours during an update before they're stopped and replaced.
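
For orientation, a minimal sketch of how this strategy would be selected through the generated v2 client once this model is code-generated. The client, request, and enum names (MwaaClient, updateEnvironment, WorkerReplacementStrategy.GRACEFUL) are inferred from standard codegen naming, not confirmed elsewhere in this patch:

    import software.amazon.awssdk.services.mwaa.MwaaClient;
    import software.amazon.awssdk.services.mwaa.model.WorkerReplacementStrategy;

    // Prefer GRACEFUL so in-flight DAG tasks get up to 12 hours to drain;
    // FORCED swaps workers immediately, interrupting running tasks.
    try (MwaaClient mwaa = MwaaClient.create()) {
        mwaa.updateEnvironment(r -> r
                .name("my-environment")
                .workerReplacementStrategy(WorkerReplacementStrategy.GRACEFUL));
    }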

        " + }, + "NetworkConfiguration":{ + "shape":"UpdateNetworkConfigurationInput", + "documentation":"

        The VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. For more information, see About networking on Amazon MWAA.

        " + }, "PluginsS3Path":{ "shape":"RelativePath", "documentation":"

        The relative path to the plugins.zip file on your Amazon S3 bucket. For example, plugins.zip. If specified, then the plugins.zip version is required. For more information, see Installing custom plugins.

        " @@ -1403,6 +1439,14 @@ "shape":"S3ObjectVersion", "documentation":"

        The version of the requirements.txt file on your Amazon S3 bucket. You must specify a version each time a requirements.txt file is updated. For more information, see How S3 Versioning works.

        " }, + "Schedulers":{ + "shape":"Schedulers", + "documentation":"

        The number of Apache Airflow schedulers to run in your Amazon MWAA environment.

        " + }, + "SourceBucketArn":{ + "shape":"S3BucketArn", + "documentation":"

        The Amazon Resource Name (ARN) of the Amazon S3 bucket where your DAG code and supporting files are stored. For example, arn:aws:s3:::my-airflow-bucket-unique-name. For more information, see Create an Amazon S3 bucket for Amazon MWAA.

        " + }, "StartupScriptS3Path":{ "shape":"RelativePath", "documentation":"

        The relative path to the startup shell script in your Amazon S3 bucket. For example, s3://mwaa-environment/startup.sh.

        Amazon MWAA runs the script as your environment starts, and before running the Apache Airflow process. You can use this script to install dependencies, modify Apache Airflow configuration options, and set environment variables. For more information, see Using a startup script.

        " @@ -1411,49 +1455,13 @@ "shape":"S3ObjectVersion", "documentation":"

        The version of the startup shell script in your Amazon S3 bucket. You must specify the version ID that Amazon S3 assigns to the file every time you update the script.

        Version IDs are Unicode, UTF-8 encoded, URL-ready, opaque strings that are no more than 1,024 bytes long. The following is an example:

        3sL4kqtJlcpXroDTDmJ+rmSpXd3dIbrHY+MTRCxf3vjVBH40Nr8X8gdRQBpUMLUo

        For more information, see Using a startup script.

        " }, - "AirflowConfigurationOptions":{ - "shape":"AirflowConfigurationOptions", - "documentation":"

        A list of key-value pairs containing the Apache Airflow configuration options you want to attach to your environment. For more information, see Apache Airflow configuration options.

        " - }, - "EnvironmentClass":{ - "shape":"EnvironmentClass", - "documentation":"

        The environment class type. Valid values: mw1.micro, mw1.small, mw1.medium, mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment class.

        " - }, - "MaxWorkers":{ - "shape":"MaxWorkers", - "documentation":"

        The maximum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. For example, 20. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the one worker that is included with your environment, or the number you specify in MinWorkers.

        " - }, - "NetworkConfiguration":{ - "shape":"UpdateNetworkConfigurationInput", - "documentation":"

        The VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. For more information, see About networking on Amazon MWAA.

        " - }, - "LoggingConfiguration":{ - "shape":"LoggingConfigurationInput", - "documentation":"

        The Apache Airflow log types to send to CloudWatch Logs.

        " - }, - "WeeklyMaintenanceWindowStart":{ - "shape":"WeeklyMaintenanceWindowStart", - "documentation":"

        The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time to start weekly maintenance updates of your environment in the following format: DAY:HH:MM. For example: TUE:03:30. You can specify a start time in 30 minute increments only.

        " - }, "WebserverAccessMode":{ "shape":"WebserverAccessMode", "documentation":"

        The Apache Airflow Web server access mode. For more information, see Apache Airflow access modes.

        " }, - "MinWorkers":{ - "shape":"MinWorkers", - "documentation":"

        The minimum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the worker count you specify in the MinWorkers field. For example, 2.

        " - }, - "Schedulers":{ - "shape":"Schedulers", - "documentation":"

        The number of Apache Airflow schedulers to run in your Amazon MWAA environment.

        " - }, - "MinWebservers":{ - "shape":"MinWebservers", - "documentation":"

        The minimum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. As the transaction-per-second rate, and the network load, decrease, Amazon MWAA disposes of the additional web servers, and scales down to the number set in MinxWebserers.

        Valid values: For environments larger than mw1.micro, accepts values from 2 to 5. Defaults to 2 for all environment sizes except mw1.micro, which defaults to 1.

        " - }, - "MaxWebservers":{ - "shape":"MaxWebservers", - "documentation":"

        The maximum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. For example, in scenarios where your workload requires network calls to the Apache Airflow REST API with a high transaction-per-second (TPS) rate, Amazon MWAA will increase the number of web servers up to the number set in MaxWebserers. As TPS rates decrease Amazon MWAA disposes of the additional web servers, and scales down to the number set in MinxWebserers.

        Valid values: For environments larger than mw1.micro, accepts values from 2 to 5. Defaults to 2 for all environment sizes except mw1.micro, which defaults to 1.

        " + "WeeklyMaintenanceWindowStart":{ + "shape":"WeeklyMaintenanceWindowStart", + "documentation":"

        The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time to start weekly maintenance updates of your environment in the following format: DAY:HH:MM. For example: TUE:03:30. You can specify a start time in 30 minute increments only.

        " } } }, @@ -1541,6 +1549,13 @@ "max":9, "min":1, "pattern":".*(MON|TUE|WED|THU|FRI|SAT|SUN):([01]\\d|2[0-3]):(00|30).*" + }, + "WorkerReplacementStrategy":{ + "type":"string", + "enum":[ + "FORCED", + "GRACEFUL" + ] } }, "documentation":"

        Amazon Managed Workflows for Apache Airflow

        This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What is Amazon MWAA?.

        Endpoints

        Regions

        For a list of supported regions, see Amazon MWAA endpoints and quotas in the Amazon Web Services General Reference.

        " diff --git a/services/neptune/pom.xml b/services/neptune/pom.xml index 881279b38208..65eb8eeedcfd 100644 --- a/services/neptune/pom.xml +++ b/services/neptune/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT neptune AWS Java SDK :: Services :: Neptune diff --git a/services/neptunedata/pom.xml b/services/neptunedata/pom.xml index 2c0970377400..552a26381acd 100644 --- a/services/neptunedata/pom.xml +++ b/services/neptunedata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT neptunedata AWS Java SDK :: Services :: Neptunedata diff --git a/services/neptunegraph/pom.xml b/services/neptunegraph/pom.xml index edcd83c6b2fd..4193c10dc312 100644 --- a/services/neptunegraph/pom.xml +++ b/services/neptunegraph/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT neptunegraph AWS Java SDK :: Services :: Neptune Graph diff --git a/services/networkfirewall/pom.xml b/services/networkfirewall/pom.xml index e32bae2d607c..6efb63080751 100644 --- a/services/networkfirewall/pom.xml +++ b/services/networkfirewall/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT networkfirewall AWS Java SDK :: Services :: Network Firewall diff --git a/services/networkfirewall/src/main/resources/codegen-resources/paginators-1.json b/services/networkfirewall/src/main/resources/codegen-resources/paginators-1.json index 0107ef321d91..504835e7bba3 100644 --- a/services/networkfirewall/src/main/resources/codegen-resources/paginators-1.json +++ b/services/networkfirewall/src/main/resources/codegen-resources/paginators-1.json @@ -53,6 +53,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Tags" + }, + "ListVpcEndpointAssociations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "VpcEndpointAssociations" } } } diff --git a/services/networkfirewall/src/main/resources/codegen-resources/service-2.json b/services/networkfirewall/src/main/resources/codegen-resources/service-2.json index 5e4742fe7ed6..1f4850a57c62 100644 --- a/services/networkfirewall/src/main/resources/codegen-resources/service-2.json +++ b/services/networkfirewall/src/main/resources/codegen-resources/service-2.json @@ -16,6 +16,41 @@ "auth":["aws.auth#sigv4"] }, "operations":{ + "AcceptNetworkFirewallTransitGatewayAttachment":{ + "name":"AcceptNetworkFirewallTransitGatewayAttachment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AcceptNetworkFirewallTransitGatewayAttachmentRequest"}, + "output":{"shape":"AcceptNetworkFirewallTransitGatewayAttachmentResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Accepts a transit gateway attachment request for Network Firewall. When you accept the attachment request, Network Firewall creates the necessary routing components to enable traffic flow between the transit gateway and firewall endpoints.

        You must accept a transit gateway attachment to complete the creation of a transit gateway-attached firewall, unless auto-accept is enabled on the transit gateway. After acceptance, use DescribeFirewall to verify the firewall status.

        To reject an attachment instead of accepting it, use RejectNetworkFirewallTransitGatewayAttachment.

        It can take several minutes for the attachment acceptance to complete and the firewall to become available.
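
As a hedged sketch of this accept-then-verify flow against the generated client (NetworkFirewallClient and the FirewallStatusValue check are assumed from this model's shapes; the polling details are illustrative only):

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.FirewallStatusValue;

    NetworkFirewallClient nf = NetworkFirewallClient.create();
    nf.acceptNetworkFirewallTransitGatewayAttachment(r -> r
            .transitGatewayAttachmentId("tgw-attach-0123456789abcdef0"));
    // Acceptance is asynchronous; poll DescribeFirewall until the firewall reports READY.
    // Simplified: no timeout, backoff, or InterruptedException handling.
    while (nf.describeFirewall(r -> r.firewallName("my-tgw-firewall"))
             .firewallStatus().status() != FirewallStatusValue.READY) {
        Thread.sleep(30_000);
    }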

        " + }, + "AssociateAvailabilityZones":{ + "name":"AssociateAvailabilityZones", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateAvailabilityZonesRequest"}, + "output":{"shape":"AssociateAvailabilityZonesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"InvalidOperationException"}, + {"shape":"InsufficientCapacityException"} + ], + "documentation":"

        Associates the specified Availability Zones with a transit gateway-attached firewall. For each Availability Zone, Network Firewall creates a firewall endpoint to process traffic. You can specify one or more Availability Zones where you want to deploy the firewall.

        After adding Availability Zones, you must update your transit gateway route tables to direct traffic through the new firewall endpoints. Use DescribeFirewall to monitor the status of the new endpoints.

        " + }, "AssociateFirewallPolicy":{ "name":"AssociateFirewallPolicy", "http":{ @@ -122,6 +157,25 @@ ], "documentation":"

        Creates a Network Firewall TLS inspection configuration. Network Firewall uses TLS inspection configurations to decrypt your firewall's inbound and outbound SSL/TLS traffic. After decryption, Network Firewall inspects the traffic according to your firewall policy's stateful rules, and then re-encrypts it before sending it to its destination. You can enable inspection of your firewall's inbound traffic, outbound traffic, or both. To use TLS inspection with your firewall, you must first import or provision certificates using ACM, create a TLS inspection configuration, add that configuration to a new firewall policy, and then associate that policy with your firewall.

        To update the settings for a TLS inspection configuration, use UpdateTLSInspectionConfiguration.

        To manage a TLS inspection configuration's tags, use the standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource, and UntagResource.

        To retrieve information about TLS inspection configurations, use ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration.

        For more information about TLS inspection configurations, see Inspecting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.

        " }, + "CreateVpcEndpointAssociation":{ + "name":"CreateVpcEndpointAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpcEndpointAssociationRequest"}, + "output":{"shape":"CreateVpcEndpointAssociationResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"}, + {"shape":"InsufficientCapacityException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

        Creates a firewall endpoint for a Network Firewall firewall. This type of firewall endpoint is independent of the firewall endpoints that you specify in the Firewall itself, and you define it in addition to those endpoints after the firewall has been created. You can define a VPC endpoint association using a different VPC than the one you used in the firewall specifications.
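
A minimal usage sketch, assuming the builder names that codegen would derive from the shapes added in this diff (SubnetMapping carries the subnet ID; the VPC here can differ from the firewall's primary VPC):

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.SubnetMapping;

    NetworkFirewallClient nf = NetworkFirewallClient.create();
    // Adds an endpoint that is independent of the firewall's own SubnetMappings.
    nf.createVpcEndpointAssociation(r -> r
            .firewallArn("arn:aws:network-firewall:us-east-2:123456789012:firewall/my-fw")
            .vpcId("vpc-0123456789abcdef0")
            .subnetMapping(SubnetMapping.builder().subnetId("subnet-0123456789abcdef0").build())
            .description("Extra endpoint for the analytics VPC"));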

        " + }, "DeleteFirewall":{ "name":"DeleteFirewall", "http":{ @@ -158,6 +212,22 @@ ], "documentation":"

        Deletes the specified FirewallPolicy.

        " }, + "DeleteNetworkFirewallTransitGatewayAttachment":{ + "name":"DeleteNetworkFirewallTransitGatewayAttachment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNetworkFirewallTransitGatewayAttachmentRequest"}, + "output":{"shape":"DeleteNetworkFirewallTransitGatewayAttachmentResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Deletes a transit gateway attachment from a Network Firewall. Either the firewall owner or the transit gateway owner can delete the attachment.

        After you delete a transit gateway attachment, traffic will no longer flow through the firewall endpoints.

        After you initiate the delete operation, use DescribeFirewall to monitor the deletion status.

        " + }, "DeleteResourcePolicy":{ "name":"DeleteResourcePolicy", "http":{ @@ -210,6 +280,23 @@ ], "documentation":"

        Deletes the specified TLSInspectionConfiguration.

        " }, + "DeleteVpcEndpointAssociation":{ + "name":"DeleteVpcEndpointAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpcEndpointAssociationRequest"}, + "output":{"shape":"DeleteVpcEndpointAssociationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

        Deletes the specified VpcEndpointAssociation.

        You can check whether an endpoint association is in use by reviewing the route tables for the Availability Zones where you have the endpoint subnet mapping. You can retrieve the subnet mapping by calling DescribeVpcEndpointAssociation. You define and update the route tables through Amazon VPC. As needed, update the route tables for the Availability Zone to remove the firewall endpoint for the association. When the route tables no longer use the firewall endpoint, you can remove the endpoint association safely.

        " + }, "DescribeFirewall":{ "name":"DescribeFirewall", "http":{ @@ -226,6 +313,22 @@ ], "documentation":"

        Returns the data objects for the specified firewall.

        " }, + "DescribeFirewallMetadata":{ + "name":"DescribeFirewallMetadata", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFirewallMetadataRequest"}, + "output":{"shape":"DescribeFirewallMetadataResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Returns the high-level information about a firewall, including the Availability Zones where the Firewall is currently in use.

        " + }, "DescribeFirewallPolicy":{ "name":"DescribeFirewallPolicy", "http":{ @@ -338,6 +441,40 @@ ], "documentation":"

        Returns the data objects for the specified TLS inspection configuration.

        " }, + "DescribeVpcEndpointAssociation":{ + "name":"DescribeVpcEndpointAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcEndpointAssociationRequest"}, + "output":{"shape":"DescribeVpcEndpointAssociationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Returns the data object for the specified VPC endpoint association.

        " + }, + "DisassociateAvailabilityZones":{ + "name":"DisassociateAvailabilityZones", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateAvailabilityZonesRequest"}, + "output":{"shape":"DisassociateAvailabilityZonesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

        Removes the specified Availability Zone associations from a transit gateway-attached firewall. This removes the firewall endpoints from these Availability Zones and stops traffic filtering in those zones. Before removing an Availability Zone, ensure you've updated your transit gateway route tables to redirect traffic appropriately.

        If AvailabilityZoneChangeProtection is enabled, you must first disable it using UpdateAvailabilityZoneChangeProtection.

        To verify the status of your Availability Zone changes, use DescribeFirewall.

        " + }, "DisassociateSubnets":{ "name":"DisassociateSubnets", "http":{ @@ -496,6 +633,21 @@ ], "documentation":"

        Retrieves the tags associated with the specified resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each Amazon Web Services resource, up to 50 tags for a resource.

        You can tag the Amazon Web Services resources that you manage through Network Firewall: firewalls, firewall policies, and rule groups.

        " }, + "ListVpcEndpointAssociations":{ + "name":"ListVpcEndpointAssociations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListVpcEndpointAssociationsRequest"}, + "output":{"shape":"ListVpcEndpointAssociationsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

        Retrieves the metadata for the VPC endpoint associations that you have defined. If you specify a firewall, this returns only the endpoint associations for that firewall.

        Depending on your setting for max results and the number of associations, a single call might not return the full list.
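
Because the paginators-1.json entry above registers NextToken/MaxResults with result key VpcEndpointAssociations, codegen should also emit an auto-paginating helper. A sketch, with the method name assumed from the standard listXxxPaginator convention:

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;

    NetworkFirewallClient nf = NetworkFirewallClient.create();
    String firewallArn = "arn:aws:network-firewall:us-east-2:123456789012:firewall/my-fw";
    // The paginator follows NextToken across pages transparently.
    nf.listVpcEndpointAssociationsPaginator(r -> r.firewallArn(firewallArn))
      .vpcEndpointAssociations()
      .forEach(association -> System.out.println(association));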

        " + }, "PutResourcePolicy":{ "name":"PutResourcePolicy", "http":{ @@ -511,7 +663,23 @@ {"shape":"ThrottlingException"}, {"shape":"InvalidResourcePolicyException"} ], - "documentation":"

        Creates or updates an IAM policy for your rule group or firewall policy. Use this to share rule groups and firewall policies between accounts. This operation works in conjunction with the Amazon Web Services Resource Access Manager (RAM) service to manage resource sharing for Network Firewall.

        Use this operation to create or update a resource policy for your rule group or firewall policy. In the policy, you specify the accounts that you want to share the resource with and the operations that you want the accounts to be able to perform.

        When you add an account in the resource policy, you then run the following Resource Access Manager (RAM) operations to access and accept the shared rule group or firewall policy.

        For additional information about resource sharing using RAM, see Resource Access Manager User Guide.

        " + "documentation":"

        Creates or updates an IAM policy for your rule group, firewall policy, or firewall. Use this to share these resources between accounts. This operation works in conjunction with the Amazon Web Services Resource Access Manager (RAM) service to manage resource sharing for Network Firewall.

        For information about using sharing with Network Firewall resources, see Sharing Network Firewall resources in the Network Firewall Developer Guide.

        Use this operation to create or update a resource policy for your Network Firewall rule group, firewall policy, or firewall. In the resource policy, you specify the accounts that you want to share the Network Firewall resource with and the operations that you want the accounts to be able to perform.

        When you add an account in the resource policy, you then run the following Resource Access Manager (RAM) operations to access and accept the shared resource.

        For additional information about resource sharing using RAM, see Resource Access Manager User Guide.

        " + }, + "RejectNetworkFirewallTransitGatewayAttachment":{ + "name":"RejectNetworkFirewallTransitGatewayAttachment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RejectNetworkFirewallTransitGatewayAttachmentRequest"}, + "output":{"shape":"RejectNetworkFirewallTransitGatewayAttachmentResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Rejects a transit gateway attachment request for Network Firewall. When you reject the attachment request, Network Firewall cancels the creation of routing components between the transit gateway and firewall endpoints.

        Only the transit gateway owner can reject the attachment. After rejection, no traffic will flow through the firewall endpoints for this attachment.

        Use DescribeFirewall to monitor the rejection status. To accept the attachment instead of rejecting it, use AcceptNetworkFirewallTransitGatewayAttachment.

        Once rejected, you cannot reverse this action. To establish connectivity, you must create a new transit gateway-attached firewall.

        " }, "StartAnalysisReport":{ "name":"StartAnalysisReport", @@ -593,6 +761,24 @@ ], "documentation":"

        Removes the tags with the specified keys from the specified resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each Amazon Web Services resource, up to 50 tags for a resource.

        You can manage tags for the Amazon Web Services resources that you manage through Network Firewall: firewalls, firewall policies, and rule groups.

        " }, + "UpdateAvailabilityZoneChangeProtection":{ + "name":"UpdateAvailabilityZoneChangeProtection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAvailabilityZoneChangeProtectionRequest"}, + "output":{"shape":"UpdateAvailabilityZoneChangeProtectionResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"ResourceOwnerCheckException"} + ], + "documentation":"

        Modifies the AvailabilityZoneChangeProtection setting for a transit gateway-attached firewall. When enabled, this setting prevents accidental changes to the firewall's Availability Zone configuration. This helps protect against disrupting traffic flow in production environments.

        When enabled, you must disable this protection before using AssociateAvailabilityZones or DisassociateAvailabilityZones to modify the firewall's Availability Zone configuration.
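
A sketch of that two-step sequence (the boolean member name on UpdateAvailabilityZoneChangeProtectionRequest is inferred from the shape name and is not shown in this hunk):

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.AvailabilityZoneMapping;

    NetworkFirewallClient nf = NetworkFirewallClient.create();
    // 1. Lift the guard rail...
    nf.updateAvailabilityZoneChangeProtection(r -> r
            .firewallName("my-tgw-firewall")
            .availabilityZoneChangeProtection(false));
    // 2. ...then remove the zone's firewall endpoint.
    nf.disassociateAvailabilityZones(r -> r
            .firewallName("my-tgw-firewall")
            .availabilityZoneMappings(AvailabilityZoneMapping.builder()
                    .availabilityZone("us-east-2c")
                    .build()));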

        " + }, "UpdateFirewallAnalysisSettings":{ "name":"UpdateFirewallAnalysisSettings", "http":{ @@ -769,6 +955,46 @@ } }, "shapes":{ + "AWSAccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^\\d{12}$" + }, + "AZSyncState":{ + "type":"structure", + "members":{ + "Attachment":{"shape":"Attachment"} + }, + "documentation":"

        The status of the firewall endpoint defined by a VpcEndpointAssociation.

        " + }, + "AcceptNetworkFirewallTransitGatewayAttachmentRequest":{ + "type":"structure", + "required":["TransitGatewayAttachmentId"], + "members":{ + "TransitGatewayAttachmentId":{ + "shape":"TransitGatewayAttachmentId", + "documentation":"

        Required. The unique identifier of the transit gateway attachment to accept. This ID is returned in the response when creating a transit gateway-attached firewall.

        " + } + } + }, + "AcceptNetworkFirewallTransitGatewayAttachmentResponse":{ + "type":"structure", + "required":[ + "TransitGatewayAttachmentId", + "TransitGatewayAttachmentStatus" + ], + "members":{ + "TransitGatewayAttachmentId":{ + "shape":"TransitGatewayAttachmentId", + "documentation":"

        The unique identifier of the transit gateway attachment that was accepted.

        " + }, + "TransitGatewayAttachmentStatus":{ + "shape":"TransitGatewayAttachmentStatus", + "documentation":"

        The current status of the transit gateway attachment. Valid values are:

        • CREATING - The attachment is being created

        • DELETING - The attachment is being deleted

        • DELETED - The attachment has been deleted

        • FAILED - The attachment creation has failed and cannot be recovered

        • ERROR - The attachment is in an error state that might be recoverable

        • READY - The attachment is active and processing traffic

        • PENDING_ACCEPTANCE - The attachment is waiting to be accepted

        • REJECTING - The attachment is in the process of being rejected

        • REJECTED - The attachment has been rejected

        " + } + } + }, "ActionDefinition":{ "type":"structure", "members":{ @@ -900,6 +1126,49 @@ }, "documentation":"

        The results of a COMPLETED analysis report generated with StartAnalysisReport.

        For an example of traffic analysis report results, see the response syntax of GetAnalysisReportResults.

        " }, + "AssociateAvailabilityZonesRequest":{ + "type":"structure", + "required":["AvailabilityZoneMappings"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

        An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

        To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

        To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.
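
This token contract maps naturally onto a read-modify-write retry loop. A hedged sketch against the generated client, assuming DescribeFirewall returns the current UpdateToken as in the existing API:

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.AvailabilityZoneMapping;
    import software.amazon.awssdk.services.networkfirewall.model.InvalidTokenException;

    NetworkFirewallClient nf = NetworkFirewallClient.create();
    while (true) {
        // Read the current firewall state to obtain a fresh token.
        String token = nf.describeFirewall(r -> r.firewallName("my-tgw-firewall")).updateToken();
        try {
            nf.associateAvailabilityZones(r -> r
                    .firewallName("my-tgw-firewall")
                    .updateToken(token)
                    .availabilityZoneMappings(AvailabilityZoneMapping.builder()
                            .availabilityZone("us-east-2b")
                            .build()));
            break; // conditional update succeeded
        } catch (InvalidTokenException e) {
            // The firewall changed since the read; loop to re-read and retry.
        }
    }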

        " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        You must specify the ARN or the name, and you can specify both.

        " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

        The descriptive name of the firewall. You can't change the name of a firewall after you create it.

        You must specify the ARN or the name, and you can specify both.

        " + }, + "AvailabilityZoneMappings":{ + "shape":"AvailabilityZoneMappings", + "documentation":"

        Required. The Availability Zones where you want to create firewall endpoints. You must specify at least one Availability Zone.

        " + } + } + }, + "AssociateAvailabilityZonesResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

        The descriptive name of the firewall. You can't change the name of a firewall after you create it.

        " + }, + "AvailabilityZoneMappings":{ + "shape":"AvailabilityZoneMappings", + "documentation":"

        The Availability Zones where Network Firewall created firewall endpoints. Each mapping specifies an Availability Zone where the firewall processes traffic.

        " + }, + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

        An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

        To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

        To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

        " + } + } + }, "AssociateFirewallPolicyRequest":{ "type":"structure", "required":["FirewallPolicyArn"], @@ -986,6 +1255,11 @@ } } }, + "AssociationSyncState":{ + "type":"map", + "key":{"shape":"AvailabilityZone"}, + "value":{"shape":"AZSyncState"} + }, "Attachment":{ "type":"structure", "members":{ @@ -999,15 +1273,16 @@ }, "Status":{ "shape":"AttachmentStatus", - "documentation":"

        The current status of the firewall endpoint in the subnet. This value reflects both the instantiation of the endpoint in the VPC subnet and the sync states that are reported in the Config settings. When this value is READY, the endpoint is available and configured properly to handle network traffic. When the endpoint isn't available for traffic, this value will reflect its state, for example CREATING or DELETING.

        " + "documentation":"

        The current status of the firewall endpoint instantiation in the subnet.

        When this value is READY, the endpoint is available to handle network traffic. Otherwise, this value reflects its state, for example CREATING or DELETING.

        " }, "StatusMessage":{ "shape":"StatusMessage", "documentation":"

        If Network Firewall fails to create or delete the firewall endpoint in the subnet, it populates this with the reason for the error or failure and how to resolve it. A FAILED status indicates a non-recoverable state, and an ERROR status indicates an issue that you can fix. Depending on the error, it can take as many as 15 minutes to populate this field. For more information about the causes for failure or errors and solutions available for this field, see Troubleshooting firewall endpoint failures in the Network Firewall Developer Guide.

        " } }, - "documentation":"

        The configuration and status for a single subnet that you've specified for use by the Network Firewall firewall. This is part of the FirewallStatus.

        " + "documentation":"

        The definition and status of the firewall endpoint for a single subnet. In each configured subnet, Network Firewall instantiates a firewall endpoint to handle network traffic.

        This data type is used for any firewall endpoint type:

        • For Firewall.SubnetMappings, this Attachment is part of the FirewallStatus sync states information. You define firewall subnets using CreateFirewall and AssociateSubnets.

        • For VpcEndpointAssociation, this Attachment is part of the VpcEndpointAssociationStatus sync states information. You define these subnets using CreateVpcEndpointAssociation.

        " }, + "AttachmentId":{"type":"string"}, "AttachmentStatus":{ "type":"string", "enum":[ @@ -1020,6 +1295,37 @@ ] }, "AvailabilityZone":{"type":"string"}, + "AvailabilityZoneMapping":{ + "type":"structure", + "required":["AvailabilityZone"], + "members":{ + "AvailabilityZone":{ + "shape":"AvailabilityZoneMappingString", + "documentation":"

        The ID of the Availability Zone where the firewall endpoint is located. For example, us-east-2a. The Availability Zone must be in the same Region as the transit gateway.

        " + } + }, + "documentation":"

        Defines the mapping between an Availability Zone and a firewall endpoint for a transit gateway-attached firewall. Each mapping represents where the firewall can process traffic. You use these mappings when calling CreateFirewall, AssociateAvailabilityZones, and DisassociateAvailabilityZones.

        To retrieve the current Availability Zone mappings for a firewall, use DescribeFirewall.

        " + }, + "AvailabilityZoneMappingString":{ + "type":"string", + "max":128, + "min":1, + "pattern":"\\S+" + }, + "AvailabilityZoneMappings":{ + "type":"list", + "member":{"shape":"AvailabilityZoneMapping"} + }, + "AvailabilityZoneMetadata":{ + "type":"structure", + "members":{ + "IPAddressType":{ + "shape":"IPAddressType", + "documentation":"

        The IP address type of the Firewall subnet in the Availability Zone. You can't change the IP address type after you create the subnet.

        " + } + }, + "documentation":"

        High-level information about an Availability Zone where the firewall has an endpoint defined.

        " + }, "AzSubnet":{ "type":"string", "max":128, @@ -1193,6 +1499,18 @@ "EnabledAnalysisTypes":{ "shape":"EnabledAnalysisTypes", "documentation":"

        An optional setting indicating the specific traffic analysis types to enable on the firewall.

        " + }, + "TransitGatewayId":{ + "shape":"TransitGatewayId", + "documentation":"

        Required when creating a transit gateway-attached firewall. The unique identifier of the transit gateway to attach to this firewall. You can provide either a transit gateway from your account or one that has been shared with you through Resource Access Manager.

        After creating the firewall, you cannot change the transit gateway association. To use a different transit gateway, you must create a new firewall.

        For information about creating firewalls, see CreateFirewall. For specific guidance about transit gateway-attached firewalls, see Considerations for transit gateway-attached firewalls in the Network Firewall Developer Guide.

        " + }, + "AvailabilityZoneMappings":{ + "shape":"AvailabilityZoneMappings", + "documentation":"

        Required. The Availability Zones where you want to create firewall endpoints for a transit gateway-attached firewall. You must specify at least one Availability Zone. Consider enabling the firewall in every Availability Zone where you have workloads to maintain Availability Zone independence.

        You can modify Availability Zones later using AssociateAvailabilityZones or DisassociateAvailabilityZones, but this may briefly disrupt traffic. The AvailabilityZoneChangeProtection setting controls whether you can make these modifications.

        " + }, + "AvailabilityZoneChangeProtection":{ + "shape":"Boolean", + "documentation":"

        Optional. A setting indicating whether the firewall is protected against changes to its Availability Zone configuration. When set to TRUE, you cannot add or remove Availability Zones without first disabling this protection using UpdateAvailabilityZoneChangeProtection.

        Default value: FALSE
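
Putting the three new members together, a sketch of creating a transit gateway-attached firewall (names assumed from this model; a VPC-attached firewall would supply VpcId and SubnetMappings instead of TransitGatewayId):

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.AvailabilityZoneMapping;

    NetworkFirewallClient nf = NetworkFirewallClient.create();
    nf.createFirewall(r -> r
            .firewallName("my-tgw-firewall")
            .firewallPolicyArn("arn:aws:network-firewall:us-east-2:123456789012:firewall-policy/base")
            .transitGatewayId("tgw-0123456789abcdef0")
            // One endpoint per zone with workloads, for Availability Zone independence.
            .availabilityZoneMappings(
                    AvailabilityZoneMapping.builder().availabilityZone("us-east-2a").build(),
                    AvailabilityZoneMapping.builder().availabilityZone("us-east-2b").build())
            .availabilityZoneChangeProtection(true));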

        " } } }, @@ -1205,7 +1523,7 @@ }, "FirewallStatus":{ "shape":"FirewallStatus", - "documentation":"

        Detailed information about the current status of a Firewall. You can retrieve this for a firewall by calling DescribeFirewall and providing the firewall name and ARN.

        " + "documentation":"

        Detailed information about the current status of a Firewall. You can retrieve this for a firewall by calling DescribeFirewall and providing the firewall name and ARN.

        The firewall status indicates a combined status. It indicates whether all subnets are up-to-date with the latest firewall configurations, which is based on the sync states config values, and also whether all subnets have their endpoints fully enabled, based on their sync states attachment values.

        " } } }, @@ -1323,6 +1641,46 @@ } } }, + "CreateVpcEndpointAssociationRequest":{ + "type":"structure", + "required":[ + "FirewallArn", + "VpcId", + "SubnetMapping" + ], + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        " + }, + "VpcId":{ + "shape":"VpcId", + "documentation":"

        The unique identifier of the VPC where you want to create a firewall endpoint.

        " + }, + "SubnetMapping":{"shape":"SubnetMapping"}, + "Description":{ + "shape":"Description", + "documentation":"

        A description of the VPC endpoint association.

        " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

        The key:value pairs to associate with the resource.

        " + } + } + }, + "CreateVpcEndpointAssociationResponse":{ + "type":"structure", + "members":{ + "VpcEndpointAssociation":{ + "shape":"VpcEndpointAssociation", + "documentation":"

        The configuration settings for the VPC endpoint association. These settings include the firewall and the VPC and subnet to use for the firewall endpoint.

        " + }, + "VpcEndpointAssociationStatus":{ + "shape":"VpcEndpointAssociationStatus", + "documentation":"

        Detailed information about the current status of a VpcEndpointAssociation. You can retrieve this by calling DescribeVpcEndpointAssociation and providing the VPC endpoint association ARN.

        " + } + } + }, "CustomAction":{ "type":"structure", "required":[ @@ -1388,6 +1746,33 @@ "FirewallStatus":{"shape":"FirewallStatus"} } }, + "DeleteNetworkFirewallTransitGatewayAttachmentRequest":{ + "type":"structure", + "required":["TransitGatewayAttachmentId"], + "members":{ + "TransitGatewayAttachmentId":{ + "shape":"TransitGatewayAttachmentId", + "documentation":"

        Required. The unique identifier of the transit gateway attachment to delete.

        " + } + } + }, + "DeleteNetworkFirewallTransitGatewayAttachmentResponse":{ + "type":"structure", + "required":[ + "TransitGatewayAttachmentId", + "TransitGatewayAttachmentStatus" + ], + "members":{ + "TransitGatewayAttachmentId":{ + "shape":"TransitGatewayAttachmentId", + "documentation":"

        The ID of the transit gateway attachment that was deleted.

        " + }, + "TransitGatewayAttachmentStatus":{ + "shape":"TransitGatewayAttachmentStatus", + "documentation":"

        The current status of the transit gateway attachment deletion process.

        Valid values are:

        • CREATING - The attachment is being created

        • DELETING - The attachment is being deleted

        • DELETED - The attachment has been deleted

        • FAILED - The attachment creation has failed and cannot be recovered

        • ERROR - The attachment is in an error state that might be recoverable

        • READY - The attachment is active and processing traffic

        • PENDING_ACCEPTANCE - The attachment is waiting to be accepted

        • REJECTING - The attachment is in the process of being rejected

        • REJECTED - The attachment has been rejected

        " + } + } + }, "DeleteResourcePolicyRequest":{ "type":"structure", "required":["ResourceArn"], @@ -1400,8 +1785,7 @@ }, "DeleteResourcePolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteRuleGroupRequest":{ "type":"structure", @@ -1443,13 +1827,74 @@ } } }, - "DeleteTLSInspectionConfigurationResponse":{ + "DeleteTLSInspectionConfigurationResponse":{ + "type":"structure", + "required":["TLSInspectionConfigurationResponse"], + "members":{ + "TLSInspectionConfigurationResponse":{ + "shape":"TLSInspectionConfigurationResponse", + "documentation":"

        The high-level properties of a TLS inspection configuration. This, along with the TLSInspectionConfiguration, define the TLS inspection configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration.

        " + } + } + }, + "DeleteVpcEndpointAssociationRequest":{ + "type":"structure", + "required":["VpcEndpointAssociationArn"], + "members":{ + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " + } + } + }, + "DeleteVpcEndpointAssociationResponse":{ + "type":"structure", + "members":{ + "VpcEndpointAssociation":{ + "shape":"VpcEndpointAssociation", + "documentation":"

        The configuration settings for the VPC endpoint association. These settings include the firewall and the VPC and subnet to use for the firewall endpoint.

        " + }, + "VpcEndpointAssociationStatus":{ + "shape":"VpcEndpointAssociationStatus", + "documentation":"

        Detailed information about the current status of a VpcEndpointAssociation. You can retrieve this by calling DescribeVpcEndpointAssociation and providing the VPC endpoint association ARN.

        " + } + } + }, + "DescribeFirewallMetadataRequest":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        " + } + } + }, + "DescribeFirewallMetadataResponse":{ "type":"structure", - "required":["TLSInspectionConfigurationResponse"], "members":{ - "TLSInspectionConfigurationResponse":{ - "shape":"TLSInspectionConfigurationResponse", - "documentation":"

        The high-level properties of a TLS inspection configuration. This, along with the TLSInspectionConfiguration, define the TLS inspection configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration.

        " + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        " + }, + "FirewallPolicyArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall policy.

        " + }, + "Description":{ + "shape":"Description", + "documentation":"

        A description of the firewall.

        " + }, + "Status":{ + "shape":"FirewallStatusValue", + "documentation":"

        The readiness of the configured firewall to handle network traffic across all of the Availability Zones where you have it configured. This setting is READY only when the ConfigurationSyncStateSummary value is IN_SYNC and the Attachment Status values for all of the configured subnets are READY.

        " + }, + "SupportedAvailabilityZones":{ + "shape":"SupportedAvailabilityZones", + "documentation":"

        The Availability Zones that the firewall currently supports. This includes all Availability Zones for which the firewall has a subnet defined.

        " + }, + "TransitGatewayAttachmentId":{ + "shape":"TransitGatewayAttachmentId", + "documentation":"

        The unique identifier of the transit gateway attachment associated with this firewall. This field is only present for transit gateway-attached firewalls.

        " } } }, @@ -1513,7 +1958,7 @@ }, "FirewallStatus":{ "shape":"FirewallStatus", - "documentation":"

        Detailed information about the current status of a Firewall. You can retrieve this for a firewall by calling DescribeFirewall and providing the firewall name and ARN.

        " + "documentation":"

        Detailed information about the current status of a Firewall. You can retrieve this for a firewall by calling DescribeFirewall and providing the firewall name and ARN.

        The firewall status indicates a combined status. It indicates whether all subnets are up-to-date with the latest firewall configurations, which is based on the sync states config values, and also whether all subnets have their endpoints fully enabled, based on their sync states attachment values.

        " } } }, @@ -1532,6 +1977,14 @@ "shape":"AvailabilityZone", "documentation":"

        The ID of the Availability Zone where the firewall is located. For example, us-east-2a.

        Defines the scope of a flow operation. You can use up to 20 filters to configure a single flow operation.

        " }, + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " + }, + "VpcEndpointId":{ + "shape":"VpcEndpointId", + "documentation":"

        A unique identifier for the primary endpoint associated with a firewall.

        " + }, "FlowOperationId":{ "shape":"FlowOperationId", "documentation":"

        A unique identifier for the flow operation. This ID is returned in the responses to start and list commands. You provide it to describe commands.

        " @@ -1549,6 +2002,14 @@ "shape":"AvailabilityZone", "documentation":"

        The ID of the Availability Zone where the firewall is located. For example, us-east-2a.

        Defines the scope of a flow operation. You can use up to 20 filters to configure a single flow operation.

        " }, + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " + }, + "VpcEndpointId":{ + "shape":"VpcEndpointId", + "documentation":"

        A unique identifier for the primary endpoint associated with a firewall.

        " + }, "FlowOperationId":{ "shape":"FlowOperationId", "documentation":"

        A unique identifier for the flow operation. This ID is returned in the responses to start and list commands. You provide it to describe commands.

        " @@ -1595,7 +2056,11 @@ "shape":"ResourceArn", "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        " }, - "LoggingConfiguration":{"shape":"LoggingConfiguration"} + "LoggingConfiguration":{"shape":"LoggingConfiguration"}, + "EnableMonitoringDashboard":{ + "shape":"EnableMonitoringDashboard", + "documentation":"

        A boolean that reflects whether the firewall monitoring dashboard is enabled on a firewall.

        Returns TRUE when the dashboard is enabled on the firewall, and FALSE when it is not.
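
For example, reading the flag back (a sketch; the accessor name is assumed from the member added here, and the boxed Boolean may be null on responses that omit it):

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.DescribeLoggingConfigurationResponse;

    NetworkFirewallClient nf = NetworkFirewallClient.create();
    DescribeLoggingConfigurationResponse logging =
            nf.describeLoggingConfiguration(r -> r.firewallName("my-fw"));
    if (Boolean.TRUE.equals(logging.enableMonitoringDashboard())) {
        System.out.println("Monitoring dashboard is enabled.");
    }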

        " + } } }, "DescribeResourcePolicyRequest":{ @@ -1664,7 +2129,7 @@ "StatefulRuleOptions":{"shape":"StatefulRuleOptions"}, "LastModifiedTime":{ "shape":"LastUpdateTime", - "documentation":"

        The last time that the rule group was changed.

        " + "documentation":"

        A timestamp indicating when the rule group was last modified.

        " } } }, @@ -1744,6 +2209,29 @@ } } }, + "DescribeVpcEndpointAssociationRequest":{ + "type":"structure", + "required":["VpcEndpointAssociationArn"], + "members":{ + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " + } + } + }, + "DescribeVpcEndpointAssociationResponse":{ + "type":"structure", + "members":{ + "VpcEndpointAssociation":{ + "shape":"VpcEndpointAssociation", + "documentation":"

        The configuration settings for the VPC endpoint association. These settings include the firewall and the VPC and subnet to use for the firewall endpoint.

        " + }, + "VpcEndpointAssociationStatus":{ + "shape":"VpcEndpointAssociationStatus", + "documentation":"

        Detailed information about the current status of a VpcEndpointAssociation. You can retrieve this by calling DescribeVpcEndpointAssociation and providing the VPC endpoint association ARN.

        " + } + } + }, "Description":{ "type":"string", "max":512, @@ -1778,6 +2266,49 @@ "max":1, "min":1 }, + "DisassociateAvailabilityZonesRequest":{ + "type":"structure", + "required":["AvailabilityZoneMappings"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

        An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

        To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

        To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

        " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        You must specify the ARN or the name, and you can specify both.

        " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

        The descriptive name of the firewall. You can't change the name of a firewall after you create it.

        You must specify the ARN or the name, and you can specify both.

        " + }, + "AvailabilityZoneMappings":{ + "shape":"AvailabilityZoneMappings", + "documentation":"

        Required. The Availability Zones to remove from the firewall's configuration.

        " + } + } + }, + "DisassociateAvailabilityZonesResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

        The descriptive name of the firewall. You can't change the name of a firewall after you create it.

        " + }, + "AvailabilityZoneMappings":{ + "shape":"AvailabilityZoneMappings", + "documentation":"

        The remaining Availability Zones where the firewall has endpoints after the disassociation.

        " + }, + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

        An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

        To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

        To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

        " + } + } + }, "DisassociateSubnetsRequest":{ "type":"structure", "required":["SubnetIds"], @@ -1822,6 +2353,7 @@ } }, "Domain":{"type":"string"}, + "EnableMonitoringDashboard":{"type":"boolean"}, "EnabledAnalysisType":{ "type":"string", "enum":[ @@ -1885,7 +2417,7 @@ }, "SubnetMappings":{ "shape":"SubnetMappings", - "documentation":"

        The public subnets that Network Firewall is using for the firewall. Each subnet must belong to a different Availability Zone.

        " + "documentation":"

        The primary public subnets that Network Firewall is using for the firewall. Network Firewall creates a firewall endpoint in each subnet. Create a subnet mapping for each Availability Zone where you want to use the firewall.

        These subnets are all defined for a single, primary VPC, and each must belong to a different Availability Zone. Each of these subnets establishes the availability of the firewall in its Availability Zone.

        In addition to these subnets, you can define other endpoints for the firewall in VpcEndpointAssociation resources. You can define these additional endpoints for any VPC, and for any of the Availability Zones where the firewall resource already has a subnet mapping. VPC endpoint associations give you the ability to protect multiple VPCs using a single firewall, and to define multiple firewall endpoints for a VPC in a single Availability Zone.

        " }, "DeleteProtection":{ "shape":"Boolean", @@ -1915,12 +2447,32 @@ "shape":"EncryptionConfiguration", "documentation":"

        A complex type that contains the Amazon Web Services KMS encryption configuration settings for your firewall.

        " }, + "NumberOfAssociations":{ + "shape":"NumberOfAssociations", + "documentation":"

        The number of VpcEndpointAssociation resources that use this firewall.

        " + }, "EnabledAnalysisTypes":{ "shape":"EnabledAnalysisTypes", "documentation":"

        An optional setting indicating the specific traffic analysis types to enable on the firewall.

        " + }, + "TransitGatewayId":{ + "shape":"TransitGatewayId", + "documentation":"

        The unique identifier of the transit gateway associated with this firewall. This field is only present for transit gateway-attached firewalls.

        " + }, + "TransitGatewayOwnerAccountId":{ + "shape":"AWSAccountId", + "documentation":"

        The Amazon Web Services account ID that owns the transit gateway. This may be different from the firewall owner's account ID when using a shared transit gateway.

        " + }, + "AvailabilityZoneMappings":{ + "shape":"AvailabilityZoneMappings", + "documentation":"

        The Availability Zones where the firewall endpoints are created for a transit gateway-attached firewall. Each mapping specifies an Availability Zone where the firewall processes traffic.

        " + }, + "AvailabilityZoneChangeProtection":{ + "shape":"Boolean", + "documentation":"

        A setting indicating whether the firewall is protected against changes to its Availability Zone configuration. When set to TRUE, you must first disable this protection before adding or removing Availability Zones.

        " } }, - "documentation":"

        The firewall defines the configuration settings for an Network Firewall firewall. These settings include the firewall policy, the subnets in your VPC to use for the firewall endpoints, and any tags that are attached to the firewall Amazon Web Services resource.

        The status of the firewall, for example whether it's ready to filter network traffic, is provided in the corresponding FirewallStatus. You can retrieve both objects by calling DescribeFirewall.

        " + "documentation":"

        A firewall defines the behavior of a firewall, the main VPC where the firewall is used, the Availability Zones where the firewall can be used, and one subnet to use for a firewall endpoint within each of the Availability Zones. The Availability Zones are defined implicitly in the subnet specifications.

        In addition to the firewall endpoints that you define in this Firewall specification, you can create firewall endpoints in VpcEndpointAssociation resources for any VPC, in any Availability Zone where the firewall is already in use.

        The status of the firewall, for example whether it's ready to filter network traffic, is provided in the corresponding FirewallStatus. You can retrieve both the firewall and firewall status by calling DescribeFirewall.

        " }, "FirewallMetadata":{ "type":"structure", @@ -1932,6 +2484,10 @@ "FirewallArn":{ "shape":"ResourceArn", "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        " + }, + "TransitGatewayAttachmentId":{ + "shape":"TransitGatewayAttachmentId", + "documentation":"

        The unique identifier of the transit gateway attachment associated with this firewall. This field is only present for transit gateway-attached firewalls.

        " } }, "documentation":"

        High-level information about a firewall, returned by operations like create and describe. You can use the information provided in the metadata to retrieve and manage a firewall.

        " @@ -2064,22 +2620,26 @@ "members":{ "Status":{ "shape":"FirewallStatusValue", - "documentation":"

        The readiness of the configured firewall to handle network traffic across all of the Availability Zones where you've configured it. This setting is READY only when the ConfigurationSyncStateSummary value is IN_SYNC and the Attachment Status values for all of the configured subnets are READY.

        " + "documentation":"

        The readiness of the configured firewall to handle network traffic across all of the Availability Zones where you have it configured. This setting is READY only when the ConfigurationSyncStateSummary value is IN_SYNC and the Attachment Status values for all of the configured subnets are READY.

        " }, "ConfigurationSyncStateSummary":{ "shape":"ConfigurationSyncState", - "documentation":"

        The configuration sync state for the firewall. This summarizes the sync states reported in the Config settings for all of the Availability Zones where you have configured the firewall.

        When you create a firewall or update its configuration, for example by adding a rule group to its firewall policy, Network Firewall distributes the configuration changes to all zones where the firewall is in use. This summary indicates whether the configuration changes have been applied everywhere.

        This status must be IN_SYNC for the firewall to be ready for use, but it doesn't indicate that the firewall is ready. The Status setting indicates firewall readiness.

        " + "documentation":"

        The configuration sync state for the firewall. This summarizes the Config settings in the SyncStates for this firewall status object.

        When you create a firewall or update its configuration, for example by adding a rule group to its firewall policy, Network Firewall distributes the configuration changes to all Availability Zones that have subnets defined for the firewall. This summary indicates whether the configuration changes have been applied everywhere.

        This status must be IN_SYNC for the firewall to be ready for use, but it doesn't indicate that the firewall is ready. The Status setting indicates firewall readiness. It's based on this setting and the readiness of the firewall endpoints to take traffic.

        " }, "SyncStates":{ "shape":"SyncStates", - "documentation":"

        The subnets that you've configured for use by the Network Firewall firewall. This contains one array element per Availability Zone where you've configured a subnet. These objects provide details of the information that is summarized in the ConfigurationSyncStateSummary and Status, broken down by zone and configuration object.

        " + "documentation":"

        Status for the subnets that you've configured in the firewall. This contains one array element per Availability Zone where you've configured a subnet in the firewall.

        These objects provide detailed information for the settings ConfigurationSyncStateSummary and Status.

        " }, "CapacityUsageSummary":{ "shape":"CapacityUsageSummary", - "documentation":"

        Describes the capacity usage of the resources contained in a firewall's reference sets. Network Firewall calclulates the capacity usage by taking an aggregated count of all of the resources used by all of the reference sets in a firewall.

        " + "documentation":"

        Describes the capacity usage of the resources contained in a firewall's reference sets. Network Firewall calculates the capacity usage by taking an aggregated count of all of the resources used by all of the reference sets in a firewall.

        " + }, + "TransitGatewayAttachmentSyncState":{ + "shape":"TransitGatewayAttachmentSyncState", + "documentation":"

        The synchronization state of the transit gateway attachment. This indicates whether the firewall's transit gateway configuration is properly synchronized and operational. Use this to verify that your transit gateway configuration changes have been applied.

        " } }, - "documentation":"

        Detailed information about the current status of a Firewall. You can retrieve this for a firewall by calling DescribeFirewall and providing the firewall name and ARN.

        " + "documentation":"

        Detailed information about the current status of a Firewall. You can retrieve this for a firewall by calling DescribeFirewall and providing the firewall name and ARN.

        The firewall status indicates a combined status. It indicates whether all subnets are up-to-date with the latest firewall configurations, which is based on the sync states config values, and also whether all subnets have their endpoints fully enabled, based on their sync states attachment values.

        " }, "FirewallStatusValue":{ "type":"string", @@ -2613,6 +3173,14 @@ "AvailabilityZone":{ "shape":"AvailabilityZone", "documentation":"

        The ID of the Availability Zone where the firewall is located. For example, us-east-2a.

        Defines the scope of a flow operation. You can use up to 20 filters to configure a single flow operation.

        " + }, + "VpcEndpointId":{ + "shape":"VpcEndpointId", + "documentation":"

        A unique identifier for the primary endpoint associated with a firewall.

        " + }, + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " } } }, @@ -2627,6 +3195,14 @@ "shape":"AvailabilityZone", "documentation":"

        The ID of the Availability Zone where the firewall is located. For example, us-east-2a.

        Defines the scope of a flow operation. You can use up to 20 filters to configure a single flow operation.

        " }, + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        " + }, + "VpcEndpointId":{ + "shape":"VpcEndpointId", + "documentation":"

        " + }, "FlowOperationId":{ "shape":"FlowOperationId", "documentation":"

        A unique identifier for the flow operation. This ID is returned in the responses to start and list commands. You provide it to describe commands.

        " @@ -2665,6 +3241,14 @@ "shape":"AvailabilityZone", "documentation":"

        The ID of the Availability Zone where the firewall is located. For example, us-east-2a.

        Defines the scope of a flow operation. You can use up to 20 filters to configure a single flow operation.

        " }, + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " + }, + "VpcEndpointId":{ + "shape":"VpcEndpointId", + "documentation":"

        A unique identifier for the primary endpoint associated with a firewall.

        " + }, "FlowOperationType":{ "shape":"FlowOperationType", "documentation":"

        An optional string that defines whether any or all operation types are returned.

        " @@ -2787,6 +3371,36 @@ } } }, + "ListVpcEndpointAssociationsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

        When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

        " + }, + "MaxResults":{ + "shape":"PaginationMaxResults", + "documentation":"

        The maximum number of objects that you want Network Firewall to return for this request. If more objects are available, in the response, Network Firewall provides a NextToken value that you can use in a subsequent call to get the next batch of objects.

        " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        If you don't specify this, Network Firewall retrieves all VPC endpoint associations that you have defined.

        " + } + } + }, + "ListVpcEndpointAssociationsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

        When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

        " + }, + "VpcEndpointAssociations":{ + "shape":"VpcEndpointAssociations", + "documentation":"

        The VPC endpoint association metadata objects for the firewall that you specified. If you didn't specify a firewall, this is all VPC endpoint associations that you have defined.

        Depending on your setting for max results and the number of associations you have, a single call might not return the full list.

        " + } + } + }, "LogDestinationConfig":{ "type":"structure", "required":[ @@ -3034,18 +3648,17 @@ "members":{ "ResourceArn":{ "shape":"ResourceArn", - "documentation":"

        The Amazon Resource Name (ARN) of the account that you want to share rule groups and firewall policies with.

        " + "documentation":"

        The Amazon Resource Name (ARN) of the account that you want to share your Network Firewall resources with.

        " }, "Policy":{ "shape":"PolicyString", - "documentation":"

        The IAM policy statement that lists the accounts that you want to share your rule group or firewall policy with and the operations that you want the accounts to be able to perform.

        For a rule group resource, you can specify the following operations in the Actions section of the statement:

        • network-firewall:CreateFirewallPolicy

        • network-firewall:UpdateFirewallPolicy

        • network-firewall:ListRuleGroups

        For a firewall policy resource, you can specify the following operations in the Actions section of the statement:

        • network-firewall:AssociateFirewallPolicy

        • network-firewall:ListFirewallPolicies

        In the Resource section of the statement, you specify the ARNs for the rule groups and firewall policies that you want to share with the account that you specified in Arn.

        " + "documentation":"

        The IAM policy statement that lists the accounts that you want to share your Network Firewall resources with and the operations that you want the accounts to be able to perform.

        For a rule group resource, you can specify the following operations in the Actions section of the statement:

        • network-firewall:CreateFirewallPolicy

        • network-firewall:UpdateFirewallPolicy

        • network-firewall:ListRuleGroups

        For a firewall policy resource, you can specify the following operations in the Actions section of the statement:

        • network-firewall:AssociateFirewallPolicy

        • network-firewall:ListFirewallPolicies

        For a firewall resource, you can specify the following operations in the Actions section of the statement:

        • network-firewall:CreateVpcEndpointAssociation

        • network-firewall:DescribeFirewallMetadata

        • network-firewall:ListFirewalls

        In the Resource section of the statement, you specify the ARNs for the Network Firewall resources that you want to share with the account that you specified in Arn.

        " } } }, "PutResourcePolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "ReferenceSets":{ "type":"structure", @@ -3057,6 +3670,33 @@ }, "documentation":"

        Contains a set of IP set references.

        " }, + "RejectNetworkFirewallTransitGatewayAttachmentRequest":{ + "type":"structure", + "required":["TransitGatewayAttachmentId"], + "members":{ + "TransitGatewayAttachmentId":{ + "shape":"TransitGatewayAttachmentId", + "documentation":"

        Required. The unique identifier of the transit gateway attachment to reject. This ID is returned in the response when creating a transit gateway-attached firewall.

        " + } + } + }, + "RejectNetworkFirewallTransitGatewayAttachmentResponse":{ + "type":"structure", + "required":[ + "TransitGatewayAttachmentId", + "TransitGatewayAttachmentStatus" + ], + "members":{ + "TransitGatewayAttachmentId":{ + "shape":"TransitGatewayAttachmentId", + "documentation":"

        The unique identifier of the transit gateway attachment that was rejected.

        " + }, + "TransitGatewayAttachmentStatus":{ + "shape":"TransitGatewayAttachmentStatus", + "documentation":"

        The current status of the transit gateway attachment. Valid values are:

        • CREATING - The attachment is being created

        • DELETING - The attachment is being deleted

        • DELETED - The attachment has been deleted

        • FAILED - The attachment creation has failed and cannot be recovered

        • ERROR - The attachment is in an error state that might be recoverable

        • READY - The attachment is active and processing traffic

        • PENDING_ACCEPTANCE - The attachment is waiting to be accepted

        • REJECTING - The attachment is in the process of being rejected

        • REJECTED - The attachment has been rejected

        For information about troubleshooting endpoint failures, see Troubleshooting firewall endpoint failures in the Network Firewall Developer Guide.

        " + } + } + }, "ReportTime":{"type":"timestamp"}, "ResourceArn":{ "type":"string", @@ -3236,7 +3876,7 @@ }, "SnsTopic":{ "shape":"ResourceArn", - "documentation":"

        The Amazon resource name (ARN) of the Amazon Simple Notification Service SNS topic that's used to record changes to the managed rule group. You can subscribe to the SNS topic to receive notifications when the managed rule group is modified, such as for new versions and for version expiration. For more information, see the Amazon Simple Notification Service Developer Guide..

        " + "documentation":"

        The Amazon Resource Name (ARN) of the Amazon Simple Notification Service SNS topic that's used to record changes to the managed rule group. You can subscribe to the SNS topic to receive notifications when the managed rule group is modified, such as for new versions and for version expiration. For more information, see the Amazon Simple Notification Service Developer Guide.

        " }, "LastModifiedTime":{ "shape":"LastUpdateTime", @@ -3312,7 +3952,7 @@ "documentation":"

        A list of port ranges.

        " } }, - "documentation":"

        Settings that are available for use in the rules in the RuleGroup where this is defined.

        " + "documentation":"

        Settings that are available for use in the rules in the RuleGroup where this is defined. See CreateRuleGroup or UpdateRuleGroup for usage.

        " }, "RulesSource":{ "type":"structure", @@ -3387,7 +4027,7 @@ }, "CertificateAuthorityArn":{ "shape":"ResourceArn", - "documentation":"

        The Amazon Resource Name (ARN) of the imported certificate authority (CA) certificate within Certificate Manager (ACM) to use for outbound SSL/TLS inspection.

        The following limitations apply:

        • You can use CA certificates that you imported into ACM, but you can't generate CA certificates with ACM.

        • You can't use certificates issued by Private Certificate Authority.

        For more information about configuring certificates for outbound inspection, see Using SSL/TLS certificates with certificates with TLS inspection configurations in the Network Firewall Developer Guide.

        For information about working with certificates in ACM, see Importing certificates in the Certificate Manager User Guide.

        " + "documentation":"

        The Amazon Resource Name (ARN) of the imported certificate authority (CA) certificate within Certificate Manager (ACM) to use for outbound SSL/TLS inspection.

        The following limitations apply:

        • You can use CA certificates that you imported into ACM, but you can't generate CA certificates with ACM.

        • You can't use certificates issued by Private Certificate Authority.

        For more information about configuring certificates for outbound inspection, see Using SSL/TLS certificates with TLS inspection configurations in the Network Firewall Developer Guide.

        For information about working with certificates in ACM, see Importing certificates in the Certificate Manager User Guide.

        " }, "CheckCertificateRevocationStatus":{ "shape":"CheckCertificateRevocationStatusActions", @@ -3507,6 +4147,14 @@ "shape":"AvailabilityZone", "documentation":"

        The ID of the Availability Zone where the firewall is located. For example, us-east-2a.

        Defines the scope of a flow operation. You can use up to 20 filters to configure a single flow operation.

        " }, + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " + }, + "VpcEndpointId":{ + "shape":"VpcEndpointId", + "documentation":"

        A unique identifier for the primary endpoint associated with a firewall.

        " + }, "MinimumFlowAgeInSeconds":{ "shape":"Age", "documentation":"

        The requested FlowOperation ignores flows with an age (in seconds) lower than MinimumFlowAgeInSeconds. You provide this for start commands.

        We recommend setting this value to at least 1 minute (60 seconds) to reduce the chance of capturing flows that are not yet established.

        " @@ -3549,6 +4197,14 @@ "shape":"AvailabilityZone", "documentation":"

        The ID of the Availability Zone where the firewall is located. For example, us-east-2a.

        Defines the scope of a flow operation. You can use up to 20 filters to configure a single flow operation.

        " }, + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " + }, + "VpcEndpointId":{ + "shape":"VpcEndpointId", + "documentation":"

        A unique identifier for the primary endpoint associated with a firewall.

        " + }, "MinimumFlowAgeInSeconds":{ "shape":"Age", "documentation":"

        The requested FlowOperation ignores flows with an age (in seconds) lower than MinimumFlowAgeInSeconds. You provide this for start commands.

        " @@ -3595,7 +4251,7 @@ "members":{ "RuleOrder":{ "shape":"RuleOrder", - "documentation":"

        Indicates how to manage the order of stateful rule evaluation for the policy. STRICT_ORDER is the default and recommended option. With STRICT_ORDER, provide your rules in the order that you want them to be evaluated. You can then choose one or more default actions for packets that don't match any rules. Choose STRICT_ORDER to have the stateful rules engine determine the evaluation order of your rules. The default action for this rule order is PASS, followed by DROP, REJECT, and ALERT actions. Stateful rules are provided to the rule engine as Suricata compatible strings, and Suricata evaluates them based on your settings. For more information, see Evaluation order for stateful rules in the Network Firewall Developer Guide.

        " + "documentation":"

        Indicates how to manage the order of stateful rule evaluation for the policy. STRICT_ORDER is the recommended option, but DEFAULT_ACTION_ORDER is the default option. With STRICT_ORDER, provide your rules in the order that you want them to be evaluated. You can then choose one or more default actions for packets that don't match any rules. Choose DEFAULT_ACTION_ORDER to have the stateful rules engine determine the evaluation order of your rules. The default action for this rule order is PASS, followed by DROP, REJECT, and ALERT actions. Stateful rules are provided to the rule engine as Suricata compatible strings, and Suricata evaluates them based on your settings. For more information, see Evaluation order for stateful rules in the Network Firewall Developer Guide.

        " }, "StreamExceptionPolicy":{ "shape":"StreamExceptionPolicy", @@ -3703,7 +4359,9 @@ "IKEV2", "TFTP", "NTP", - "DHCP" + "DHCP", + "HTTP2", + "QUIC" ] }, "StatefulRules":{ @@ -3802,25 +4460,30 @@ "documentation":"

        The subnet's IP address type. You can't change the IP address type after you create the subnet.

        " } }, - "documentation":"

        The ID for a subnet that you want to associate with the firewall. This is used with CreateFirewall and AssociateSubnets. Network Firewall creates an instance of the associated firewall in each subnet that you specify, to filter traffic in the subnet's Availability Zone.

        " + "documentation":"

        The ID for a subnet that's used in an association with a firewall. This is used in CreateFirewall, AssociateSubnets, and CreateVpcEndpointAssociation. Network Firewall creates an instance of the associated firewall in each subnet that you specify, to filter traffic in the subnet's Availability Zone.

        " }, "SubnetMappings":{ "type":"list", "member":{"shape":"SubnetMapping"} }, + "SupportedAvailabilityZones":{ + "type":"map", + "key":{"shape":"AvailabilityZone"}, + "value":{"shape":"AvailabilityZoneMetadata"} + }, "SyncState":{ "type":"structure", "members":{ "Attachment":{ "shape":"Attachment", - "documentation":"

        The attachment status of the firewall's association with a single VPC subnet. For each configured subnet, Network Firewall creates the attachment by instantiating the firewall endpoint in the subnet so that it's ready to take traffic. This is part of the FirewallStatus.

        " + "documentation":"

        The configuration and status for a single firewall subnet. For each configured subnet, Network Firewall creates the attachment by instantiating the firewall endpoint in the subnet so that it's ready to take traffic.

        " }, "Config":{ "shape":"SyncStateConfig", - "documentation":"

        The configuration status of the firewall endpoint in a single VPC subnet. Network Firewall provides each endpoint with the rules that are configured in the firewall policy. Each time you add a subnet or modify the associated firewall policy, Network Firewall synchronizes the rules in the endpoint, so it can properly filter network traffic. This is part of the FirewallStatus.

        " + "documentation":"

        The configuration status of the firewall endpoint in a single VPC subnet. Network Firewall provides each endpoint with the rules that are configured in the firewall policy. Each time you add a subnet or modify the associated firewall policy, Network Firewall synchronizes the rules in the endpoint, so it can properly filter network traffic.

        " } }, - "documentation":"

        The status of the firewall endpoint and firewall policy configuration for a single VPC subnet.

        For each VPC subnet that you associate with a firewall, Network Firewall does the following:

        • Instantiates a firewall endpoint in the subnet, ready to take traffic.

        • Configures the endpoint with the current firewall policy settings, to provide the filtering behavior for the endpoint.

        When you update a firewall, for example to add a subnet association or change a rule group in the firewall policy, the affected sync states reflect out-of-sync or not ready status until the changes are complete.

        " + "documentation":"

        The status of the firewall endpoint and firewall policy configuration for a single VPC subnet. This is part of the FirewallStatus.

        For each VPC subnet that you associate with a firewall, Network Firewall does the following:

        • Instantiates a firewall endpoint in the subnet, ready to take traffic.

        • Configures the endpoint with the current firewall policy settings, to provide the filtering behavior for the endpoint.

        When you update a firewall, for example to add a subnet association or change a rule group in the firewall policy, the affected sync states reflect out-of-sync or not ready status until the changes are complete.

        " }, "SyncStateConfig":{ "type":"map", @@ -3999,8 +4662,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -4055,6 +4717,51 @@ }, "documentation":"

        Contains metadata about a Certificate Manager certificate.

        " }, + "TransitGatewayAttachmentId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^tgw-attach-[0-9a-z]+$" + }, + "TransitGatewayAttachmentStatus":{ + "type":"string", + "enum":[ + "CREATING", + "DELETING", + "DELETED", + "FAILED", + "ERROR", + "READY", + "PENDING_ACCEPTANCE", + "REJECTING", + "REJECTED" + ] + }, + "TransitGatewayAttachmentSyncState":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"AttachmentId", + "documentation":"

        The unique identifier of the transit gateway attachment.

        " + }, + "TransitGatewayAttachmentStatus":{ + "shape":"TransitGatewayAttachmentStatus", + "documentation":"

        The current status of the transit gateway attachment.

        Valid values are:

        • CREATING - The attachment is being created

        • DELETING - The attachment is being deleted

        • DELETED - The attachment has been deleted

        • FAILED - The attachment creation has failed and cannot be recovered

        • ERROR - The attachment is in an error state that might be recoverable

        • READY - The attachment is active and processing traffic

        • PENDING_ACCEPTANCE - The attachment is waiting to be accepted

        • REJECTING - The attachment is in the process of being rejected

        • REJECTED - The attachment has been rejected

        " + }, + "StatusMessage":{ + "shape":"TransitGatewayAttachmentSyncStateMessage", + "documentation":"

        A message providing additional information about the current status, particularly useful when the transit gateway attachment is in a non-READY state.

        Valid values are:

        • CREATING - The attachment is being created

        • DELETING - The attachment is being deleted

        • DELETED - The attachment has been deleted

        • FAILED - The attachment creation has failed and cannot be recovered

        • ERROR - The attachment is in an error state that might be recoverable

        • READY - The attachment is active and processing traffic

        • PENDING_ACCEPTANCE - The attachment is waiting to be accepted

        • REJECTING - The attachment is in the process of being rejected

        • REJECTED - The attachment has been rejected

        For information about troubleshooting endpoint failures, see Troubleshooting firewall endpoint failures in the Network Firewall Developer Guide.

        " + } + }, + "documentation":"

        Contains information about the synchronization state of a transit gateway attachment, including its current status and any error messages. Network Firewall uses this to track the state of your transit gateway configuration changes.

        " + }, + "TransitGatewayAttachmentSyncStateMessage":{"type":"string"}, + "TransitGatewayId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^tgw-[0-9a-z]+$" + }, "UniqueSources":{ "type":"structure", "members":{ @@ -4091,8 +4798,50 @@ } }, "UntagResourceResponse":{ + "type":"structure", + "members":{} + }, + "UpdateAvailabilityZoneChangeProtectionRequest":{ + "type":"structure", + "required":["AvailabilityZoneChangeProtection"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

        An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

        To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

        To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

        " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        You must specify the ARN or the name, and you can specify both.

        " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

        The descriptive name of the firewall. You can't change the name of a firewall after you create it.

        You must specify the ARN or the name, and you can specify both.

        " + }, + "AvailabilityZoneChangeProtection":{ + "shape":"Boolean", + "documentation":"

        A setting indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

        " + } + } + }, + "UpdateAvailabilityZoneChangeProtectionResponse":{ "type":"structure", "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

        An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

        To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

        To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

        " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

        The descriptive name of the firewall. You can't change the name of a firewall after you create it.

        " + }, + "AvailabilityZoneChangeProtection":{ + "shape":"Boolean", + "documentation":"

        A setting indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

        " + } } }, "UpdateFirewallAnalysisSettingsRequest":{ @@ -4369,6 +5118,10 @@ "LoggingConfiguration":{ "shape":"LoggingConfiguration", "documentation":"

        Defines how Network Firewall performs logging for a firewall. If you omit this setting, Network Firewall disables logging for the firewall.

        " + }, + "EnableMonitoringDashboard":{ + "shape":"EnableMonitoringDashboard", + "documentation":"

        A boolean that lets you enable or disable the detailed firewall monitoring dashboard on the firewall.

        The monitoring dashboard provides comprehensive visibility into your firewall's flow logs and alert logs. After you enable detailed monitoring, you can access these dashboards directly from the Monitoring page of the Network Firewall console.

        Specify TRUE to enable the detailed monitoring dashboard on the firewall. Specify FALSE to disable the detailed monitoring dashboard on the firewall.

        " } } }, @@ -4383,7 +5136,11 @@ "shape":"ResourceName", "documentation":"

        The descriptive name of the firewall. You can't change the name of a firewall after you create it.

        " }, - "LoggingConfiguration":{"shape":"LoggingConfiguration"} + "LoggingConfiguration":{"shape":"LoggingConfiguration"}, + "EnableMonitoringDashboard":{ + "shape":"EnableMonitoringDashboard", + "documentation":"

        A boolean that reflects whether or not the firewall monitoring dashboard is enabled on a firewall.

        Returns TRUE when the firewall monitoring dashboard is enabled on the firewall. Returns FALSE when the firewall monitoring dashboard is not enabled on the firewall.

        " + } } }, "UpdateRuleGroupRequest":{ @@ -4561,6 +5318,78 @@ "type":"list", "member":{"shape":"VariableDefinition"} }, + "VpcEndpointAssociation":{ + "type":"structure", + "required":[ + "VpcEndpointAssociationArn", + "FirewallArn", + "VpcId", + "SubnetMapping" + ], + "members":{ + "VpcEndpointAssociationId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the VPC endpoint association.

        " + }, + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        " + }, + "VpcId":{ + "shape":"VpcId", + "documentation":"

        The unique identifier of the VPC for the endpoint association.

        " + }, + "SubnetMapping":{"shape":"SubnetMapping"}, + "Description":{ + "shape":"Description", + "documentation":"

        A description of the VPC endpoint association.

        " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

        The key:value pairs to associate with the resource.

        " + } + }, + "documentation":"

        A VPC endpoint association defines a single subnet to use for a firewall endpoint for a Firewall. You can define VPC endpoint associations only in the Availability Zones that already have a subnet mapping defined in the Firewall resource.

        You can retrieve the list of Availability Zones that are available for use by calling DescribeFirewallMetadata.

        To manage firewall endpoints, first, in the Firewall specification, you specify a single VPC and one subnet for each of the Availability Zones where you want to use the firewall. Then you can define additional endpoints as VPC endpoint associations.

        You can use VPC endpoint associations to expand the protections of the firewall as follows:

        • Protect multiple VPCs with a single firewall - You can use the firewall to protect other VPCs, either in your account or in accounts where the firewall is shared. You can only specify Availability Zones that already have a firewall endpoint defined in the Firewall subnet mappings.

        • Define multiple firewall endpoints for a VPC in an Availability Zone - You can create additional firewall endpoints for the VPC that you have defined in the firewall, in any Availability Zone that already has an endpoint defined in the Firewall subnet mappings. You can create multiple VPC endpoint associations for any other VPC where you use the firewall.

        You can use Resource Access Manager to share a Firewall that you own with other accounts, which gives them the ability to use the firewall to create VPC endpoint associations. For information about sharing a firewall, see PutResourcePolicy in this guide and see Sharing Network Firewall resources in the Network Firewall Developer Guide.

        The status of the VPC endpoint association, which indicates whether it's ready to filter network traffic, is provided in the corresponding VpcEndpointAssociationStatus. You can retrieve both the association and its status by calling DescribeVpcEndpointAssociation.

        " + }, + "VpcEndpointAssociationMetadata":{ + "type":"structure", + "members":{ + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " + } + }, + "documentation":"

        High-level information about a VPC endpoint association, returned by ListVpcEndpointAssociations. You can use the information provided in the metadata to retrieve and manage a VPC endpoint association.

        " + }, + "VpcEndpointAssociationStatus":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{ + "shape":"FirewallStatusValue", + "documentation":"

        The readiness of the configured firewall endpoint to handle network traffic.

        " + }, + "AssociationSyncState":{ + "shape":"AssociationSyncState", + "documentation":"

        The list of the Availability Zone sync states for all subnets that are defined by the firewall.

        " + } + }, + "documentation":"

        Detailed information about the current status of a VpcEndpointAssociation. You can retrieve this by calling DescribeVpcEndpointAssociation and providing the VPC endpoint association ARN.

        " + }, + "VpcEndpointAssociations":{ + "type":"list", + "member":{"shape":"VpcEndpointAssociationMetadata"} + }, + "VpcEndpointId":{ + "type":"string", + "max":256, + "min":5, + "pattern":"^vpce-[a-zA-Z0-9]*$" + }, "VpcId":{ "type":"string", "max":128, @@ -4572,5 +5401,5 @@ "member":{"shape":"VpcId"} } }, - "documentation":"

        This is the API Reference for Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors.

        The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the Amazon Web Services REST APIs, see Amazon Web Services APIs.

        To view the complete list of Amazon Web Services Regions where Network Firewall is available, see Service endpoints and quotas in the Amazon Web Services General Reference.

        To access Network Firewall using the IPv4 REST API endpoint: https://network-firewall.<region>.amazonaws.com

        To access Network Firewall using the Dualstack (IPv4 and IPv6) REST API endpoint: https://network-firewall.<region>.aws.api

        Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs.

        For descriptions of Network Firewall features, including and step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide.

        Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source network analysis and threat detection engine. Network Firewall supports Suricata version 7.0.3. For information about Suricata, see the Suricata website and the Suricata User Guide.

        You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples:

        • Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and block all other forms of traffic.

        • Use custom lists of known bad domains to limit the types of domain names that your applications can access.

        • Perform deep packet inspection on traffic entering or leaving your VPC.

        • Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used.

        To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide.

        To start using Network Firewall, do the following:

        1. (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC.

        2. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall.

        3. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have.

        4. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior.

        5. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy.

        6. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints.

        " + "documentation":"

        This is the API Reference for Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors.

        The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and handling errors. For general information about using the Amazon Web Services REST APIs, see Amazon Web Services APIs.

        To view the complete list of Amazon Web Services Regions where Network Firewall is available, see Service endpoints and quotas in the Amazon Web Services General Reference.

        To access Network Firewall using the IPv4 REST API endpoint: https://network-firewall.<region>.amazonaws.com

        To access Network Firewall using the Dualstack (IPv4 and IPv6) REST API endpoint: https://network-firewall.<region>.api.aws

        Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs.

        For descriptions of Network Firewall features, including step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide.

        Network Firewall is a stateful, managed network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source network analysis and threat detection engine. Network Firewall supports Suricata version 7.0.3. For information about Suricata, see the Suricata website and the Suricata User Guide.

        You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples:

        • Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and block all other forms of traffic.

        • Use custom lists of known bad domains to limit the types of domain names that your applications can access.

        • Perform deep packet inspection on traffic entering or leaving your VPC.

        • Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used.

        To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide.

        To start using Network Firewall, do the following:

        1. (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC.

        2. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall.

        3. In Network Firewall, define the firewall behavior as follows:

          1. Create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have.

          2. Create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior.

        4. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy.

        5. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints.

        After your firewall is established, you can add firewall endpoints for new Availability Zones by following the prior steps for the Amazon VPC setup and firewall subnet definitions. You can also add endpoints to Availability Zones that you're using in the firewall, either for the same VPC or for another VPC, by following the prior steps for the Amazon VPC setup, and defining the new VPC subnets as VPC endpoint associations.
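        As a sketch of steps 3 and 4 from the getting-started list above in the AWS SDK for Java v2, with placeholder VPC and subnet IDs and a deliberately minimal policy that forwards everything to the stateful engine:

            import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
            import software.amazon.awssdk.services.networkfirewall.model.SubnetMapping;

            public class NetworkFirewallQuickStartSketch {
                public static void main(String[] args) {
                    try (NetworkFirewallClient client = NetworkFirewallClient.create()) {
                        // Step 3b: a firewall policy (rule groups could be attached here as well).
                        String policyArn = client.createFirewallPolicy(r -> r
                                .firewallPolicyName("example-policy")
                                .firewallPolicy(p -> p
                                    .statelessDefaultActions("aws:forward_to_sfe")
                                    .statelessFragmentDefaultActions("aws:forward_to_sfe")))
                            .firewallPolicyResponse().firewallPolicyArn();

                        // Step 4: the firewall itself, one dedicated subnet per Availability Zone.
                        client.createFirewall(r -> r
                            .firewallName("example-firewall")
                            .firewallPolicyArn(policyArn)
                            .vpcId("vpc-0123456789abcdef0")             // placeholder
                            .subnetMappings(SubnetMapping.builder()
                                .subnetId("subnet-0123456789abcdef0")   // placeholder
                                .build()));
                    }
                }
            }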

        " } diff --git a/services/networkflowmonitor/pom.xml b/services/networkflowmonitor/pom.xml index 861dad311e5a..9e8f6cf0ceef 100644 --- a/services/networkflowmonitor/pom.xml +++ b/services/networkflowmonitor/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT networkflowmonitor AWS Java SDK :: Services :: Network Flow Monitor diff --git a/services/networkmanager/pom.xml b/services/networkmanager/pom.xml index f32e98be5e3b..4647921be904 100644 --- a/services/networkmanager/pom.xml +++ b/services/networkmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT networkmanager AWS Java SDK :: Services :: NetworkManager diff --git a/services/networkmanager/src/main/resources/codegen-resources/service-2.json b/services/networkmanager/src/main/resources/codegen-resources/service-2.json index e64ee0b937d9..a578b2c681c8 100644 --- a/services/networkmanager/src/main/resources/codegen-resources/service-2.json +++ b/services/networkmanager/src/main/resources/codegen-resources/service-2.json @@ -2611,6 +2611,18 @@ "ServiceInsertionActions":{ "shape":"ServiceInsertionActionList", "documentation":"

        Describes the service insertion action.

        " + }, + "VpnEcmpSupport":{ + "shape":"Boolean", + "documentation":"

        Indicates whether Equal Cost Multipath (ECMP) is enabled for the core network.

        " + }, + "DnsSupport":{ + "shape":"Boolean", + "documentation":"

        Indicates whether public DNS support is enabled. The default is true.

        " + }, + "SecurityGroupReferencingSupport":{ + "shape":"Boolean", + "documentation":"

        Indicates whether security group referencing is enabled for the core network.

        " } }, "documentation":"

        Describes a core network change.

        " @@ -3716,8 +3728,7 @@ }, "DeleteResourcePolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteSiteRequest":{ "type":"structure", @@ -4102,8 +4113,7 @@ }, "ExecuteCoreNetworkChangeSetResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "ExternalRegionCode":{ "type":"string", @@ -6256,8 +6266,7 @@ }, "PutResourcePolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "ReasonContextKey":{ "type":"string", @@ -6903,8 +6912,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -7120,8 +7128,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateConnectionRequest":{ "type":"structure", @@ -7552,6 +7559,14 @@ "ApplianceModeSupport":{ "shape":"Boolean", "documentation":"

        Indicates whether appliance mode is supported. If enabled, traffic flows between a source and destination use the same Availability Zone for the VPC attachment for the lifetime of that flow. The default value is false.

        " + }, + "DnsSupport":{ + "shape":"Boolean", + "documentation":"

        Indicates whether DNS is supported.

        " + }, + "SecurityGroupReferencingSupport":{ + "shape":"Boolean", + "documentation":"

        Indicates whether security group referencing is enabled for this VPC attachment. The default is true. However, at the core network policy level, the default is set to false.

        " } }, "documentation":"

        Describes the VPC options.
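        A short sketch of how these VpcOptions toggles would look through the generated networkmanager client, assuming the standard builder names for the new members:

            import software.amazon.awssdk.services.networkmanager.model.VpcOptions;

            final class VpcAttachmentOptionsSketch {
                static VpcOptions options() {
                    return VpcOptions.builder()
                        .applianceModeSupport(false)            // default is false
                        .dnsSupport(true)                       // default is true
                        .securityGroupReferencingSupport(true)  // default true; false at the policy level
                        .build();
                }
            }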

        " diff --git a/services/networkmonitor/pom.xml b/services/networkmonitor/pom.xml index 850918e15d8c..275c69666e05 100644 --- a/services/networkmonitor/pom.xml +++ b/services/networkmonitor/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT networkmonitor AWS Java SDK :: Services :: Network Monitor diff --git a/services/notifications/pom.xml b/services/notifications/pom.xml index 71286b9f3a15..87b861faacd5 100644 --- a/services/notifications/pom.xml +++ b/services/notifications/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT notifications AWS Java SDK :: Services :: Notifications diff --git a/services/notificationscontacts/pom.xml b/services/notificationscontacts/pom.xml index aff246fe284f..13f09a5c06b5 100644 --- a/services/notificationscontacts/pom.xml +++ b/services/notificationscontacts/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT notificationscontacts AWS Java SDK :: Services :: Notifications Contacts diff --git a/services/oam/pom.xml b/services/oam/pom.xml index 3f189d98e969..62d42d197a08 100644 --- a/services/oam/pom.xml +++ b/services/oam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT oam AWS Java SDK :: Services :: OAM diff --git a/services/observabilityadmin/pom.xml b/services/observabilityadmin/pom.xml index aa092e913caf..e7af45e21aaf 100644 --- a/services/observabilityadmin/pom.xml +++ b/services/observabilityadmin/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT observabilityadmin AWS Java SDK :: Services :: Observability Admin diff --git a/services/omics/pom.xml b/services/omics/pom.xml index 41964adb9a49..a39dedfe37a2 100644 --- a/services/omics/pom.xml +++ b/services/omics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT omics AWS Java SDK :: Services :: Omics diff --git a/services/opensearch/pom.xml b/services/opensearch/pom.xml index 69ba5b7c1261..9e2353336b0f 100644 --- a/services/opensearch/pom.xml +++ b/services/opensearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT opensearch AWS Java SDK :: Services :: Open Search diff --git a/services/opensearchserverless/pom.xml b/services/opensearchserverless/pom.xml index 8b5a0623fdf3..b8f8fffb0f7b 100644 --- a/services/opensearchserverless/pom.xml +++ b/services/opensearchserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT opensearchserverless AWS Java SDK :: Services :: Open Search Serverless diff --git a/services/opsworks/pom.xml b/services/opsworks/pom.xml index 121294cfd45f..a7122f820bd5 100644 --- a/services/opsworks/pom.xml +++ b/services/opsworks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT opsworks AWS Java SDK :: Services :: AWS OpsWorks diff --git a/services/opsworkscm/pom.xml b/services/opsworkscm/pom.xml index b6aad88d966c..d1ac20dfda16 100644 --- a/services/opsworkscm/pom.xml +++ b/services/opsworkscm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT opsworkscm AWS Java SDK :: Services :: AWS OpsWorks for Chef Automate diff --git a/services/organizations/pom.xml b/services/organizations/pom.xml index 2493cdec55c5..47dc673a95ab 100644 --- a/services/organizations/pom.xml +++ b/services/organizations/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT organizations AWS Java SDK :: Services :: AWS Organizations diff --git a/services/osis/pom.xml b/services/osis/pom.xml index 5183ead749cb..ad0b4a5237d1 100644 --- a/services/osis/pom.xml +++ b/services/osis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT osis AWS Java SDK :: Services :: OSIS diff --git a/services/outposts/pom.xml b/services/outposts/pom.xml index b7c3ffbd8a95..ccf9cb82c27f 100644 --- a/services/outposts/pom.xml +++ b/services/outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT outposts AWS Java SDK :: Services :: Outposts diff --git a/services/panorama/pom.xml b/services/panorama/pom.xml index ab92de5c12cf..f136d8363009 100644 --- a/services/panorama/pom.xml +++ b/services/panorama/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT panorama AWS Java SDK :: Services :: Panorama diff --git a/services/partnercentralselling/pom.xml b/services/partnercentralselling/pom.xml index 567aeafe61d2..19d9490ccfa4 100644 --- a/services/partnercentralselling/pom.xml +++ b/services/partnercentralselling/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT partnercentralselling AWS Java SDK :: Services :: Partner Central Selling diff --git a/services/paymentcryptography/pom.xml b/services/paymentcryptography/pom.xml index ef17b6f220b7..7f9d890664ee 100644 --- a/services/paymentcryptography/pom.xml +++ b/services/paymentcryptography/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT paymentcryptography AWS Java SDK :: Services :: Payment Cryptography diff --git a/services/paymentcryptographydata/pom.xml b/services/paymentcryptographydata/pom.xml index 398cd478aa51..c55cc12e8864 100644 --- a/services/paymentcryptographydata/pom.xml +++ b/services/paymentcryptographydata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT paymentcryptographydata AWS Java SDK :: Services :: Payment Cryptography Data diff --git a/services/pcaconnectorad/pom.xml b/services/pcaconnectorad/pom.xml index a40669b6b4dc..d7874a8b9d1e 100644 --- a/services/pcaconnectorad/pom.xml +++ b/services/pcaconnectorad/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT pcaconnectorad AWS Java SDK :: Services :: Pca Connector Ad diff --git a/services/pcaconnectorscep/pom.xml b/services/pcaconnectorscep/pom.xml index e0640977cfe5..3f39cd6c9281 100644 --- a/services/pcaconnectorscep/pom.xml +++ b/services/pcaconnectorscep/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT pcaconnectorscep AWS Java SDK :: Services :: Pca Connector Scep diff --git a/services/pcs/pom.xml b/services/pcs/pom.xml index 902044606e0a..d49571bbe2ad 100644 --- a/services/pcs/pom.xml +++ b/services/pcs/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT pcs AWS Java SDK :: Services :: PCS diff --git a/services/pcs/src/main/resources/codegen-resources/service-2.json b/services/pcs/src/main/resources/codegen-resources/service-2.json index c4f3b14af2ab..9f2597a82478 100644 --- a/services/pcs/src/main/resources/codegen-resources/service-2.json +++ b/services/pcs/src/main/resources/codegen-resources/service-2.json @@ -401,7 +401,7 @@ "type":"string", "max":1011, "min":1, - 
"pattern":"arn:aws*:pcs:.*:[0-9]{12}:.*/[a-z0-9_\\-]+" + "pattern":"arn:aws.*:pcs:.*:[0-9]{12}:.*/[a-z0-9_\\-]+" }, "BootstrapId":{ "type":"string", @@ -437,7 +437,7 @@ }, "status":{ "shape":"ClusterStatus", - "documentation":"

        The provisioning status of the cluster.

        The provisioning status doesn't indicate the overall health of the cluster.

        " + "documentation":"

        The provisioning status of the cluster.

        The provisioning status doesn't indicate the overall health of the cluster.

        The resource enters the SUSPENDING and SUSPENDED states when the scheduler is beyond end of life and we have suspended the cluster. When in these states, you can't use the cluster. The cluster controller is down and all compute instances are terminated. The resources still count toward your service quotas. You can delete a resource if its status is SUSPENDED. For more information, see Frequently asked questions about Slurm versions in PCS in the PCS User Guide.

        " }, "createdAt":{ "shape":"SyntheticTimestamp_date_time", @@ -541,7 +541,9 @@ "DELETING", "CREATE_FAILED", "DELETE_FAILED", - "UPDATE_FAILED" + "UPDATE_FAILED", + "SUSPENDING", + "SUSPENDED" ] }, "ClusterSummary":{ @@ -577,7 +579,7 @@ }, "status":{ "shape":"ClusterStatus", - "documentation":"

        The provisioning status of the cluster.

        The provisioning status doesn't indicate the overall health of the cluster.

        " + "documentation":"

        The provisioning status of the cluster.

        The provisioning status doesn't indicate the overall health of the cluster.

        The resource enters the SUSPENDING and SUSPENDED states when the scheduler is beyond end of life and we have suspended the cluster. When in these states, you can't use the cluster. The cluster controller is down and all compute instances are terminated. The resources still count toward your service quotas. You can delete a resource if its status is SUSPENDED. For more information, see Frequently asked questions about Slurm versions in PCS in the PCS User Guide.

        " } }, "documentation":"

        The object returned by the ListClusters API action.

        " @@ -625,7 +627,7 @@ }, "status":{ "shape":"ComputeNodeGroupStatus", - "documentation":"

        The provisioning status of the compute node group.

        The provisioning status doesn't indicate the overall health of the compute node group.

        " + "documentation":"

        The provisioning status of the compute node group.

        The provisioning status doesn't indicate the overall health of the compute node group.

        The resource enters the SUSPENDING and SUSPENDED states when the scheduler is beyond end of life and we have suspended the cluster. When in these states, you can't use the cluster. The cluster controller is down and all compute instances are terminated. The resources still count toward your service quotas. You can delete a resource if its status is SUSPENDED. For more information, see Frequently asked questions about Slurm versions in PCS in the PCS User Guide.

        " }, "amiId":{ "shape":"AmiId", @@ -716,7 +718,9 @@ "CREATE_FAILED", "DELETE_FAILED", "UPDATE_FAILED", - "DELETED" + "DELETED", + "SUSPENDING", + "SUSPENDED" ] }, "ComputeNodeGroupSummary":{ @@ -757,7 +761,7 @@ }, "status":{ "shape":"ComputeNodeGroupStatus", - "documentation":"

        The provisioning status of the compute node group.

        The provisioning status doesn't indicate the overall health of the compute node group.

        " + "documentation":"

        The provisioning status of the compute node group.

        The provisioning status doesn't indicate the overall health of the compute node group.

        The resource enters the SUSPENDING and SUSPENDED states when the scheduler is beyond end of life and we have suspended the cluster. When in these states, you can't use the cluster. The cluster controller is down and all compute instances are terminated. The resources still count toward your service quotas. You can delete a resource if its status is SUSPENDED. For more information, see Frequently asked questions about Slurm versions in PCS in the PCS User Guide.

        " } }, "documentation":"

        The object returned by the ListComputeNodeGroups API action.

        " @@ -1164,7 +1168,7 @@ }, "InstanceProfileArn":{ "type":"string", - "pattern":"arn:aws([a-zA-Z-]{0,10})?:iam::[0-9]{12}:instance-profile/.{1,128}" + "pattern":"arn:aws([a-zA-Z-]{0,10})?:iam::[0-9]{12}:instance-profile/[\\w+=,.@-]{1,128}" }, "Integer":{ "type":"integer", @@ -1371,7 +1375,7 @@ }, "status":{ "shape":"QueueStatus", - "documentation":"

        The provisioning status of the queue.

        The provisioning status doesn't indicate the overall health of the queue.

        " + "documentation":"

        The provisioning status of the queue.

        The provisioning status doesn't indicate the overall health of the queue.

        The resource enters the SUSPENDING and SUSPENDED states when the scheduler is beyond end of life and we have suspended the cluster. When in these states, you can't use the cluster. The cluster controller is down and all compute instances are terminated. The resources still count toward your service quotas. You can delete a resource if its status is SUSPENDED. For more information, see Frequently asked questions about Slurm versions in PCS in the PCS User Guide.

        " }, "computeNodeGroupConfigurations":{ "shape":"ComputeNodeGroupConfigurationList", @@ -1407,7 +1411,9 @@ "DELETING", "CREATE_FAILED", "DELETE_FAILED", - "UPDATE_FAILED" + "UPDATE_FAILED", + "SUSPENDING", + "SUSPENDED" ] }, "QueueSummary":{ @@ -1448,7 +1454,7 @@ }, "status":{ "shape":"QueueStatus", - "documentation":"

        The provisioning status of the queue.

        The provisioning status doesn't indicate the overall health of the queue.

        " + "documentation":"

        The provisioning status of the queue.

        The provisioning status doesn't indicate the overall health of the queue.

        The resource enters the SUSPENDING and SUSPENDED states when the scheduler is beyond end of life and we have suspended the cluster. When in these states, you can't use the cluster. The cluster controller is down and all compute instances are terminated. The resources still count toward your service quotas. You can delete a resource if its status is SUSPENDED. For more information, see Frequently asked questions about Slurm versions in PCS in the PCS User Guide.

        " } }, "documentation":"

        The object returned by the ListQueues API action.

        " diff --git a/services/personalize/pom.xml b/services/personalize/pom.xml index 988da210a7a0..7f30763b0c8c 100644 --- a/services/personalize/pom.xml +++ b/services/personalize/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT personalize AWS Java SDK :: Services :: Personalize diff --git a/services/personalizeevents/pom.xml b/services/personalizeevents/pom.xml index f942ab1183c1..b406f0989af5 100644 --- a/services/personalizeevents/pom.xml +++ b/services/personalizeevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT personalizeevents AWS Java SDK :: Services :: Personalize Events diff --git a/services/personalizeruntime/pom.xml b/services/personalizeruntime/pom.xml index 458aaa5def3c..321ff578134a 100644 --- a/services/personalizeruntime/pom.xml +++ b/services/personalizeruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT personalizeruntime AWS Java SDK :: Services :: Personalize Runtime diff --git a/services/pi/pom.xml b/services/pi/pom.xml index 05264c06e2eb..aad74298c205 100644 --- a/services/pi/pom.xml +++ b/services/pi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT pi AWS Java SDK :: Services :: PI diff --git a/services/pinpoint/pom.xml b/services/pinpoint/pom.xml index ec7e1182065f..c85f7b9575a6 100644 --- a/services/pinpoint/pom.xml +++ b/services/pinpoint/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT pinpoint AWS Java SDK :: Services :: Amazon Pinpoint diff --git a/services/pinpointemail/pom.xml b/services/pinpointemail/pom.xml index 521acd9fcc3f..4b16e5bedecc 100644 --- a/services/pinpointemail/pom.xml +++ b/services/pinpointemail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT pinpointemail AWS Java SDK :: Services :: Pinpoint Email diff --git a/services/pinpointsmsvoice/pom.xml b/services/pinpointsmsvoice/pom.xml index b94bb656e8d8..35550929f35d 100644 --- a/services/pinpointsmsvoice/pom.xml +++ b/services/pinpointsmsvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT pinpointsmsvoice AWS Java SDK :: Services :: Pinpoint SMS Voice diff --git a/services/pinpointsmsvoicev2/pom.xml b/services/pinpointsmsvoicev2/pom.xml index 40ec256c2457..460c3af05a1b 100644 --- a/services/pinpointsmsvoicev2/pom.xml +++ b/services/pinpointsmsvoicev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT pinpointsmsvoicev2 AWS Java SDK :: Services :: Pinpoint SMS Voice V2 diff --git a/services/pipes/pom.xml b/services/pipes/pom.xml index 9e73d1b10f07..f6b5663c7305 100644 --- a/services/pipes/pom.xml +++ b/services/pipes/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT pipes AWS Java SDK :: Services :: Pipes diff --git a/services/polly/pom.xml b/services/polly/pom.xml index c3a68b25f27c..6bbffb6aa666 100644 --- a/services/polly/pom.xml +++ b/services/polly/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT polly AWS Java SDK :: Services :: Amazon Polly diff --git a/services/pom.xml b/services/pom.xml index 7a0aa7f8e88d..3834cd1bfc24 100644 --- a/services/pom.xml +++ b/services/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT services AWS Java SDK :: Services @@ -424,6 
+424,7 @@ iotmanagedintegrations gameliftstreams ssmguiconnect + evs The AWS Java SDK services https://aws.amazon.com/sdkforjava diff --git a/services/pricing/pom.xml b/services/pricing/pom.xml index f9f2e34b5072..71acc5ac2c1d 100644 --- a/services/pricing/pom.xml +++ b/services/pricing/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 pricing diff --git a/services/proton/pom.xml b/services/proton/pom.xml index 9a2f9ea7fc39..1ff9fb00a6c6 100644 --- a/services/proton/pom.xml +++ b/services/proton/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT proton AWS Java SDK :: Services :: Proton diff --git a/services/qapps/pom.xml b/services/qapps/pom.xml index f6d71027168b..c78b488c13aa 100644 --- a/services/qapps/pom.xml +++ b/services/qapps/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT qapps AWS Java SDK :: Services :: Q Apps diff --git a/services/qbusiness/pom.xml b/services/qbusiness/pom.xml index e2328c9dd18d..e5c42d55132c 100644 --- a/services/qbusiness/pom.xml +++ b/services/qbusiness/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT qbusiness AWS Java SDK :: Services :: Q Business diff --git a/services/qconnect/pom.xml b/services/qconnect/pom.xml index 0cc6304ffca9..67be25b48ec4 100644 --- a/services/qconnect/pom.xml +++ b/services/qconnect/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT qconnect AWS Java SDK :: Services :: Q Connect diff --git a/services/qldb/pom.xml b/services/qldb/pom.xml index b4e19fd47ba0..f590ca6786e7 100644 --- a/services/qldb/pom.xml +++ b/services/qldb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT qldb AWS Java SDK :: Services :: QLDB diff --git a/services/qldbsession/pom.xml b/services/qldbsession/pom.xml index 018e53ac2319..bd3036c51a07 100644 --- a/services/qldbsession/pom.xml +++ b/services/qldbsession/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT qldbsession AWS Java SDK :: Services :: QLDB Session diff --git a/services/quicksight/pom.xml b/services/quicksight/pom.xml index 2d564df6bf91..2b0f4a759bf2 100644 --- a/services/quicksight/pom.xml +++ b/services/quicksight/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT quicksight AWS Java SDK :: Services :: QuickSight diff --git a/services/ram/pom.xml b/services/ram/pom.xml index 3fcdc7107c5c..0a872bbbd8ab 100644 --- a/services/ram/pom.xml +++ b/services/ram/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ram AWS Java SDK :: Services :: RAM diff --git a/services/rbin/pom.xml b/services/rbin/pom.xml index a3f3f0c4e429..a16589259ab8 100644 --- a/services/rbin/pom.xml +++ b/services/rbin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT rbin AWS Java SDK :: Services :: Rbin diff --git a/services/rds/pom.xml b/services/rds/pom.xml index 209e12538f08..cc6e12e41b4f 100644 --- a/services/rds/pom.xml +++ b/services/rds/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT rds AWS Java SDK :: Services :: Amazon RDS diff --git a/services/rds/src/main/resources/codegen-resources/service-2.json b/services/rds/src/main/resources/codegen-resources/service-2.json index 015bc61ca20e..cb8235d7ef74 100644 --- 
a/services/rds/src/main/resources/codegen-resources/service-2.json +++ b/services/rds/src/main/resources/codegen-resources/service-2.json @@ -462,7 +462,7 @@ {"shape":"TenantDatabaseQuotaExceededFault"}, {"shape":"CertificateNotFoundFault"} ], - "documentation":"

        Creates a new DB instance that acts as a read replica for an existing source DB instance or Multi-AZ DB cluster. You can create a read replica for a DB instance running Db2, MariaDB, MySQL, Oracle, PostgreSQL, or SQL Server. You can create a read replica for a Multi-AZ DB cluster running MySQL or PostgreSQL. For more information, see Working with read replicas and Migrating from a Multi-AZ DB cluster to a DB instance using a read replica in the Amazon RDS User Guide.

        Amazon Aurora doesn't support this operation. To create a DB instance for an Aurora DB cluster, use the CreateDBInstance operation.

        All read replica DB instances are created with backups disabled. All other attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance or cluster, except as specified.

        Your source DB instance or cluster must have backup retention enabled.

        " + "documentation":"

        Creates a new DB instance that acts as a read replica for an existing source DB instance or Multi-AZ DB cluster. You can create a read replica for a DB instance running MariaDB, MySQL, Oracle, PostgreSQL, or SQL Server. You can create a read replica for a Multi-AZ DB cluster running MySQL or PostgreSQL. For more information, see Working with read replicas and Migrating from a Multi-AZ DB cluster to a DB instance using a read replica in the Amazon RDS User Guide.

        Amazon RDS for Db2 supports this operation for standby replicas. To create a standby replica for a DB instance running Db2, you must set ReplicaMode to mounted.

        Amazon Aurora doesn't support this operation. To create a DB instance for an Aurora DB cluster, use the CreateDBInstance operation.

        RDS creates read replicas with backups disabled. All other attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance or cluster, except as specified.

        Your source DB instance or cluster must have backup retention enabled.

        " }, "CreateDBParameterGroup":{ "name":"CreateDBParameterGroup", @@ -1812,7 +1812,7 @@ {"shape":"DBInstanceNotReadyFault"}, {"shape":"DBLogFileNotFoundFault"} ], - "documentation":"

        Downloads all or a portion of the specified log file, up to 1 MB in size.

        This command doesn't apply to RDS Custom.

        " + "documentation":"

        Downloads all or a portion of the specified log file, up to 1 MB in size.

        This command doesn't apply to RDS Custom.

        This operation uses resources on database instances. Because of this, we recommend publishing database logs to CloudWatch and then using the GetLogEvents operation. For more information, see GetLogEvents in the Amazon CloudWatch Logs API Reference.

        " }, "EnableHttpEndpoint":{ "name":"EnableHttpEndpoint", @@ -3750,7 +3750,7 @@ }, "ConnectionBorrowTimeout":{ "shape":"IntegerOptional", - "documentation":"

        The number of seconds for a proxy to wait for a connection to become available in the connection pool. This setting only applies when the proxy has opened its maximum number of connections and all connections are busy with client sessions.

        Default: 120

        Constraints:

        • Must be between 0 and 3600.

        " + "documentation":"

        The number of seconds for a proxy to wait for a connection to become available in the connection pool. This setting only applies when the proxy has opened its maximum number of connections and all connections are busy with client sessions.

        Default: 120

        Constraints:

        • Must be between 0 and 300.

        " }, "SessionPinningFilters":{ "shape":"StringList", @@ -3758,7 +3758,7 @@ }, "InitQuery":{ "shape":"String", - "documentation":"

        Add an initialization query, or modify the current one. You can specify one or more SQL statements for the proxy to run when opening each new database connection. The setting is typically used with SET statements to make sure that each connection has identical settings. Make sure that the query you add is valid. To include multiple variables in a single SET statement, use comma separators.

        For example: SET variable1=value1, variable2=value2

        For multiple statements, use semicolons as the separator.

        Default: no initialization query

        " + "documentation":"

        Add an initialization query, or modify the current one. You can specify one or more SQL statements for the proxy to run when opening each new database connection. The setting is typically used with SET statements to make sure that each connection has identical settings. Make sure that the query you add is valid. This field is optional, so you can leave it empty. To include multiple variables in a single SET statement, use comma separators.

        For example: SET variable1=value1, variable2=value2

        Default: no initialization query

        Since the initialization query is accessible as part of the target group configuration, it is not protected by authentication or cryptographic methods. Anyone with access to view or manage your proxy target group configuration can view the initialization query. Don't add sensitive data, such as passwords or long-lived encryption keys, to this option.

        " } }, "documentation":"

        Specifies the settings that control the size and behavior of the connection pool associated with a DBProxyTargetGroup.

        " @@ -3784,7 +3784,7 @@ }, "InitQuery":{ "shape":"String", - "documentation":"

        One or more SQL statements for the proxy to run when opening each new database connection. Typically used with SET statements to make sure that each connection has identical settings such as time zone and character set. This setting is empty by default. For multiple statements, use semicolons as the separator. You can also include multiple variables in a single SET statement, such as SET x=1, y=2.

        " + "documentation":"

        One or more SQL statements for the proxy to run when opening each new database connection. The setting is typically used with SET statements to make sure that each connection has identical settings. Make sure that the query you add is valid. This field is optional. To include multiple variables in a single SET statement, use comma separators.

        For example: SET variable1=value1, variable2=value2

        Since the initialization query is accessible as part of the target group configuration, it is not protected by authentication or cryptographic methods. Anyone with access to view or manage your proxy target group configuration can view the initialization query. Don't add sensitive data, such as passwords or long-lived encryption keys, to this option.

        " } }, "documentation":"

        Displays the settings that control the size and behavior of the connection pool associated with a DBProxyTarget.

        " @@ -4715,7 +4715,7 @@ }, "SourceDBInstanceIdentifier":{ "shape":"String", - "documentation":"

        The identifier of the DB instance that will act as the source for the read replica. Each DB instance can have up to 15 read replicas, with the exception of Oracle and SQL Server, which can have up to five.

        Constraints:

        • Must be the identifier of an existing Db2, MariaDB, MySQL, Oracle, PostgreSQL, or SQL Server DB instance.

        • Can't be specified if the SourceDBClusterIdentifier parameter is also specified.

        • For the limitations of Oracle read replicas, see Version and licensing considerations for RDS for Oracle replicas in the Amazon RDS User Guide.

        • For the limitations of SQL Server read replicas, see Read replica limitations with SQL Server in the Amazon RDS User Guide.

        • The specified DB instance must have automatic backups enabled, that is, its backup retention period must be greater than 0.

        • If the source DB instance is in the same Amazon Web Services Region as the read replica, specify a valid DB instance identifier.

        • If the source DB instance is in a different Amazon Web Services Region from the read replica, specify a valid DB instance ARN. For more information, see Constructing an ARN for Amazon RDS in the Amazon RDS User Guide. This doesn't apply to SQL Server or RDS Custom, which don't support cross-Region replicas.

        " + "documentation":"

        The identifier of the DB instance that will act as the source for the read replica. Each DB instance can have up to 15 read replicas, except for the following engines:

        • Db2 - Can have up to three replicas.

        • Oracle - Can have up to five read replicas.

        • SQL Server - Can have up to five read replicas.

        Constraints:

        • Must be the identifier of an existing Db2, MariaDB, MySQL, Oracle, PostgreSQL, or SQL Server DB instance.

        • Can't be specified if the SourceDBClusterIdentifier parameter is also specified.

        • For the limitations of Oracle read replicas, see Version and licensing considerations for RDS for Oracle replicas in the Amazon RDS User Guide.

        • For the limitations of SQL Server read replicas, see Read replica limitations with SQL Server in the Amazon RDS User Guide.

        • The specified DB instance must have automatic backups enabled, that is, its backup retention period must be greater than 0.

        • If the source DB instance is in the same Amazon Web Services Region as the read replica, specify a valid DB instance identifier.

        • If the source DB instance is in a different Amazon Web Services Region from the read replica, specify a valid DB instance ARN. For more information, see Constructing an ARN for Amazon RDS in the Amazon RDS User Guide. This doesn't apply to SQL Server or RDS Custom, which don't support cross-Region replicas.

        " }, "DBInstanceClass":{ "shape":"String", @@ -4747,7 +4747,7 @@ }, "DBParameterGroupName":{ "shape":"String", - "documentation":"

        The name of the DB parameter group to associate with this read replica DB instance.

        For Single-AZ or Multi-AZ DB instance read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of the source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica.

        For Multi-AZ DB cluster same Region read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the default DBParameterGroup.

        Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas, for Multi-AZ DB cluster read replica instances, and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom.

        Constraints:

        • Must be 1 to 255 letters, numbers, or hyphens.

        • First character must be a letter.

        • Can't end with a hyphen or contain two consecutive hyphens.

        " + "documentation":"

        The name of the DB parameter group to associate with this read replica DB instance.

        For the Db2 DB engine, if your source DB instance uses the Bring Your Own License model, then a custom parameter group must be associated with the replica. For a replica in the same Amazon Web Services Region, if you don't specify a custom parameter group, Amazon RDS uses the custom parameter group associated with the source DB instance. For a cross-Region replica, you must specify a custom parameter group. This custom parameter group must include your IBM Site ID and IBM Customer ID. For more information, see IBM IDs for Bring Your Own License for Db2.

        For Single-AZ or Multi-AZ DB instance read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of the source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica.

        For Multi-AZ DB cluster same Region read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the default DBParameterGroup.

        Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas, for Multi-AZ DB cluster read replica instances, for Db2 DB instances, and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom.

        Constraints:

        • Must be 1 to 255 letters, numbers, or hyphens.

        • First character must be a letter.

        • Can't end with a hyphen or contain two consecutive hyphens.

        " }, "PubliclyAccessible":{ "shape":"BooleanOptional", @@ -4848,7 +4848,7 @@ }, "ReplicaMode":{ "shape":"ReplicaMode", - "documentation":"

        The open mode of the replica database: mounted or read-only.

        This parameter is only supported for Oracle DB instances.

        Mounted DB replicas are included in Oracle Database Enterprise Edition. The main use case for mounted replicas is cross-Region disaster recovery. The primary database doesn't use Active Data Guard to transmit information to the mounted replica. Because it doesn't accept user connections, a mounted replica can't serve a read-only workload.

        You can create a combination of mounted and read-only DB replicas for the same primary DB instance. For more information, see Working with Oracle Read Replicas for Amazon RDS in the Amazon RDS User Guide.

        For RDS Custom, you must specify this parameter and set it to mounted. The value won't be set by default. After replica creation, you can manage the open mode manually.

        " + "documentation":"

        The open mode of the replica database.

        This parameter is only supported for Db2 DB instances and Oracle DB instances.

        Db2

        Standby DB replicas are included in Db2 Advanced Edition (AE) and Db2 Standard Edition (SE). The main use case for standby replicas is cross-Region disaster recovery. Because it doesn't accept user connections, a standby replica can't serve a read-only workload.

        You can create a combination of standby and read-only DB replicas for the same primary DB instance. For more information, see Working with read replicas for Amazon RDS for Db2 in the Amazon RDS User Guide.

        To create standby DB replicas for RDS for Db2, set this parameter to mounted.

        Oracle

        Mounted DB replicas are included in Oracle Database Enterprise Edition. The main use case for mounted replicas is cross-Region disaster recovery. The primary database doesn't use Active Data Guard to transmit information to the mounted replica. Because it doesn't accept user connections, a mounted replica can't serve a read-only workload.

        You can create a combination of mounted and read-only DB replicas for the same primary DB instance. For more information, see Working with read replicas for Amazon RDS for Oracle in the Amazon RDS User Guide.

        For RDS Custom, you must specify this parameter and set it to mounted. The value won't be set by default. After replica creation, you can manage the open mode manually.

        " }, "MaxAllocatedStorage":{ "shape":"IntegerOptional", @@ -5084,7 +5084,7 @@ }, "ComputeRedundancy":{ "shape":"IntegerOptional", - "documentation":"

        Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following:

        • 0 - Creates a DB shard group without a standby DB shard group. This is the default value.

        • 1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ).

        • 2 - Creates a DB shard group with two standby DB shard groups in two different AZs.

        " + "documentation":"

        Specifies whether to create standby DB data access shards for the DB shard group. Valid values are the following:

        • 0 - Creates a DB shard group without a standby DB data access shard. This is the default value.

        • 1 - Creates a DB shard group with a standby DB data access shard in a different Availability Zone (AZ).

        • 2 - Creates a DB shard group with two standby DB data access shards in two different AZs.

        " }, "MaxACU":{ "shape":"DoubleOptional", @@ -5678,6 +5678,10 @@ "documentation":"

        The Active Directory Domain membership records associated with the DB cluster.

        " }, "TagList":{"shape":"TagList"}, + "GlobalClusterIdentifier":{ + "shape":"GlobalClusterIdentifier", + "documentation":"

        Contains a user-supplied global database cluster identifier. This identifier is the unique key that identifies a global database cluster.

        " + }, "GlobalWriteForwardingStatus":{ "shape":"WriteForwardingStatus", "documentation":"

        The status of write forwarding for a secondary cluster in an Aurora global database.

        " @@ -6849,7 +6853,7 @@ }, "ReplicaMode":{ "shape":"ReplicaMode", - "documentation":"

        The open mode of an Oracle read replica. The default is open-read-only. For more information, see Working with Oracle Read Replicas for Amazon RDS in the Amazon RDS User Guide.

        This attribute is only supported in RDS for Oracle.

        " + "documentation":"

        The open mode of a Db2 or an Oracle read replica. The default is open-read-only. For more information, see Working with read replicas for Amazon RDS for Db2 and Working with read replicas for Amazon RDS for Oracle in the Amazon RDS User Guide.

        This attribute is only supported in RDS for Db2, RDS for Oracle, and RDS Custom for Oracle.

        " }, "LicenseModel":{ "shape":"String", @@ -8579,7 +8583,7 @@ }, "Subnets":{ "shape":"SubnetList", - "documentation":"

        Contains a list of Subnet elements.

        " + "documentation":"

        Contains a list of Subnet elements. The list of subnets shown here might not reflect the current state of your VPC. For the most up-to-date information, we recommend checking your VPC configuration directly.

        " }, "DBSubnetGroupArn":{ "shape":"String", @@ -12700,7 +12704,7 @@ }, "ReplicaMode":{ "shape":"ReplicaMode", - "documentation":"

        A value that sets the open mode of a replica database to either mounted or read-only.

        Currently, this parameter is only supported for Oracle DB instances.

        Mounted DB replicas are included in Oracle Enterprise Edition. The main use case for mounted replicas is cross-Region disaster recovery. The primary database doesn't use Active Data Guard to transmit information to the mounted replica. Because it doesn't accept user connections, a mounted replica can't serve a read-only workload. For more information, see Working with Oracle Read Replicas for Amazon RDS in the Amazon RDS User Guide.

        This setting doesn't apply to RDS Custom DB instances.

        " + "documentation":"

        The open mode of a replica database.

        This parameter is only supported for Db2 DB instances and Oracle DB instances.

        Db2

        Standby DB replicas are included in Db2 Advanced Edition (AE) and Db2 Standard Edition (SE). The main use case for standby replicas is cross-Region disaster recovery. Because it doesn't accept user connections, a standby replica can't serve a read-only workload.

        You can create a combination of standby and read-only DB replicas for the same primary DB instance. For more information, see Working with read replicas for Amazon RDS for Db2 in the Amazon RDS User Guide.

        To create standby DB replicas for RDS for Db2, set this parameter to mounted.

        Oracle

        Mounted DB replicas are included in Oracle Database Enterprise Edition. The main use case for mounted replicas is cross-Region disaster recovery. The primary database doesn't use Active Data Guard to transmit information to the mounted replica. Because it doesn't accept user connections, a mounted replica can't serve a read-only workload.

        You can create a combination of mounted and read-only DB replicas for the same primary DB instance. For more information, see Working with read replicas for Amazon RDS for Oracle in the Amazon RDS User Guide.

        For RDS Custom, you must specify this parameter and set it to mounted. The value won't be set by default. After replica creation, you can manage the open mode manually.

        " }, "EnableCustomerOwnedIp":{ "shape":"BooleanOptional", diff --git a/services/rdsdata/pom.xml b/services/rdsdata/pom.xml index 5b000ee68321..126d3dd6c649 100644 --- a/services/rdsdata/pom.xml +++ b/services/rdsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT rdsdata AWS Java SDK :: Services :: RDS Data diff --git a/services/redshift/pom.xml b/services/redshift/pom.xml index 3139e10380e5..d4b12e92fb10 100644 --- a/services/redshift/pom.xml +++ b/services/redshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT redshift AWS Java SDK :: Services :: Amazon Redshift diff --git a/services/redshiftdata/pom.xml b/services/redshiftdata/pom.xml index 89ef5ea7f71a..807debbc263f 100644 --- a/services/redshiftdata/pom.xml +++ b/services/redshiftdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT redshiftdata AWS Java SDK :: Services :: Redshift Data diff --git a/services/redshiftserverless/pom.xml b/services/redshiftserverless/pom.xml index 5ef21b4d05db..50260165d593 100644 --- a/services/redshiftserverless/pom.xml +++ b/services/redshiftserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT redshiftserverless AWS Java SDK :: Services :: Redshift Serverless diff --git a/services/rekognition/pom.xml b/services/rekognition/pom.xml index 406bbb458cb0..b31f8191acf0 100644 --- a/services/rekognition/pom.xml +++ b/services/rekognition/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT rekognition AWS Java SDK :: Services :: Amazon Rekognition diff --git a/services/rekognition/src/main/resources/codegen-resources/service-2.json b/services/rekognition/src/main/resources/codegen-resources/service-2.json index 45455d20eca9..220ab5b61595 100644 --- a/services/rekognition/src/main/resources/codegen-resources/service-2.json +++ b/services/rekognition/src/main/resources/codegen-resources/service-2.json @@ -33,7 +33,7 @@ {"shape":"ConflictException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

        Associates one or more faces with an existing UserID. Takes an array of FaceIds. Each FaceId that are present in the FaceIds list is associated with the provided UserID. The maximum number of total FaceIds per UserID is 100.

        The UserMatchThreshold parameter specifies the minimum user match confidence required for the face to be associated with a UserID that has at least one FaceID already associated. This ensures that the FaceIds are associated with the right UserID. The value ranges from 0-100 and default value is 75.

        If successful, an array of AssociatedFace objects containing the associated FaceIds is returned. If a given face is already associated with the given UserID, it will be ignored and will not be returned in the response. If a given face is already associated to a different UserID, isn't found in the collection, doesn’t meet the UserMatchThreshold, or there are already 100 faces associated with the UserID, it will be returned as part of an array of UnsuccessfulFaceAssociations.

        The UserStatus reflects the status of an operation which updates a UserID representation with a list of given faces. The UserStatus can be:

        • ACTIVE - All associations or disassociations of FaceID(s) for a UserID are complete.

        • CREATED - A UserID has been created, but has no FaceID(s) associated with it.

        • UPDATING - A UserID is being updated and there are current associations or disassociations of FaceID(s) taking place.

        " + "documentation":"

        Associates one or more faces with an existing UserID. Takes an array of FaceIds. Each FaceId that is present in the FaceIds list is associated with the provided UserID. The number of FaceIds that can be used as input in a single request is limited to 100.

        Note that the total number of faces that can be associated with a single UserID is also limited to 100. Once a UserID has 100 faces associated with it, no additional faces can be added. If more API calls are made after the limit is reached, a ServiceQuotaExceededException will result.

        The UserMatchThreshold parameter specifies the minimum user match confidence required for the face to be associated with a UserID that has at least one FaceID already associated. This ensures that the FaceIds are associated with the right UserID. The value ranges from 0-100 and default value is 75.

        If successful, an array of AssociatedFace objects containing the associated FaceIds is returned. If a given face is already associated with the given UserID, it will be ignored and will not be returned in the response. If a given face is already associated with a different UserID, isn't found in the collection, doesn't meet the UserMatchThreshold, or there are already 100 faces associated with the UserID, it will be returned as part of an array of UnsuccessfulFaceAssociations.

        The UserStatus reflects the status of an operation which updates a UserID representation with a list of given faces. The UserStatus can be:

        • ACTIVE - All associations or disassociations of FaceID(s) for a UserID are complete.

        • CREATED - A UserID has been created, but has no FaceID(s) associated with it.

        • UPDATING - A UserID is being updated and there are current associations or disassociations of FaceID(s) taking place.

        " }, "CompareFaces":{ "name":"CompareFaces", @@ -479,7 +479,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

        This operation applies only to Amazon Rekognition Custom Labels.

        Detects custom labels in a supplied image by using an Amazon Rekognition Custom Labels model.

        You specify which version of a model version to use by using the ProjectVersionArn input parameter.

        You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

        For each object that the model version detects on an image, the API returns a (CustomLabel) object in an array (CustomLabels). Each CustomLabel object provides the label name (Name), the level of confidence that the image contains the object (Confidence), and object location information, if it exists, for the label on the image (Geometry). Note that for the DetectCustomLabelsLabels operation, Polygons are not returned in the Geometry section of the response.

        To filter labels that are returned, specify a value for MinConfidence. DetectCustomLabelsLabels only returns labels with a confidence that's higher than the specified value. The value of MinConfidence maps to the assumed threshold values created during training. For more information, see Assumed threshold in the Amazon Rekognition Custom Labels Developer Guide. Amazon Rekognition Custom Labels metrics expresses an assumed threshold as a floating point value between 0-1. The range of MinConfidence normalizes the threshold value to a percentage value (0-100). Confidence responses from DetectCustomLabels are also returned as a percentage. You can use MinConfidence to change the precision and recall or your model. For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.

        If you don't specify a value for MinConfidence, DetectCustomLabels returns labels based on the assumed threshold of each label.

        This is a stateless API operation. That is, the operation does not persist any data.

        This operation requires permissions to perform the rekognition:DetectCustomLabels action.

        For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.

        " + "documentation":"

        This operation applies only to Amazon Rekognition Custom Labels.

        Detects custom labels in a supplied image by using an Amazon Rekognition Custom Labels model.

        You specify which version of the model to use by using the ProjectVersionArn input parameter.

        You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

        For each object that the model version detects on an image, the API returns a (CustomLabel) object in an array (CustomLabels). Each CustomLabel object provides the label name (Name), the level of confidence that the image contains the object (Confidence), and object location information, if it exists, for the label on the image (Geometry).

        To filter labels that are returned, specify a value for MinConfidence. DetectCustomLabels only returns labels with a confidence that's higher than the specified value. The value of MinConfidence maps to the assumed threshold values created during training. For more information, see Assumed threshold in the Amazon Rekognition Custom Labels Developer Guide. Amazon Rekognition Custom Labels metrics express an assumed threshold as a floating point value between 0-1. The range of MinConfidence normalizes the threshold value to a percentage value (0-100). Confidence responses from DetectCustomLabels are also returned as a percentage. You can use MinConfidence to change the precision and recall of your model. For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.

        If you don't specify a value for MinConfidence, DetectCustomLabels returns labels based on the assumed threshold of each label.

        This is a stateless API operation. That is, the operation does not persist any data.

        This operation requires permissions to perform the rekognition:DetectCustomLabels action.

        For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.

        " }, "DetectFaces":{ "name":"DetectFaces", @@ -789,7 +789,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

        Gets the path tracking results of a Amazon Rekognition Video analysis started by StartPersonTracking.

        The person path tracking operation is started by a call to StartPersonTracking which returns a job identifier (JobId). When the operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartPersonTracking.

        To get the results of the person path tracking operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetPersonTracking and pass the job identifier (JobId) from the initial call to StartPersonTracking.

        GetPersonTracking returns an array, Persons, of tracked persons and the time(s) their paths were tracked in the video.

        GetPersonTracking only returns the default facial attributes (BoundingBox, Confidence, Landmarks, Pose, and Quality). The other facial attributes listed in the Face object of the following response syntax are not returned.

        For more information, see FaceDetail in the Amazon Rekognition Developer Guide.

        By default, the array is sorted by the time(s) a person's path is tracked in the video. You can sort by tracked persons by specifying INDEX for the SortBy input parameter.

        Use the MaxResults parameter to limit the number of items returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetPersonTracking and populate the NextToken request parameter with the token value returned from the previous call to GetPersonTracking.

        " + "documentation":"

        End of support notice: On October 31, 2025, AWS will discontinue support for Amazon Rekognition People Pathing. After October 31, 2025, you will no longer be able to use the Rekognition People Pathing capability. For more information, visit this blog post.

        Gets the path tracking results of an Amazon Rekognition Video analysis started by StartPersonTracking.

        The person path tracking operation is started by a call to StartPersonTracking which returns a job identifier (JobId). When the operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartPersonTracking.

        To get the results of the person path tracking operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetPersonTracking and pass the job identifier (JobId) from the initial call to StartPersonTracking.

        GetPersonTracking returns an array, Persons, of tracked persons and the time(s) their paths were tracked in the video.

        GetPersonTracking only returns the default facial attributes (BoundingBox, Confidence, Landmarks, Pose, and Quality). The other facial attributes listed in the Face object of the following response syntax are not returned.

        For more information, see FaceDetail in the Amazon Rekognition Developer Guide.

        By default, the array is sorted by the time(s) a person's path is tracked in the video. You can sort by tracked persons by specifying INDEX for the SortBy input parameter.

        Use the MaxResults parameter to limit the number of items returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetPersonTracking and populate the NextToken request parameter with the token value returned from the previous call to GetPersonTracking.

        " }, "GetSegmentDetection":{ "name":"GetSegmentDetection", @@ -1299,7 +1299,7 @@ {"shape":"LimitExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

        Starts the asynchronous tracking of a person's path in a stored video.

        Amazon Rekognition Video can track the path of people in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartPersonTracking returns a job identifier (JobId) which you use to get the results of the operation. When label detection is finished, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

        To get the results of the person detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetPersonTracking and pass the job identifier (JobId) from the initial call to StartPersonTracking.

        ", + "documentation":"

        End of support notice: On October 31, 2025, AWS will discontinue support for Amazon Rekognition People Pathing. After October 31, 2025, you will no longer be able to use the Rekognition People Pathing capability. For more information, visit this blog post.

        Starts the asynchronous tracking of a person's path in a stored video.

        Amazon Rekognition Video can track the path of people in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartPersonTracking returns a job identifier (JobId) which you use to get the results of the operation. When person path tracking is finished, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

        To get the results of the person detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetPersonTracking and pass the job identifier (JobId) from the initial call to StartPersonTracking.

        ", "idempotent":true }, "StartProjectVersion":{ @@ -1503,8 +1503,7 @@ "shapes":{ "AccessDeniedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        You are not authorized to perform the action.

        ", "exception":true }, @@ -1831,6 +1830,50 @@ "type":"list", "member":{"shape":"CelebrityRecognition"} }, + "Challenge":{ + "type":"structure", + "required":[ + "Type", + "Version" + ], + "members":{ + "Type":{ + "shape":"ChallengeType", + "documentation":"

        The type of the challenge being used for the Face Liveness session.

        " + }, + "Version":{ + "shape":"Version", + "documentation":"

        The version of the challenge being used for the Face Liveness session.

        " + } + }, + "documentation":"

        Describes the type and version of the challenge being used for the Face Liveness session.

        " + }, + "ChallengePreference":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{ + "shape":"ChallengeType", + "documentation":"

        The types of challenges that have been selected for the Face Liveness session.

        " + }, + "Versions":{ + "shape":"Versions", + "documentation":"

        The version of the challenges that have been selected for the Face Liveness session.

        " + } + }, + "documentation":"

        An ordered list of preferred challenge types and versions.

        " + }, + "ChallengePreferences":{ + "type":"list", + "member":{"shape":"ChallengePreference"} + }, + "ChallengeType":{ + "type":"string", + "enum":[ + "FaceMovementAndLightChallenge", + "FaceMovementChallenge" + ] + }, "ClientRequestToken":{ "type":"string", "max":64, @@ -1973,8 +2016,7 @@ }, "ConflictException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        A User with the same Id already exists within the collection, or the update or deletion of the User caused an inconsistent state.

        ", "exception":true }, @@ -2249,6 +2291,10 @@ "AuditImagesLimit":{ "shape":"AuditImagesLimit", "documentation":"

        Number of audit images to be returned. Takes an integer between 0-4. Any integer less than 0 will return 0, and any integer above 4 will return 4 images in the response. By default, it is set to 0. The limit is best effort and is based on the actual duration of the selfie-video.

        " + }, + "ChallengePreferences":{ + "shape":"ChallengePreferences", + "documentation":"

        Indicates preferred challenge types and versions for the Face Liveness session to be created.

        " } }, "documentation":"

        A session settings object. It contains settings for the operation to be performed. It accepts arguments for OutputConfig, AuditImagesLimit, and ChallengePreferences.

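        A hedged sketch of creating a session with the new ChallengePreferences member. It assumes the generated Java bindings follow the SDK's usual codegen naming — a challengePreferences setter on the settings builder, a ChallengePreference model class, and FACE_MOVEMENT_AND_LIGHT_CHALLENGE as the enum constant for the FaceMovementAndLightChallenge value:

        import software.amazon.awssdk.services.rekognition.RekognitionClient;
        import software.amazon.awssdk.services.rekognition.model.ChallengePreference;
        import software.amazon.awssdk.services.rekognition.model.ChallengeType;
        import software.amazon.awssdk.services.rekognition.model.CreateFaceLivenessSessionResponse;
        import software.amazon.awssdk.services.rekognition.model.Versions;

        public class LivenessSessionExample {
            public static void main(String[] args) {
                RekognitionClient rekognition = RekognitionClient.create();

                CreateFaceLivenessSessionResponse session = rekognition.createFaceLivenessSession(r -> r
                    .settings(s -> s
                        .auditImagesLimit(2)
                        // Prefer the light challenge, accepting versions 1.0.0 through 2.0.0.
                        .challengePreferences(ChallengePreference.builder()
                            .type(ChallengeType.FACE_MOVEMENT_AND_LIGHT_CHALLENGE)
                            .versions(Versions.builder().minimum("1.0.0").maximum("2.0.0").build())
                            .build())));

                System.out.println("SessionId: " + session.sessionId());
            }
        }
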
        " @@ -2431,8 +2477,7 @@ }, "CreateUserResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "CustomLabel":{ "type":"structure", @@ -2721,8 +2766,7 @@ }, "DeleteDatasetResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteFacesRequest":{ "type":"structure", @@ -2777,8 +2821,7 @@ }, "DeleteProjectPolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteProjectRequest":{ "type":"structure", @@ -2830,8 +2873,7 @@ }, "DeleteStreamProcessorResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteUserRequest":{ "type":"structure", @@ -2857,8 +2899,7 @@ }, "DeleteUserResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeCollectionRequest":{ "type":"structure", @@ -3483,8 +3524,7 @@ }, "DistributeDatasetEntriesResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DistributeDatasetMetadataList":{ "type":"list", @@ -3542,7 +3582,7 @@ "documentation":"

        Level of confidence in the determination.

        " } }, - "documentation":"

        The emotions that appear to be expressed on the face, and the confidence level in the determination. The API is only making a determination of the physical appearance of a person's face. It is not a determination of the person’s internal emotional state and should not be used in such a way. For example, a person pretending to have a sad face might not be sad emotionally.

        " + "documentation":"

        The API returns a prediction of an emotion based on a person's facial expressions, along with the confidence level for the predicted emotion. It is not a determination of the person’s internal emotional state and should not be used in such a way. For example, a person pretending to have a sad face might not be sad emotionally. The API is not intended to be used, and you may not use it, in a manner that violates the EU Artificial Intelligence Act or any other applicable law.

        " }, "EmotionName":{ "type":"string", @@ -4211,6 +4251,10 @@ "AuditImages":{ "shape":"AuditImages", "documentation":"

        A set of images from the Face Liveness video that can be used for audit purposes. It includes a bounding box of the face and the Base64-encoded image bytes. If the CreateFaceLivenessSession request included an OutputConfig argument, the image will be uploaded to the S3Object specified in the output configuration. If no Amazon S3 bucket is defined, raw bytes are sent instead.

        " + }, + "Challenge":{ + "shape":"Challenge", + "documentation":"

        Contains information regarding the challenge type used for the Face Liveness check.

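        Continuing the session sketch above (again hypothetical: it assumes the generated response exposes the new member as challenge(), per the SDK's usual conventions), the challenge that was actually run could be inspected as a fragment like this:

        GetFaceLivenessSessionResultsResponse results = rekognition.getFaceLivenessSessionResults(r -> r
            .sessionId(session.sessionId()));
        // The Challenge member reports which challenge type and version the client ran.
        if (results.challenge() != null) {
            System.out.println(results.challenge().typeAsString() + " v" + results.challenge().version());
        }
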
        " } } }, @@ -4697,8 +4741,7 @@ }, "IdempotentParameterMismatchException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        A ClientRequestToken input parameter was reused with an operation, but at least one of the other input parameters is different from the previous call to the operation.

        ", "exception":true }, @@ -4741,8 +4784,7 @@ }, "ImageTooLargeException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The input image size exceeds the allowed limit. If you are calling DetectProtectiveEquipment, the image size or resolution exceeds the allowed limit. For more information, see Guidelines and quotas in Amazon Rekognition in the Amazon Rekognition Developer Guide.

        ", "exception":true }, @@ -4832,51 +4874,44 @@ }, "InternalServerError":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Amazon Rekognition experienced a service issue. Try your call again.

        ", "exception":true, "fault":true }, "InvalidImageFormatException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The provided image format is not supported.

        ", "exception":true }, "InvalidManifestException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Indicates that a provided manifest file is empty or larger than the allowed limit.

        ", "exception":true }, "InvalidPaginationTokenException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Pagination token in the request is not valid.

        ", "exception":true }, "InvalidParameterException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Input parameter violated a constraint. Validate your parameter before calling the API operation again.

        ", "exception":true }, "InvalidPolicyRevisionIdException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The supplied revision ID for the project policy is invalid.

        ", "exception":true }, "InvalidS3ObjectException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Amazon Rekognition is unable to access the S3 object specified in the request.

        ", "exception":true }, @@ -5149,8 +5184,7 @@ }, "LimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An Amazon Rekognition service limit was exceeded. For example, if you start too many jobs concurrently, subsequent calls to start operations (for example, StartLabelDetection) will raise a LimitExceededException (HTTP status code: 400) until the number of concurrently running jobs is below the Amazon Rekognition service limit.

        ", "exception":true }, @@ -5503,8 +5537,7 @@ }, "MalformedPolicyDocumentException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The format of the project policy document that you supplied to PutProjectPolicy is incorrect.

        ", "exception":true }, @@ -6319,8 +6352,7 @@ }, "ProvisionedThroughputExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The number of requests exceeded your throughput limit. If you want to increase this limit, contact Amazon Rekognition.

        ", "exception":true }, @@ -6444,8 +6476,7 @@ }, "ResourceAlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        A resource with the specified ID already exists.

        ", "exception":true }, @@ -6456,22 +6487,19 @@ }, "ResourceInUseException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified resource is already being used.

        ", "exception":true }, "ResourceNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The resource specified in the request cannot be found.

        ", "exception":true }, "ResourceNotReadyException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The requested resource isn't ready. For example, this exception occurs when you call DetectCustomLabels with a model version that isn't deployed.

        ", "exception":true }, @@ -6843,15 +6871,13 @@ }, "ServiceQuotaExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The size of the collection exceeds the allowed limit. For more information, see Guidelines and quotas in Amazon Rekognition in the Amazon Rekognition Developer Guide.

        ", "exception":true }, "SessionNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Occurs when a given sessionId is not found.

        ", "exception":true }, @@ -7355,8 +7381,7 @@ }, "StopStreamProcessorResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "StreamProcessingStartSelector":{ "type":"structure", @@ -7555,8 +7580,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -7683,8 +7707,7 @@ }, "ThrottlingException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Amazon Rekognition is temporarily unable to process the request. Try your call again.

        ", "exception":true, "fault":true @@ -7906,8 +7929,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateDatasetEntriesRequest":{ "type":"structure", @@ -7928,8 +7950,7 @@ }, "UpdateDatasetEntriesResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateStreamProcessorRequest":{ "type":"structure", @@ -7959,8 +7980,7 @@ }, "UpdateStreamProcessorResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "Url":{"type":"string"}, "Urls":{ @@ -8038,6 +8058,12 @@ }, "documentation":"

        Contains the Amazon S3 bucket location of the validation data for a model training job.

        The validation data includes error information for individual JSON Lines in the dataset. For more information, see Debugging a Failed Model Training in the Amazon Rekognition Custom Labels Developer Guide.

        You get the ValidationData object for the training dataset (TrainingDataResult) and the test dataset (TestingDataResult) by calling DescribeProjectVersions.

        The assets array contains a single Asset object. The GroundTruthManifest field of the Asset object contains the S3 bucket location of the validation data.

        " }, + "Version":{ + "type":"string", + "max":11, + "min":5, + "pattern":"^(0|[1-9]\\d{0,2})\\.(0|[1-9]\\d{0,2})\\.(0|[1-9]\\d{0,2})$" + }, "VersionDescription":{ "type":"string", "max":255, @@ -8056,6 +8082,20 @@ "max":10, "min":1 }, + "Versions":{ + "type":"structure", + "members":{ + "Minimum":{ + "shape":"Version", + "documentation":"

        The desired minimum version for the challenge.

        " + }, + "Maximum":{ + "shape":"Version", + "documentation":"

        The desired maximum version for the challenge.

        " + } + }, + "documentation":"

        Object specifying the acceptable range of challenge versions.

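        Read as a regular expression, the new Version shape accepts dotted three-part versions whose components have no leading zeros and at most three digits each. A quick self-contained check:

        import java.util.regex.Pattern;

        public class VersionPatternCheck {
            // Pattern copied verbatim from the Version shape above.
            private static final Pattern VERSION =
                Pattern.compile("^(0|[1-9]\\d{0,2})\\.(0|[1-9]\\d{0,2})\\.(0|[1-9]\\d{0,2})$");

            public static void main(String[] args) {
                System.out.println(VERSION.matcher("1.0.0").matches());    // true
                System.out.println(VERSION.matcher("01.2.3").matches());   // false: leading zero
                System.out.println(VERSION.matcher("1000.0.0").matches()); // false: 4-digit component
            }
        }
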
        " + }, "Video":{ "type":"structure", "members":{ @@ -8121,8 +8161,7 @@ }, "VideoTooLargeException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The file size or duration of the supplied media is too large. The maximum file size is 10 GB. The maximum duration is 6 hours.

        ", "exception":true } diff --git a/services/repostspace/pom.xml b/services/repostspace/pom.xml index c8a1f1afe1d8..b0b0a36447b2 100644 --- a/services/repostspace/pom.xml +++ b/services/repostspace/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT repostspace AWS Java SDK :: Services :: Repostspace diff --git a/services/resiliencehub/pom.xml b/services/resiliencehub/pom.xml index 908f247b6566..7e3e03f5de32 100644 --- a/services/resiliencehub/pom.xml +++ b/services/resiliencehub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT resiliencehub AWS Java SDK :: Services :: Resiliencehub diff --git a/services/resourceexplorer2/pom.xml b/services/resourceexplorer2/pom.xml index 9231552cd123..2d29559da5f1 100644 --- a/services/resourceexplorer2/pom.xml +++ b/services/resourceexplorer2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT resourceexplorer2 AWS Java SDK :: Services :: Resource Explorer 2 diff --git a/services/resourcegroups/pom.xml b/services/resourcegroups/pom.xml index 9a0a3d47c3fb..0e5fd0a4d1a2 100644 --- a/services/resourcegroups/pom.xml +++ b/services/resourcegroups/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 resourcegroups diff --git a/services/resourcegroupstaggingapi/pom.xml b/services/resourcegroupstaggingapi/pom.xml index b974d0ab7d22..8173460b1e22 100644 --- a/services/resourcegroupstaggingapi/pom.xml +++ b/services/resourcegroupstaggingapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT resourcegroupstaggingapi AWS Java SDK :: Services :: AWS Resource Groups Tagging API diff --git a/services/robomaker/pom.xml b/services/robomaker/pom.xml index d7abac1fa73e..29e78e709880 100644 --- a/services/robomaker/pom.xml +++ b/services/robomaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT robomaker AWS Java SDK :: Services :: RoboMaker diff --git a/services/rolesanywhere/pom.xml b/services/rolesanywhere/pom.xml index fa5c11edb1a1..65a0bd45684d 100644 --- a/services/rolesanywhere/pom.xml +++ b/services/rolesanywhere/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT rolesanywhere AWS Java SDK :: Services :: Roles Anywhere diff --git a/services/route53/pom.xml b/services/route53/pom.xml index 9aa898ba271a..580e45ddcd02 100644 --- a/services/route53/pom.xml +++ b/services/route53/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT route53 AWS Java SDK :: Services :: Amazon Route53 diff --git a/services/route53/src/main/resources/codegen-resources/service-2.json b/services/route53/src/main/resources/codegen-resources/service-2.json index 48d67cca03c6..3c6aafe58772 100644 --- a/services/route53/src/main/resources/codegen-resources/service-2.json +++ b/services/route53/src/main/resources/codegen-resources/service-2.json @@ -1523,8 +1523,7 @@ }, "ChangeTagsForResourceResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Empty response for the request.

        " }, "Changes":{ @@ -1800,7 +1799,8 @@ "mx-central-1", "us-isof-south-1", "us-isof-east-1", - "ap-southeast-7" + "ap-southeast-7", + "ap-east-2" ], "max":64, "min":1 @@ -2492,8 +2492,7 @@ }, "DeleteCidrCollectionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteHealthCheckRequest":{ "type":"structure", @@ -2510,8 +2509,7 @@ }, "DeleteHealthCheckResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An empty element.

        " }, "DeleteHostedZoneRequest":{ @@ -2580,8 +2578,7 @@ }, "DeleteQueryLoggingConfigResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteReusableDelegationSetRequest":{ "type":"structure", @@ -2598,8 +2595,7 @@ }, "DeleteReusableDelegationSetResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An empty element.

        " }, "DeleteTrafficPolicyInstanceRequest":{ @@ -2617,8 +2613,7 @@ }, "DeleteTrafficPolicyInstanceResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An empty element.

        " }, "DeleteTrafficPolicyRequest":{ @@ -2645,8 +2640,7 @@ }, "DeleteTrafficPolicyResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An empty element.

        " }, "DeleteVPCAssociationAuthorizationRequest":{ @@ -2671,8 +2665,7 @@ }, "DeleteVPCAssociationAuthorizationResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Empty response for the request.

        " }, "Dimension":{ @@ -2967,8 +2960,7 @@ }, "GetCheckerIpRangesRequest":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Empty request.

        " }, "GetCheckerIpRangesResponse":{ @@ -3048,8 +3040,7 @@ }, "GetHealthCheckCountRequest":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        A request for the number of health checks that are associated with the current Amazon Web Services account.

        " }, "GetHealthCheckCountResponse":{ @@ -3137,8 +3128,7 @@ }, "GetHostedZoneCountRequest":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        A request to retrieve a count of all the hosted zones that are associated with the current Amazon Web Services account.

        " }, "GetHostedZoneCountResponse":{ @@ -3312,8 +3302,7 @@ }, "GetTrafficPolicyInstanceCountRequest":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Request to get the number of traffic policy instances that are associated with the current Amazon Web Services account.

        " }, "GetTrafficPolicyInstanceCountResponse":{ @@ -5570,7 +5559,8 @@ "mx-central-1", "ap-southeast-7", "us-gov-east-1", - "us-gov-west-1" + "us-gov-west-1", + "ap-east-2" ], "max":64, "min":1 @@ -6453,7 +6443,8 @@ "mx-central-1", "us-isof-south-1", "us-isof-east-1", - "ap-southeast-7" + "ap-southeast-7", + "ap-east-2" ], "max":64, "min":1 diff --git a/services/route53domains/pom.xml b/services/route53domains/pom.xml index dbd613198bff..c7489fc08a44 100644 --- a/services/route53domains/pom.xml +++ b/services/route53domains/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT route53domains AWS Java SDK :: Services :: Amazon Route53 Domains diff --git a/services/route53profiles/pom.xml b/services/route53profiles/pom.xml index b26bddd1d033..6834e0d1a73a 100644 --- a/services/route53profiles/pom.xml +++ b/services/route53profiles/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT route53profiles AWS Java SDK :: Services :: Route53 Profiles diff --git a/services/route53recoverycluster/pom.xml b/services/route53recoverycluster/pom.xml index bc555b2b5fec..9cbd8b260429 100644 --- a/services/route53recoverycluster/pom.xml +++ b/services/route53recoverycluster/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT route53recoverycluster AWS Java SDK :: Services :: Route53 Recovery Cluster diff --git a/services/route53recoverycontrolconfig/pom.xml b/services/route53recoverycontrolconfig/pom.xml index 8d7cd0cababf..36f96b374c6f 100644 --- a/services/route53recoverycontrolconfig/pom.xml +++ b/services/route53recoverycontrolconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT route53recoverycontrolconfig AWS Java SDK :: Services :: Route53 Recovery Control Config diff --git a/services/route53recoveryreadiness/pom.xml b/services/route53recoveryreadiness/pom.xml index 82bb811193f0..ad464e6571b0 100644 --- a/services/route53recoveryreadiness/pom.xml +++ b/services/route53recoveryreadiness/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT route53recoveryreadiness AWS Java SDK :: Services :: Route53 Recovery Readiness diff --git a/services/route53resolver/pom.xml b/services/route53resolver/pom.xml index b26362aab446..cdb66a893019 100644 --- a/services/route53resolver/pom.xml +++ b/services/route53resolver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT route53resolver AWS Java SDK :: Services :: Route53Resolver diff --git a/services/rum/pom.xml b/services/rum/pom.xml index a3362129a388..eb58803f6316 100644 --- a/services/rum/pom.xml +++ b/services/rum/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT rum AWS Java SDK :: Services :: RUM diff --git a/services/s3/pom.xml b/services/s3/pom.xml index 3aa3a5ad2805..4f63e0fc2d13 100644 --- a/services/s3/pom.xml +++ b/services/s3/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT s3 AWS Java SDK :: Services :: Amazon S3 @@ -110,6 +110,12 @@ checksums-spi ${awsjavasdk.version} + + software.amazon.awssdk + url-connection-client + ${awsjavasdk.version} + test + software.amazon.awssdk.crt aws-crt diff --git a/http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/EmptyFileS3IntegrationTest.java 
b/services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/EmptyFileS3IntegrationTest.java similarity index 97% rename from http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/EmptyFileS3IntegrationTest.java rename to services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/EmptyFileS3IntegrationTest.java index 4a33f114aca0..7de782e87bf6 100644 --- a/http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/EmptyFileS3IntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/EmptyFileS3IntegrationTest.java @@ -13,7 +13,7 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.http.urlconnection; +package software.amazon.awssdk.services.s3.urlconnection; import static org.assertj.core.api.Assertions.assertThat; import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; diff --git a/http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/HeadObjectIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/HeadObjectIntegrationTest.java similarity index 97% rename from http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/HeadObjectIntegrationTest.java rename to services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/HeadObjectIntegrationTest.java index 678231eb05bf..fa1720860525 100644 --- a/http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/HeadObjectIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/HeadObjectIntegrationTest.java @@ -13,7 +13,7 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.http.urlconnection; +package software.amazon.awssdk.services.s3.urlconnection; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; diff --git a/http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/S3WithUrlHttpClientIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/S3WithUrlHttpClientIntegrationTest.java similarity index 98% rename from http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/S3WithUrlHttpClientIntegrationTest.java rename to services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/S3WithUrlHttpClientIntegrationTest.java index c0ed9d162971..41e73f72edb7 100644 --- a/http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/S3WithUrlHttpClientIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/S3WithUrlHttpClientIntegrationTest.java @@ -13,7 +13,7 @@ * permissions and limitations under the License. 
*/ -package software.amazon.awssdk.http.urlconnection; +package software.amazon.awssdk.services.s3.urlconnection; import static org.assertj.core.api.Assertions.assertThat; import static software.amazon.awssdk.testutils.service.AwsTestBase.CREDENTIALS_PROVIDER_CHAIN; @@ -34,6 +34,7 @@ import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.http.SdkHttpHeaders; import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.urlconnection.UrlConnectionHttpClient; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.S3ClientBuilder; diff --git a/http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/UrlHttpConnectionS3IntegrationTestBase.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/UrlHttpConnectionS3IntegrationTestBase.java similarity index 97% rename from http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/UrlHttpConnectionS3IntegrationTestBase.java rename to services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/UrlHttpConnectionS3IntegrationTestBase.java index d184e8b5da4e..497277075ad5 100644 --- a/http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/UrlHttpConnectionS3IntegrationTestBase.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/UrlHttpConnectionS3IntegrationTestBase.java @@ -13,11 +13,12 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.http.urlconnection; +package software.amazon.awssdk.services.s3.urlconnection; import java.util.Iterator; import java.util.List; import org.junit.jupiter.api.BeforeAll; +import software.amazon.awssdk.http.urlconnection.UrlConnectionHttpClient; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.S3ClientBuilder; diff --git a/services/s3/src/main/resources/codegen-resources/service-2.json b/services/s3/src/main/resources/codegen-resources/service-2.json index 3a28172f165c..ade1acb5e20c 100644 --- a/services/s3/src/main/resources/codegen-resources/service-2.json +++ b/services/s3/src/main/resources/codegen-resources/service-2.json @@ -27,7 +27,6 @@ "errors":[ {"shape":"NoSuchUpload"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadAbort.html", "documentation":"

        This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.

        To verify that all parts have been removed and prevent getting charged for the part storage, you should call the ListParts API operation and ensure that the parts list is empty.

        • Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload operation to abort all the in-progress multipart uploads.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload and Permissions in the Amazon S3 User Guide.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI and SDKs create the session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to AbortMultipartUpload:

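        A minimal sketch of the abort-then-verify pattern described above, using the AWS SDK for Java v2 (the bucket, key, and upload ID are placeholders):

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.ListPartsResponse;
        import software.amazon.awssdk.services.s3.model.NoSuchUploadException;

        public class AbortUploadExample {
            public static void main(String[] args) {
                S3Client s3 = S3Client.create();
                String bucket = "amzn-s3-demo-bucket";
                String key = "large-object";
                String uploadId = "EXAMPLE-UPLOAD-ID"; // returned by CreateMultipartUpload

                s3.abortMultipartUpload(r -> r.bucket(bucket).key(key).uploadId(uploadId));

                // Verify no parts remain billable; a fully removed upload raises NoSuchUpload.
                try {
                    ListPartsResponse parts = s3.listParts(r -> r.bucket(bucket).key(key).uploadId(uploadId));
                    System.out.println("Parts still present: " + parts.parts().size());
                } catch (NoSuchUploadException e) {
                    System.out.println("Upload fully aborted.");
                }
            }
        }
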
        " }, "CompleteMultipartUpload":{ @@ -38,7 +37,6 @@ }, "input":{"shape":"CompleteMultipartUploadRequest"}, "output":{"shape":"CompleteMultipartUploadOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadComplete.html", "documentation":"

        Completes a multipart upload by assembling previously uploaded parts.

        You first initiate the multipart upload and then upload all parts using the UploadPart operation or the UploadPartCopy operation. After successfully uploading all relevant parts of an upload, you call this CompleteMultipartUpload operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the CompleteMultipartUpload request, you must provide the parts list and ensure that the parts list is complete. The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list, you must provide the PartNumber value and the ETag value that are returned after that part was uploaded.

        The processing of a CompleteMultipartUpload request could take several minutes to finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial 200 OK response has been sent. This means that a 200 OK response can contain either a success or an error. The error response might be embedded in the 200 OK response. If you call this API operation directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).

        Note that if CompleteMultipartUpload fails, applications should be prepared to retry any failed requests (including 500 error responses). For more information, see Amazon S3 Error Best Practices.

        You can't use Content-Type: application/x-www-form-urlencoded for the CompleteMultipartUpload requests. Also, if you don't provide a Content-Type header, CompleteMultipartUpload can still return a 200 OK response.

        For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.

          If you provide an additional checksum value in your MultipartUpload requests and the object is encrypted with Key Management Service, you must have permission to use the kms:Decrypt action for the CompleteMultipartUpload request to succeed.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI and SDKs create the session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.

          If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

        Special errors
        • Error Code: EntityTooSmall

          • Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.

          • HTTP Status Code: 400 Bad Request

        • Error Code: InvalidPart

          • Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified ETag might not have matched the uploaded part's ETag.

          • HTTP Status Code: 400 Bad Request

        • Error Code: InvalidPartOrder

          • Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.

          • HTTP Status Code: 400 Bad Request

        • Error Code: NoSuchUpload

          • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

          • HTTP Status Code: 404 Not Found

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to CompleteMultipartUpload:

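        A short sketch of assembling the parts list for this call with the AWS SDK for Java v2, assuming the caller recorded each part's ETag from the UploadPart responses:

        import java.util.List;
        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.CompletedPart;

        public class CompleteUploadExample {
            // Completes an upload given the ETags recorded from each UploadPart response.
            static void complete(S3Client s3, String bucket, String key, String uploadId, List<String> eTags) {
                CompletedPart[] parts = new CompletedPart[eTags.size()];
                for (int i = 0; i < eTags.size(); i++) {
                    // Part numbers must be ascending and match the uploaded parts.
                    parts[i] = CompletedPart.builder().partNumber(i + 1).eTag(eTags.get(i)).build();
                }
                s3.completeMultipartUpload(r -> r
                    .bucket(bucket)
                    .key(key)
                    .uploadId(uploadId)
                    .multipartUpload(m -> m.parts(parts)));
            }
        }
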
        " }, "CopyObject":{ @@ -52,9 +50,7 @@ "errors":[ {"shape":"ObjectNotInActiveTierError"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectCOPY.html", "documentation":"

        Creates a copy of an object that is already stored in Amazon S3.

        You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.

        You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.

        • Amazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        • VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint.

        Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.

        Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

        Authentication and authorization

        All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

        Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the temporary security credentials through the CreateSession API operation.

        The Amazon Web Services CLI and SDKs handle authentication and authorization on your behalf.

        Permissions

        You must have read access to the source object and write access to the destination bucket.

        • General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject operation.

          • If the source object is in a general purpose bucket, you must have s3:GetObject permission to read the source object that is being copied.

          • If the destination bucket is a general purpose bucket, you must have s3:PutObject permission to write the object copy to the destination bucket.

        • Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject operation.

          • If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.

          • If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key can't be set to ReadOnly on the copy destination bucket.

          If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

          For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.

        Response and special errors

        When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response does not contain the Content-Length header. You always need to read the entire response body to check whether the copy succeeded.

        • If the copy is successful, you receive a response with information about the copied object.

        • A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK response can contain either a success or an error.

          • If the error occurs before the copy action starts, you receive a standard Amazon S3 error.

          • If the error occurs during the copy operation, the error response is embedded in the 200 OK response. For example, in a cross-Region copy, you may encounter throttling and receive a 200 OK response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy is complete. As another example, if you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.

            If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).

        Charge

        The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different Region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.

        HTTP Host header syntax
        • Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        • Amazon S3 on Outposts - When you use this action with S3 on Outposts through the REST API, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. The hostname isn't required when you use the Amazon Web Services CLI or SDKs.

        The following operations are related to CopyObject:

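        For objects up to 5 GB, a single CopyObject call with the AWS SDK for Java v2 might look like the following (bucket and key names are illustrative; larger objects need UploadPartCopy, as noted above):

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.CopyObjectResponse;

        public class CopyObjectExample {
            public static void main(String[] args) {
                S3Client s3 = S3Client.create();
                // Illustrative bucket and key names.
                CopyObjectResponse copy = s3.copyObject(r -> r
                    .sourceBucket("amzn-s3-demo-bucket")
                    .sourceKey("reports/2025.csv")
                    .destinationBucket("amzn-s3-demo-bucket-backup")
                    .destinationKey("reports/2025.csv"));
                System.out.println("Copied, ETag: " + copy.copyObjectResult().eTag());
            }
        }
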
        ", - "alias":"PutObjectCopy", "staticContextParams":{ "DisableS3ExpressSessionAuth":{"value":true} } @@ -71,9 +67,7 @@ {"shape":"BucketAlreadyExists"}, {"shape":"BucketAlreadyOwnedByYou"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html", "documentation":"

        This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts bucket, see CreateBucket .

        Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.

        There are two types of buckets: general purpose buckets and directory buckets. For more information about these bucket types, see Creating, configuring, and working with Amazon S3 buckets in the Amazon S3 User Guide.

        • General purpose buckets - If you send your CreateBucket request to the s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. So the signature calculations in Signature Version 4 must use us-east-1 as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, see Virtual hosting of buckets in the Amazon S3 User Guide.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - In addition to the s3:CreateBucket permission, the following permissions are required in a policy when your CreateBucket request includes specific headers:

          • Access control lists (ACLs) - In your CreateBucket request, if you specify an access control list (ACL) and set it to public-read, public-read-write, authenticated-read, or if you explicitly specify any other custom ACLs, both s3:CreateBucket and s3:PutBucketAcl permissions are required. In your CreateBucket request, if you set the ACL to private, or if you don't specify any ACLs, only the s3:CreateBucket permission is required.

          • Object Lock - In your CreateBucket request, if you set x-amz-bucket-object-lock-enabled to true, the s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required.

          • S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership header, then the s3:PutBucketOwnershipControls permission is required.

            To set an ACL on a bucket as part of a CreateBucket request, you must explicitly set S3 Object Ownership for the bucket to a different value than the default, BucketOwnerEnforced. Additionally, if your desired bucket ACL grants public access, you must first create the bucket (without the bucket ACL) and then explicitly disable Block Public Access on the bucket before using PutBucketAcl to set the ACL. If you try to create a bucket with a public ACL, the request will fail.

            For the majority of modern use cases in S3, we recommend that you keep all Block Public Access settings enabled and keep ACLs disabled. If you would like to share data with users outside of your account, you can use bucket policies as needed. For more information, see Controlling ownership of objects and disabling ACLs for your bucket and Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.

          • S3 Block Public Access - If your specific use case requires granting public access to your S3 resources, you can disable Block Public Access. Specifically, you can create a new bucket with Block Public Access enabled, then separately call the DeletePublicAccessBlock API. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about S3 Block Public Access, see Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.

        • Directory bucket permissions - You must have the s3express:CreateBucket permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

          The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public Access are not supported for directory buckets. For directory buckets, all Block Public Access settings are enabled at the bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs disabled). These settings can't be modified.

          For more information about permissions for creating and working with directory buckets, see Directory buckets in the Amazon S3 User Guide. For more information about supported S3 features for directory buckets, see Features of S3 Express One Zone in the Amazon S3 User Guide.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com.

        The following operations are related to CreateBucket:

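        A minimal sketch of creating a general purpose bucket outside us-east-1 with the AWS SDK for Java v2, illustrating the location-constraint point above (the bucket name is a placeholder):

        import software.amazon.awssdk.regions.Region;
        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.BucketLocationConstraint;

        public class CreateBucketExample {
            public static void main(String[] args) {
                S3Client s3 = S3Client.builder().region(Region.US_WEST_2).build();
                // Outside us-east-1, the location constraint must name the bucket's Region.
                s3.createBucket(r -> r
                    .bucket("amzn-s3-demo-bucket")
                    .createBucketConfiguration(c -> c
                        .locationConstraint(BucketLocationConstraint.US_WEST_2)));
            }
        }
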
        ", - "alias":"PutBucket", "staticContextParams":{ "DisableAccessPoints":{"value":true}, "UseS3ExpressControlEndpoint":{"value":true} @@ -103,9 +97,7 @@ }, "input":{"shape":"CreateMultipartUploadRequest"}, "output":{"shape":"CreateMultipartUploadOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadInitiate.html", - "documentation":"

        This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.

        After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.

        If you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart upload must be completed within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration.

        • Directory buckets - S3 Lifecycle is not supported by directory buckets.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Request signing

        For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - To perform a multipart upload with encryption using a Key Management Service (KMS) key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI and SDKs create the session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.

        Encryption
        • General purpose buckets - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with a Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the CreateMultipartUpload request.

          • Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (aws/s3) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request.

            • x-amz-server-side-encryption

            • x-amz-server-side-encryption-aws-kms-key-id

            • x-amz-server-side-encryption-context

            • If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3 key) in KMS to protect the data.

            • To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.

            • If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key policy and your IAM user or role.

            • All GET and PUT requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

            For more information about server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys in the Amazon S3 User Guide.

          • Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request.

            • x-amz-server-side-encryption-customer-algorithm

            • x-amz-server-side-encryption-customer-key

            • x-amz-server-side-encryption-customer-key-MD5

            For more information about server-side encryption with customer-provided encryption keys (SSE-C), see Protecting data using server-side encryption with customer-provided encryption keys (SSE-C) in the Amazon S3 User Guide.

        • Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

          In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request. You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket.

          When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), the encryption request headers must match the default encryption configuration of the directory bucket.

          For directory buckets, when you perform a CreateMultipartUpload operation and an UploadPartCopy operation, the request headers you provide in the CreateMultipartUpload request must match the default encryption configuration of the destination bucket.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to CreateMultipartUpload:

        ", - "alias":"InitiateMultipartUpload" + "documentation":"

        This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.

        After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.

        If you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart upload must be completed within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration.

        • Directory buckets - S3 Lifecycle is not supported by directory buckets.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Request signing

        For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - To perform a multipart upload with encryption using a Key Management Service (KMS) key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.

        Encryption
        • General purpose buckets - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with a Key Management Service (KMS) key (SSE-KMS) or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key or customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypt the object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the CreateMultipartUpload request.

          • Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (aws/s3) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request.

            • x-amz-server-side-encryption

            • x-amz-server-side-encryption-aws-kms-key-id

            • x-amz-server-side-encryption-context

            • If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3 key) in KMS to protect the data.

            • To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.

            • If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key policy and your IAM user or role.

            • All GET and PUT requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

            For more information about server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys in the Amazon S3 User Guide.

          • Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request.

            • x-amz-server-side-encryption-customer-algorithm

            • x-amz-server-side-encryption-customer-key

            • x-amz-server-side-encryption-customer-key-MD5

            For more information about server-side encryption with customer-provided encryption keys (SSE-C), see Protecting data using server-side encryption with customer-provided encryption keys (SSE-C) in the Amazon S3 User Guide.

        • Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

          In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request. You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket.

          When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), the encryption request headers must match the default encryption configuration of the directory bucket.

          For directory buckets, when you perform a CreateMultipartUpload operation and an UploadPartCopy operation, the request headers you provide in the CreateMultipartUpload request must match the default encryption configuration of the destination bucket.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to CreateMultipartUpload:

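        A minimal sketch of the SSE-KMS case above, using the AWS SDK for Java v2 S3Client; the bucket name, object key, and KMS key ARN are placeholders, not values from this model, and error handling is omitted.

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse;
        import software.amazon.awssdk.services.s3.model.ServerSideEncryption;

        public class CreateMultipartUploadSseKmsSketch {
            public static void main(String[] args) {
                try (S3Client s3 = S3Client.create()) {
                    // serverSideEncryption maps to x-amz-server-side-encryption (aws:kms);
                    // ssekmsKeyId maps to x-amz-server-side-encryption-aws-kms-key-id.
                    CreateMultipartUploadResponse resp = s3.createMultipartUpload(b -> b
                            .bucket("amzn-s3-demo-bucket")                      // placeholder
                            .key("backups/large-object.bin")                    // placeholder
                            .serverSideEncryption(ServerSideEncryption.AWS_KMS)
                            .ssekmsKeyId("arn:aws:kms:us-west-2:111122223333:key/key-id")); // placeholder
                    System.out.println("Started multipart upload: " + resp.uploadId());
                }
            }
        }
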
        " }, "CreateSession":{ "name":"CreateSession", @@ -131,7 +123,6 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETE.html", "documentation":"

        Deletes the S3 bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted.

        • Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - You must have the s3:DeleteBucket permission on the specified bucket in a policy.

        • Directory bucket permissions - You must have the s3express:DeleteBucket permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com.

        The following operations are related to DeleteBucket:

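        A hedged sketch of the delete flow with the AWS SDK for Java v2, assuming an unversioned general purpose bucket (the bucket name is a placeholder); a versioned bucket would additionally need every object version and delete marker removed first.

        import software.amazon.awssdk.services.s3.S3Client;

        public class DeleteBucketSketch {
            public static void main(String[] args) {
                String bucket = "amzn-s3-demo-bucket"; // placeholder
                try (S3Client s3 = S3Client.create()) {
                    // The bucket must be empty before DeleteBucket succeeds.
                    s3.listObjectsV2Paginator(b -> b.bucket(bucket))
                      .contents()
                      .forEach(o -> s3.deleteObject(b -> b.bucket(bucket).key(o.key())));
                    s3.deleteBucket(b -> b.bucket(bucket));
                }
            }
        }
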
        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -158,7 +149,6 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketCorsRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEcors.html", "documentation":"

        This operation is not supported for directory buckets.

        Deletes the cors configuration information set for the bucket.

        To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket owner has this permission by default and can grant this permission to others.

        For information about cors, see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide.

        Related Resources

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -211,7 +201,6 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketLifecycleRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETElifecycle.html", "documentation":"

        Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration.

        Permissions
        • General purpose bucket permissions - By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must have the s3:PutLifecycleConfiguration permission.

          For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.

        • Directory bucket permissions - You must have the s3express:PutLifecycleConfiguration permission in an IAM identity-based policy to use this operation. Cross-account access to this API operation isn't supported. The resource owner can optionally grant access permissions to others by creating a role or user for them as long as they are within the same account as the owner and resource.

          For more information about directory bucket policies and permissions, see Authorizing Regional endpoint APIs with IAM in the Amazon S3 User Guide.

          Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.

        For more information about the object expiration, see Elements to Describe Lifecycle Actions.

        Related actions include:

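        For reference, a one-call sketch with the AWS SDK for Java v2 (the bucket name is a placeholder):

        import software.amazon.awssdk.services.s3.S3Client;

        public class DeleteBucketLifecycleSketch {
            public static void main(String[] args) {
                try (S3Client s3 = S3Client.create()) {
                    // Removes every rule in the bucket's lifecycle configuration.
                    s3.deleteBucketLifecycle(b -> b.bucket("amzn-s3-demo-bucket")); // placeholder
                }
            }
        }
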
        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -264,7 +253,6 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketPolicyRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html", "documentation":"

        Deletes the policy of a specified bucket.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions

        If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the DeleteBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

        If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

        To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.

        • General purpose bucket permissions - The s3:DeleteBucketPolicy permission is required in a policy. For more information about general purpose buckets bucket policies, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.

        • Directory bucket permissions - To grant access to this API operation, you must have the s3express:DeleteBucketPolicy permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com.

        The following operations are related to DeleteBucketPolicy

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -291,7 +279,6 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketTaggingRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEtagging.html", "documentation":"

        This operation is not supported for directory buckets.

        Deletes the tags from the bucket.

        To use this operation, you must have permission to perform the s3:PutBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

        The following operations are related to DeleteBucketTagging:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -305,7 +292,6 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketWebsiteRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEwebsite.html", "documentation":"

        This operation is not supported for directory buckets.

        This action removes the website configuration for a bucket. Amazon S3 returns a 200 OK response upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 response if the bucket specified in the request does not exist.

        This DELETE action requires the S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users permission to delete the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite permission.

        For more information about hosting websites, see Hosting Websites on Amazon S3.

        The following operations are related to DeleteBucketWebsite:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -320,7 +306,6 @@ }, "input":{"shape":"DeleteObjectRequest"}, "output":{"shape":"DeleteObjectOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectDELETE.html", "documentation":"

        Removes an object from a bucket. The behavior depends on the bucket's versioning state:

        • If bucket versioning is not enabled, the operation permanently deletes the object.

        • If bucket versioning is enabled, the operation inserts a delete marker, which becomes the current version of the object. To permanently delete an object in a versioned bucket, you must include the object’s versionId in the request. For more information about versioning-enabled buckets, see Deleting object versions from a versioning-enabled bucket.

        • If bucket versioning is suspended, the operation removes the object that has a null versionId, if there is one, and inserts a delete marker that becomes the current version of the object. If there isn't an object with a null versionId, and all versions of the object have a versionId, Amazon S3 does not remove the object and only inserts a delete marker. To permanently delete an object that has a versionId, you must include the object’s versionId in the request. For more information about versioning-suspended buckets, see Deleting objects from versioning-suspended buckets.

        • Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        To remove a specific version, you must use the versionId query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker to true.

        If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS. For more information about MFA Delete, see Using MFA Delete in the Amazon S3 User Guide. To see sample requests that use versioning, see Sample Request.

        Directory buckets - MFA delete is not supported by directory buckets.

        You can delete objects by explicitly calling DELETE Object or calling (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration actions.

        Directory buckets - S3 Lifecycle is not supported by directory buckets.

        Permissions
        • General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects request includes specific headers.

          • s3:DeleteObject - To delete an object from a bucket, you must always have the s3:DeleteObject permission.

          • s3:DeleteObjectVersion - To delete a specific version of an object from a versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following action is related to DeleteObject:

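        A minimal AWS SDK for Java v2 sketch of a versioned delete; the bucket name, key, and version ID are placeholders.

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.DeleteObjectResponse;

        public class DeleteObjectSketch {
            public static void main(String[] args) {
                try (S3Client s3 = S3Client.create()) {
                    // Without a versionId, this inserts a delete marker in a
                    // versioning-enabled bucket; with a versionId, it permanently
                    // deletes that version.
                    DeleteObjectResponse resp = s3.deleteObject(b -> b
                            .bucket("amzn-s3-demo-bucket")              // placeholder
                            .key("photos/2006/February/sample.jpg")     // placeholder
                            .versionId("example-version-id"));          // placeholder
                    // deleteMarker() surfaces the x-amz-delete-marker response header.
                    System.out.println("Delete marker: " + resp.deleteMarker());
                }
            }
        }
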
        " }, "DeleteObjectTagging":{ @@ -342,9 +327,7 @@ }, "input":{"shape":"DeleteObjectsRequest"}, "output":{"shape":"DeleteObjectsOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/multiobjectdeleteapi.html", "documentation":"

        This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.

        The request can contain a list of up to 1,000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success or failure, in the response. If the object specified in the request isn't found, Amazon S3 confirms the deletion by returning the result as deleted.

        • Directory buckets - S3 Versioning isn't enabled and supported for directory buckets.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion in a quiet mode, the operation does not return any information about the delete in the response body.

        When performing this action on an MFA Delete enabled bucket that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete in the Amazon S3 User Guide.

        Directory buckets - MFA delete is not supported by directory buckets.

        Permissions
        • General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects request includes specific headers.

          • s3:DeleteObject - To delete an object from a bucket, you must always specify the s3:DeleteObject permission.

          • s3:DeleteObjectVersion - To delete a specific version of an object from a versioning-enabled bucket, you must specify the s3:DeleteObjectVersion permission.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.

        Content-MD5 request header
        • General purpose bucket - The Content-MD5 request header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.

        • Directory bucket - The Content-MD5 request header or an additional checksum request header (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1, or x-amz-checksum-sha256) is required for all Multi-Object Delete requests.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to DeleteObjects:

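        A short AWS SDK for Java v2 sketch of a quiet-mode batch delete (bucket name and keys are placeholders); for this operation the SDK supplies the required checksum header on your behalf.

        import java.util.Arrays;
        import java.util.List;
        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse;
        import software.amazon.awssdk.services.s3.model.ObjectIdentifier;

        public class DeleteObjectsSketch {
            public static void main(String[] args) {
                try (S3Client s3 = S3Client.create()) {
                    List<ObjectIdentifier> keys = Arrays.asList(            // placeholder keys
                            ObjectIdentifier.builder().key("logs/2024-01-01.log").build(),
                            ObjectIdentifier.builder().key("logs/2024-01-02.log").build());
                    // quiet(true) selects quiet mode: only failed deletes are reported.
                    DeleteObjectsResponse resp = s3.deleteObjects(b -> b
                            .bucket("amzn-s3-demo-bucket")                  // placeholder
                            .delete(d -> d.objects(keys).quiet(true)));
                    resp.errors().forEach(e -> System.out.println(e.key() + ": " + e.message()));
                }
            }
        }
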
        ", - "alias":"DeleteMultipleObjects", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -384,7 +367,6 @@ }, "input":{"shape":"GetBucketAclRequest"}, "output":{"shape":"GetBucketAclOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETacl.html", "documentation":"

        This operation is not supported for directory buckets.

        This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have the READ_ACP access to the bucket. If READ_ACP permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.

        When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.

        When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError, see List of Error Codes.

        If your bucket uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.

        The following operations are related to GetBucketAcl:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -411,7 +393,6 @@ }, "input":{"shape":"GetBucketCorsRequest"}, "output":{"shape":"GetBucketCorsOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETcors.html", "documentation":"

        This operation is not supported for directory buckets.

        Returns the Cross-Origin Resource Sharing (CORS) configuration information set for the bucket.

        To use this operation, you must have permission to perform the s3:GetBucketCORS action. By default, the bucket owner has this permission and can grant it to others.

        When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.

        When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError, see List of Error Codes.

        For more information about CORS, see Enabling Cross-Origin Resource Sharing.

        The following operations are related to GetBucketCors:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -464,7 +445,6 @@ }, "input":{"shape":"GetBucketLifecycleRequest"}, "output":{"shape":"GetBucketLifecycleOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlifecycle.html", "documentation":"

        For an updated version of this API, see GetBucketLifecycleConfiguration. If you configured a bucket lifecycle using the filter element, you should see the updated version of this topic. This topic is provided for backward compatibility.

        This operation is not supported for directory buckets.

        Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.

        To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

        GetBucketLifecycle has the following special error:

        • Error code: NoSuchLifecycleConfiguration

          • Description: The lifecycle configuration does not exist.

          • HTTP Status Code: 404 Not Found

          • SOAP Fault Code Prefix: Client

        The following operations are related to GetBucketLifecycle:

        ", "deprecated":true, "staticContextParams":{ @@ -492,7 +472,6 @@ }, "input":{"shape":"GetBucketLocationRequest"}, "output":{"shape":"GetBucketLocationOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlocation.html", "documentation":"

        This operation is not supported for directory buckets.

        Returns the Region the bucket resides in. You set the bucket's Region using the LocationConstraint request parameter in a CreateBucket request. For more information, see CreateBucket.

        When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.

        When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError, see List of Error Codes.

        We recommend that you use HeadBucket to return the Region that a bucket resides in. For backward compatibility, Amazon S3 continues to support GetBucketLocation.

        The following operations are related to GetBucketLocation:

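        A sketch of both approaches with the AWS SDK for Java v2 (the bucket name is a placeholder); reading the x-amz-bucket-region header from a HeadBucket response is the recommended path.

        import software.amazon.awssdk.services.s3.S3Client;

        public class BucketRegionSketch {
            public static void main(String[] args) {
                String bucket = "amzn-s3-demo-bucket"; // placeholder
                try (S3Client s3 = S3Client.create()) {
                    // Legacy call; an empty LocationConstraint means us-east-1.
                    String legacy = s3.getBucketLocation(b -> b.bucket(bucket))
                                      .locationConstraintAsString();
                    // Recommended: read the Region from the HeadBucket response header.
                    String region = s3.headBucket(b -> b.bucket(bucket))
                                      .sdkHttpResponse()
                                      .firstMatchingHeader("x-amz-bucket-region")
                                      .orElse(legacy);
                    System.out.println("Bucket Region: " + region);
                }
            }
        }
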
        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -506,7 +485,6 @@ }, "input":{"shape":"GetBucketLoggingRequest"}, "output":{"shape":"GetBucketLoggingOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlogging.html", "documentation":"

        This operation is not supported for directory buckets.

        Returns the logging status of a bucket and the permissions users have to view and modify that status.

        The following operations are related to GetBucketLogging:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -546,7 +524,6 @@ }, "input":{"shape":"GetBucketNotificationConfigurationRequest"}, "output":{"shape":"NotificationConfigurationDeprecated"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETnotification.html", "documentation":"

        This operation is not supported for directory buckets.

        No longer used, see GetBucketNotificationConfiguration.

        ", "deprecated":true, "staticContextParams":{ @@ -587,7 +564,6 @@ }, "input":{"shape":"GetBucketPolicyRequest"}, "output":{"shape":"GetBucketPolicyOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETpolicy.html", "documentation":"

        Returns the policy of a specified bucket.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions

        If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

        If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

        To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.

        • General purpose bucket permissions - The s3:GetBucketPolicy permission is required in a policy. For more information about general purpose buckets bucket policies, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.

        • Directory bucket permissions - To grant access to this API operation, you must have the s3express:GetBucketPolicy permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

        Example bucket policies

        General purpose buckets example bucket policies - See Bucket policy examples in the Amazon S3 User Guide.

        Directory bucket example bucket policies - See Example bucket policies for S3 Express One Zone in the Amazon S3 User Guide.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com.

        The following action is related to GetBucketPolicy:

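        A minimal AWS SDK for Java v2 sketch (the bucket name is a placeholder):

        import software.amazon.awssdk.services.s3.S3Client;

        public class GetBucketPolicySketch {
            public static void main(String[] args) {
                try (S3Client s3 = S3Client.create()) {
                    // policy() returns the bucket policy document as a JSON string.
                    String policy = s3.getBucketPolicy(b -> b.bucket("amzn-s3-demo-bucket")) // placeholder
                                      .policy();
                    System.out.println(policy);
                }
            }
        }
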
        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -627,7 +603,6 @@ }, "input":{"shape":"GetBucketRequestPaymentRequest"}, "output":{"shape":"GetBucketRequestPaymentOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentGET.html", "documentation":"

        This operation is not supported for directory buckets.

        Returns the request payment configuration of a bucket. To use this version of the operation, you must be the bucket owner. For more information, see Requester Pays Buckets.

        The following operations are related to GetBucketRequestPayment:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -641,7 +616,6 @@ }, "input":{"shape":"GetBucketTaggingRequest"}, "output":{"shape":"GetBucketTaggingOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETtagging.html", "documentation":"

        This operation is not supported for directory buckets.

        Returns the tag set associated with the bucket.

        To use this operation, you must have permission to perform the s3:GetBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

        GetBucketTagging has the following special error:

        • Error code: NoSuchTagSet

          • Description: There is no tag set associated with the bucket.

        The following operations are related to GetBucketTagging:

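        A sketch of handling the NoSuchTagSet error with the AWS SDK for Java v2 (the bucket name is a placeholder):

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.S3Exception;

        public class GetBucketTaggingSketch {
            public static void main(String[] args) {
                try (S3Client s3 = S3Client.create()) {
                    try {
                        s3.getBucketTagging(b -> b.bucket("amzn-s3-demo-bucket")) // placeholder
                          .tagSet()
                          .forEach(tag -> System.out.println(tag.key() + "=" + tag.value()));
                    } catch (S3Exception e) {
                        // Thrown with error code NoSuchTagSet when the bucket has no tags.
                        if ("NoSuchTagSet".equals(e.awsErrorDetails().errorCode())) {
                            System.out.println("Bucket has no tag set");
                        } else {
                            throw e;
                        }
                    }
                }
            }
        }
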
        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -655,7 +629,6 @@ }, "input":{"shape":"GetBucketVersioningRequest"}, "output":{"shape":"GetBucketVersioningOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETversioningStatus.html", "documentation":"

        This operation is not supported for directory buckets.

        Returns the versioning state of a bucket.

        To retrieve the versioning state of a bucket, you must be the bucket owner.

        This implementation also returns the MFA Delete status of the versioning state. If the MFA Delete status is enabled, the bucket owner must use an authentication device to change the versioning state of the bucket.

        The following operations are related to GetBucketVersioning:

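        A minimal AWS SDK for Java v2 sketch (the bucket name is a placeholder):

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.GetBucketVersioningResponse;

        public class GetBucketVersioningSketch {
            public static void main(String[] args) {
                try (S3Client s3 = S3Client.create()) {
                    GetBucketVersioningResponse resp =
                        s3.getBucketVersioning(b -> b.bucket("amzn-s3-demo-bucket")); // placeholder
                    // status() is null when versioning has never been enabled.
                    System.out.println("Versioning: " + resp.statusAsString());
                    System.out.println("MFA Delete: " + resp.mfaDeleteAsString());
                }
            }
        }
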
        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -669,7 +642,6 @@ }, "input":{"shape":"GetBucketWebsiteRequest"}, "output":{"shape":"GetBucketWebsiteOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETwebsite.html", "documentation":"

        This operation is not supported for directory buckets.

        Returns the website configuration for a bucket. To host a website on Amazon S3, you can configure a bucket as a website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3.

        This GET action requires the S3:GetBucketWebsite permission. By default, only the bucket owner can read the bucket website configuration. However, bucket owners can allow other users to read the website configuration by writing a bucket policy granting them the S3:GetBucketWebsite permission.

        The following operations are related to GetBucketWebsite:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -687,7 +659,6 @@ {"shape":"NoSuchKey"}, {"shape":"InvalidObjectState"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html", "documentation":"

        Retrieves an object from Amazon S3.

        In the GetObject request, specify the full key name for the object.

        General purpose buckets - Both the virtual-hosted-style requests and the path-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the object key name as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the object key name as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification in the Amazon S3 User Guide.

        Directory buckets - Only virtual-hosted-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named amzn-s3-demo-bucket--usw2-az1--x-s3, specify the object key name as /photos/2006/February/sample.jpg. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - You must have the required permissions in a policy. To use GetObject, you must have the READ access to the object (or version). If you grant READ access to the anonymous user, the GetObject operation returns the object without using an authorization header. For more information, see Specifying permissions in a policy in the Amazon S3 User Guide.

          If you include a versionId in your request header, you must have the s3:GetObjectVersion permission to access a specific version of an object. The s3:GetObject permission is not required in this scenario.

          If you request the current version of an object without a specific versionId in the request header, only the s3:GetObject permission is required. The s3:GetObjectVersion permission is not required in this scenario.

          If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

          • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found error.

          • If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Access Denied error.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.

          If the object is encrypted using SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

        Storage classes

        If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectState error. For information about restoring archived objects, see Restoring Archived Objects in the Amazon S3 User Guide.

        Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage class) in Dedicated Local Zones. Unsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request.

        Encryption

        Encryption request headers, like x-amz-server-side-encryption, should not be sent for the GetObject requests, if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject requests for the object that uses these types of keys, you’ll get an HTTP 400 Bad Request error.

        Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.

        Overriding response header values through the request

        There are times when you want to override certain response header values of a GetObject response. For example, you might override the Content-Disposition response header value through your GetObject request.

        You can override values for a set of response headers. These modified response header values are included only in a successful response, that is, when the HTTP status code 200 OK is returned. The headers you can override using the following query parameters in the request are a subset of the headers that Amazon S3 accepts when you create an object.

        The response headers that you can override for the GetObject response are Cache-Control, Content-Disposition, Content-Encoding, Content-Language, Content-Type, and Expires.

        To override values for a set of response headers in the GetObject response, you can use the following query parameters in the request.

        • response-cache-control

        • response-content-disposition

        • response-content-encoding

        • response-content-language

        • response-content-type

        • response-expires

        When you use these parameters, you must sign the request by using either an Authorization header or a presigned URL. These parameters cannot be used with an unsigned (anonymous) request.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to GetObject:

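        A sketch of the response header overrides with the AWS SDK for Java v2 (bucket name and key are placeholders); the override parameters affect only this response, and the request must be signed.

        import software.amazon.awssdk.core.ResponseBytes;
        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.GetObjectResponse;

        public class GetObjectOverrideSketch {
            public static void main(String[] args) {
                try (S3Client s3 = S3Client.create()) {
                    // responseContentDisposition and responseContentType map to the
                    // response-content-disposition and response-content-type query
                    // parameters described above.
                    ResponseBytes<GetObjectResponse> bytes = s3.getObjectAsBytes(b -> b
                            .bucket("amzn-s3-demo-bucket")              // placeholder
                            .key("photos/2006/February/sample.jpg")     // placeholder
                            .responseContentDisposition("attachment; filename=\"sample.jpg\"")
                            .responseContentType("image/jpeg"));
                    System.out.println("Content-Type: " + bytes.response().contentType());
                }
            }
        }
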
        ", "httpChecksum":{ "requestValidationModeMember":"ChecksumMode", @@ -711,7 +682,6 @@ "errors":[ {"shape":"NoSuchKey"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETacl.html", "documentation":"

        This operation is not supported for directory buckets.

        Returns the access control list (ACL) of an object. To use this operation, you must have s3:GetObjectAcl permissions or READ_ACP access to the object. For more information, see Mapping of ACL permissions and access policy permissions in the Amazon S3 User Guide

        This functionality is not supported for Amazon S3 on Outposts.

        By default, GET returns ACL information about the current version of an object. To return ACL information about a different version, use the versionId subresource.

        If your bucket uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.

        The following operations are related to GetObjectAcl:

        " }, "GetObjectAttributes":{ @@ -725,7 +695,7 @@ "errors":[ {"shape":"NoSuchKey"} ], - "documentation":"

        Retrieves all the metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.

        GetObjectAttributes combines the functionality of HeadObject and ListParts. All of the data returned with each of those individual calls can be returned with a single call to GetObjectAttributes.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - To use GetObjectAttributes, you must have READ access to the object. The permissions that you need to use this operation depend on whether the bucket is versioned. If the bucket is versioned, you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes permissions for this operation. If the bucket is not versioned, you need the s3:GetObject and s3:GetObjectAttributes permissions. For more information, see Specifying Permissions in a Policy in the Amazon S3 User Guide. If the object that you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

          • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found (\"no such key\") error.

          • If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden (\"access denied\") error.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

          If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

        Encryption

        Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a GET request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object.

        If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:

        • x-amz-server-side-encryption-customer-algorithm

        • x-amz-server-side-encryption-customer-key

        • x-amz-server-side-encryption-customer-key-MD5

        For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.

        Directory bucket permissions - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

        Versioning

        Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request.

        Conditional request headers

        Consider the following when using request headers:

        • If both of the If-Match and If-Unmodified-Since headers are present in the request as follows, then Amazon S3 returns the HTTP status code 200 OK and the data requested:

          • If-Match condition evaluates to true.

          • If-Unmodified-Since condition evaluates to false.

          For more information about conditional requests, see RFC 7232.

        • If both of the If-None-Match and If-Modified-Since headers are present in the request as follows, then Amazon S3 returns the HTTP status code 304 Not Modified:

          • If-None-Match condition evaluates to false.

          • If-Modified-Since condition evaluates to true.

          For more information about conditional requests, see RFC 7232.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following actions are related to GetObjectAttributes:

        " + "documentation":"

        Retrieves all of the metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.

        GetObjectAttributes combines the functionality of HeadObject and ListParts. All of the data returned with both of those individual calls can be returned with a single call to GetObjectAttributes.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - To use GetObjectAttributes, you must have READ access to the object.

          The other permissions that you need to use this operation depend on whether the bucket is versioned and if a version ID is passed in the GetObjectAttributes request.

          • If you pass a version ID in your request, you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes permissions.

          • If you do not pass a version ID in your request, you need the s3:GetObject and s3:GetObjectAttributes permissions.

          For more information, see Specifying Permissions in a Policy in the Amazon S3 User Guide.

          If the object that you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

          • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found (\"no such key\") error.

          • If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden (\"access denied\") error.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI and SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.

          If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

        Encryption

        Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a GET request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error, because the encryption method can't be changed when you retrieve the object.

        If you encrypted an object when you stored the object in Amazon S3 by using server-side encryption with customer-provided encryption keys (SSE-C), then when you retrieve the metadata from the object, you must use the following headers. These headers provide the server with the encryption key required to retrieve the object's metadata. The headers are:

        • x-amz-server-side-encryption-customer-algorithm

        • x-amz-server-side-encryption-customer-key

        • x-amz-server-side-encryption-customer-key-MD5

        For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.

        Directory bucket permissions - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

        Versioning

        Directory buckets - S3 Versioning isn't enabled or supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can specify only null as the value of the versionId query parameter in the request.

        Conditional request headers

        Consider the following when using request headers:

        • If both of the If-Match and If-Unmodified-Since headers are present in the request as follows, then Amazon S3 returns the HTTP status code 200 OK and the data requested:

          • If-Match condition evaluates to true.

          • If-Unmodified-Since condition evaluates to false.

          For more information about conditional requests, see RFC 7232.

        • If both of the If-None-Match and If-Modified-Since headers are present in the request as follows, then Amazon S3 returns the HTTP status code 304 Not Modified:

          • If-None-Match condition evaluates to false.

          • If-Modified-Since condition evaluates to true.

          For more information about conditional requests, see RFC 7232.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following actions are related to GetObjectAttributes:

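        For illustration only (not part of the service model): a minimal SDK for Java v2 sketch that requests a subset of object attributes. Bucket and key values are placeholders.

          import software.amazon.awssdk.services.s3.S3Client;
          import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest;
          import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse;
          import software.amazon.awssdk.services.s3.model.ObjectAttributes;

          public class GetObjectAttributesExample {
              public static void main(String[] args) {
                  try (S3Client s3 = S3Client.create()) {
                      GetObjectAttributesResponse attrs = s3.getObjectAttributes(
                              GetObjectAttributesRequest.builder()
                                      .bucket("amzn-s3-demo-bucket") // placeholder bucket
                                      .key("example-object.txt")     // placeholder key
                                      // Only the attributes named here are returned.
                                      .objectAttributes(ObjectAttributes.OBJECT_SIZE,
                                                        ObjectAttributes.STORAGE_CLASS)
                                      .build());
                      System.out.println(attrs.objectSize() + " bytes, "
                              + attrs.storageClassAsString());
                  }
              }
          }
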
        " }, "GetObjectLegalHold":{ "name":"GetObjectLegalHold", @@ -775,7 +745,6 @@ }, "input":{"shape":"GetObjectTorrentRequest"}, "output":{"shape":"GetObjectTorrentOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETtorrent.html", "documentation":"

        This operation is not supported for directory buckets.

        Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files.

        You can get a torrent only for objects that are less than 5 GB in size and that are not encrypted using server-side encryption with a customer-provided encryption key.

        To use GET, you must have READ access to the object.

        This functionality is not supported for Amazon S3 on Outposts.

        The following action is related to GetObjectTorrent:

        " }, "GetPublicAccessBlock":{ @@ -802,7 +771,6 @@ "errors":[ {"shape":"NoSuchBucket"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketHEAD.html", "documentation":"

        You can use this operation to determine if a bucket exists and if you have permission to access it. The action returns a 200 OK if the bucket exists and you have permission to access it.

        If the bucket does not exist or you do not have permission to access it, the HEAD request returns a generic 400 Bad Request, 403 Forbidden, or 404 Not Found code. A message body is not included, so you cannot determine the exception beyond these HTTP response codes.

        Authentication and authorization

        General purpose buckets - Requests to public buckets that grant the s3:ListBucket permission publicly do not need to be signed. All other HeadBucket requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

        Directory buckets - You must use IAM credentials to authenticate and authorize your access to the HeadBucket API operation, instead of using the temporary security credentials through the CreateSession API operation.

        The Amazon Web Services CLI and SDKs handle authentication and authorization on your behalf.

        Permissions

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

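        For illustration only (not part of the service model): a minimal SDK for Java v2 sketch that probes bucket existence and accessibility, distinguishing the 404 and 400/403 cases described above. The bucket name is a placeholder.

          import software.amazon.awssdk.services.s3.S3Client;
          import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
          import software.amazon.awssdk.services.s3.model.NoSuchBucketException;
          import software.amazon.awssdk.services.s3.model.S3Exception;

          public class HeadBucketExample {
              public static void main(String[] args) {
                  try (S3Client s3 = S3Client.create()) {
                      try {
                          s3.headBucket(HeadBucketRequest.builder()
                                  .bucket("amzn-s3-demo-bucket") // placeholder bucket
                                  .build());
                          System.out.println("Bucket exists and is accessible");
                      } catch (NoSuchBucketException e) {
                          System.out.println("Bucket does not exist");
                      } catch (S3Exception e) {
                          // 400/403: the bucket may exist but is not accessible to this caller.
                          System.out.println("Not accessible: HTTP " + e.statusCode());
                      }
                  }
              }
          }
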
        " }, "HeadObject":{ @@ -816,7 +784,6 @@ "errors":[ {"shape":"NoSuchKey"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectHEAD.html", "documentation":"

        The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.

        A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. It's not possible to retrieve the exact exception behind these error codes.

        Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

        Permissions

        • General purpose bucket permissions - To use HEAD, you must have the s3:GetObject permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide. For more information about the permissions to S3 API operations by S3 resource types, see Required permissions for Amazon S3 API operations in the Amazon S3 User Guide.

          If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

          • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found error.

          • If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden error.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI and SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.

          If you enable x-amz-checksum-mode in the request and the object is encrypted with Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object.

        Encryption

        Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a HEAD request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error, because the encryption method can't be changed when you retrieve the object.

        If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:

        • x-amz-server-side-encryption-customer-algorithm

        • x-amz-server-side-encryption-customer-key

        • x-amz-server-side-encryption-customer-key-MD5

        For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.

        Directory bucket - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.

        Versioning
        • If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

        • If the specified version is a delete marker, the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header.

        • Directory buckets - Delete marker is not supported for directory buckets.

        • Directory buckets - S3 Versioning isn't enabled or supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can specify only null as the value of the versionId query parameter in the request.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        The following actions are related to HeadObject:

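        For illustration only (not part of the service model): a minimal SDK for Java v2 sketch that fetches object metadata and handles the 404 case. Bucket and key values are placeholders.

          import software.amazon.awssdk.services.s3.S3Client;
          import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
          import software.amazon.awssdk.services.s3.model.HeadObjectResponse;
          import software.amazon.awssdk.services.s3.model.NoSuchKeyException;

          public class HeadObjectExample {
              public static void main(String[] args) {
                  try (S3Client s3 = S3Client.create()) {
                      try {
                          HeadObjectResponse head = s3.headObject(HeadObjectRequest.builder()
                                  .bucket("amzn-s3-demo-bucket") // placeholder bucket
                                  .key("example-object.txt")     // placeholder key
                                  .build());
                          System.out.println(head.contentLength() + " bytes, last modified "
                                  + head.lastModified());
                      } catch (NoSuchKeyException e) {
                          System.out.println("Object not found");
                      }
                  }
              }
          }
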
        " }, "ListBucketAnalyticsConfigurations":{ @@ -876,9 +843,7 @@ }, "input":{"shape":"ListBucketsRequest"}, "output":{"shape":"ListBucketsOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTServiceGET.html", - "documentation":"

        This operation is not supported for directory buckets.

        Returns a list of all buckets owned by the authenticated sender of the request. To grant IAM permission to use this operation, you must add the s3:ListAllMyBuckets policy action.

        For information about Amazon S3 buckets, see Creating, configuring, and working with Amazon S3 buckets.

        We strongly recommend using only paginated ListBuckets requests. Unpaginated ListBuckets requests are only supported for Amazon Web Services accounts set to the default general purpose bucket quota of 10,000. If you have an approved general purpose bucket quota above 10,000, you must send paginated ListBuckets requests to list your account’s buckets. All unpaginated ListBuckets requests will be rejected for Amazon Web Services accounts with a general purpose bucket quota greater than 10,000.

        ", - "alias":"GetService" + "documentation":"

        This operation is not supported for directory buckets.

        Returns a list of all buckets owned by the authenticated sender of the request. To grant IAM permission to use this operation, you must add the s3:ListAllMyBuckets policy action.

        For information about Amazon S3 buckets, see Creating, configuring, and working with Amazon S3 buckets.

        We strongly recommend using only paginated ListBuckets requests. Unpaginated ListBuckets requests are only supported for Amazon Web Services accounts set to the default general purpose bucket quota of 10,000. If you have an approved general purpose bucket quota above 10,000, you must send paginated ListBuckets requests to list your account’s buckets. All unpaginated ListBuckets requests will be rejected for Amazon Web Services accounts with a general purpose bucket quota greater than 10,000.

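        For illustration only (not part of the service model): a minimal SDK for Java v2 sketch of the paginated ListBuckets call recommended above. This assumes an SDK release recent enough to generate the ListBuckets paginator.

          import software.amazon.awssdk.services.s3.S3Client;
          import software.amazon.awssdk.services.s3.model.Bucket;
          import software.amazon.awssdk.services.s3.model.ListBucketsRequest;

          public class ListBucketsExample {
              public static void main(String[] args) {
                  try (S3Client s3 = S3Client.create()) {
                      // The paginator sends continuation-token requests under the covers;
                      // it is generated only in SDK versions where ListBuckets is paginated.
                      ListBucketsRequest request = ListBucketsRequest.builder().build();
                      for (Bucket bucket : s3.listBucketsPaginator(request).buckets()) {
                          System.out.println(bucket.name());
                      }
                  }
              }
          }
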
        " }, "ListDirectoryBuckets":{ "name":"ListDirectoryBuckets", @@ -901,7 +866,6 @@ }, "input":{"shape":"ListMultipartUploadsRequest"}, "output":{"shape":"ListMultipartUploadsOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListMPUpload.html", "documentation":"

        This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload request, but has not yet been completed or aborted.

        Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload operation to abort all the in-progress multipart uploads.

        The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads request parameter. If there are more than 1,000 multipart uploads that satisfy your ListMultipartUploads request, the response returns an IsTruncated element with the value of true, a NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads requests. In these requests, include two query parameters: key-marker and upload-id-marker. Set the value of key-marker to the NextKeyMarker value from the previous response. Similarly, set the value of upload-id-marker to the NextUploadIdMarker value from the previous response.

        Directory buckets - The upload-id-marker element and the NextUploadIdMarker element aren't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of key-marker to the NextKeyMarker value from the previous response.

        For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI and SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.

        Sorting of multipart uploads in response
        • General purpose bucket - In the ListMultipartUploads response, the multipart uploads are sorted based on two criteria:

          • Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys.

          • Time-based sorting - For uploads that share the same object key, they are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later.

        • Directory bucket - In the ListMultipartUploads response, the multipart uploads aren't sorted lexicographically based on the object keys.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to ListMultipartUploads:

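        For illustration only (not part of the service model): a minimal SDK for Java v2 sketch of the key-marker/upload-id-marker pagination loop described above. The bucket name is a placeholder.

          import software.amazon.awssdk.services.s3.S3Client;
          import software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest;
          import software.amazon.awssdk.services.s3.model.ListMultipartUploadsResponse;

          public class ListMultipartUploadsExample {
              public static void main(String[] args) {
                  try (S3Client s3 = S3Client.create()) {
                      ListMultipartUploadsRequest request = ListMultipartUploadsRequest.builder()
                              .bucket("amzn-s3-demo-bucket") // placeholder bucket
                              .build();
                      while (true) {
                          ListMultipartUploadsResponse page = s3.listMultipartUploads(request);
                          page.uploads().forEach(u ->
                                  System.out.println(u.key() + " " + u.uploadId()));
                          if (!Boolean.TRUE.equals(page.isTruncated())) {
                              break;
                          }
                          // key-marker and upload-id-marker come from the previous response.
                          request = request.toBuilder()
                                  .keyMarker(page.nextKeyMarker())
                                  .uploadIdMarker(page.nextUploadIdMarker())
                                  .build();
                      }
                  }
              }
          }
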
        " }, "ListObjectVersions":{ @@ -912,9 +876,7 @@ }, "input":{"shape":"ListObjectVersionsRequest"}, "output":{"shape":"ListObjectVersionsOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETVersion.html", - "documentation":"

        This operation is not supported for directory buckets.

        Returns metadata about all versions of the objects in a bucket. You can also use request parameters as selection criteria to return metadata about a subset of all the object versions.

        To use this operation, you must have permission to perform the s3:ListBucketVersions action. Be aware of the name difference.

        A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately.

        To use this operation, you must have READ access to the bucket.

        The following operations are related to ListObjectVersions:

        ", - "alias":"GetBucketObjectVersions" + "documentation":"

        This operation is not supported for directory buckets.

        Returns metadata about all versions of the objects in a bucket. You can also use request parameters as selection criteria to return metadata about a subset of all the object versions.

        To use this operation, you must have permission to perform the s3:ListBucketVersions action. Be aware of the name difference.

        A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately.

        To use this operation, you must have READ access to the bucket.

        The following operations are related to ListObjectVersions:

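        For illustration only (not part of the service model): a minimal SDK for Java v2 sketch using the generated ListObjectVersions paginator. The bucket name is a placeholder.

          import software.amazon.awssdk.services.s3.S3Client;
          import software.amazon.awssdk.services.s3.model.ListObjectVersionsRequest;
          import software.amazon.awssdk.services.s3.model.ObjectVersion;

          public class ListObjectVersionsExample {
              public static void main(String[] args) {
                  try (S3Client s3 = S3Client.create()) {
                      ListObjectVersionsRequest request = ListObjectVersionsRequest.builder()
                              .bucket("amzn-s3-demo-bucket") // placeholder bucket
                              .build();
                      // The paginator follows the key/version-id markers automatically.
                      for (ObjectVersion version : s3.listObjectVersionsPaginator(request).versions()) {
                          System.out.println(version.key() + " " + version.versionId());
                      }
                  }
              }
          }
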
        " }, "ListObjects":{ "name":"ListObjects", @@ -927,9 +889,7 @@ "errors":[ {"shape":"NoSuchBucket"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html", - "documentation":"

        This operation is not supported for directory buckets.

        Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.

        This action has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects.

        The following operations are related to ListObjects:

        ", - "alias":"GetBucket" + "documentation":"

        This operation is not supported for directory buckets.

        Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.

        This action has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects.

        The following operations are related to ListObjects:

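        For illustration only (not part of the service model): a minimal SDK for Java v2 sketch that follows the recommendation above and uses ListObjectsV2 (via its paginator) rather than ListObjects. The bucket name is a placeholder.

          import software.amazon.awssdk.services.s3.S3Client;
          import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
          import software.amazon.awssdk.services.s3.model.S3Object;

          public class ListObjectsV2Example {
              public static void main(String[] args) {
                  try (S3Client s3 = S3Client.create()) {
                      ListObjectsV2Request request = ListObjectsV2Request.builder()
                              .bucket("amzn-s3-demo-bucket") // placeholder bucket
                              .build();
                      // The paginator transparently issues continuation-token requests.
                      for (S3Object object : s3.listObjectsV2Paginator(request).contents()) {
                          System.out.println(object.key() + " (" + object.size() + " bytes)");
                      }
                  }
              }
          }
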
        " }, "ListObjectsV2":{ "name":"ListObjectsV2", @@ -952,7 +912,6 @@ }, "input":{"shape":"ListPartsRequest"}, "output":{"shape":"ListPartsOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListParts.html", "documentation":"

        Lists the parts that have been uploaded for a specific multipart upload.

        To use this operation, you must provide the upload ID in the request. You obtain this upload ID by sending the initiate multipart upload request through CreateMultipartUpload.

        The ListParts request returns a maximum of 1,000 uploaded parts. The limit of 1,000 parts is also the default value. You can restrict the number of parts in a response by specifying the max-parts request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with the value of true, and a NextPartNumberMarker element. To list remaining uploaded parts, in subsequent ListParts requests, include the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value from the previous response.

        For more information on multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.

          If the upload was created using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must have permission to the kms:Decrypt action for the ListParts request to succeed.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI and SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to ListParts:

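        For illustration only (not part of the service model): a minimal SDK for Java v2 sketch of the part-number-marker pagination loop described above. The bucket, key, and upload ID are placeholders; the upload ID would come from a prior CreateMultipartUpload call.

          import software.amazon.awssdk.services.s3.S3Client;
          import software.amazon.awssdk.services.s3.model.ListPartsRequest;
          import software.amazon.awssdk.services.s3.model.ListPartsResponse;

          public class ListPartsExample {
              public static void main(String[] args) {
                  try (S3Client s3 = S3Client.create()) {
                      ListPartsRequest request = ListPartsRequest.builder()
                              .bucket("amzn-s3-demo-bucket") // placeholder bucket
                              .key("example-object.txt")     // placeholder key
                              .uploadId("example-upload-id") // placeholder upload ID
                              .build();
                      while (true) {
                          ListPartsResponse page = s3.listParts(request);
                          page.parts().forEach(p ->
                                  System.out.println("part " + p.partNumber() + ": " + p.size()));
                          if (!Boolean.TRUE.equals(page.isTruncated())) {
                              break;
                          }
                          // part-number-marker comes from NextPartNumberMarker in the previous response.
                          request = request.toBuilder()
                                  .partNumberMarker(page.nextPartNumberMarker())
                                  .build();
                      }
                  }
              }
          }
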
        " }, "PutBucketAccelerateConfiguration":{ @@ -978,7 +937,6 @@ "requestUri":"/{Bucket}?acl" }, "input":{"shape":"PutBucketAclRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTacl.html", "documentation":"

        This operation is not supported for directory buckets.

        Sets the permissions on an existing bucket using access control lists (ACL). For more information, see Using ACLs. To set the ACL of a bucket, you must have the WRITE_ACP permission.

        You can use one of the following two ways to set a bucket's permissions:

        • Specify the ACL in the request body

        • Specify permissions using request headers

        You cannot specify access permission using both the body and the request headers.

        Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.

        If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide.

        Permissions

        You can set access permissions by using one of the following methods:

        • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.

        • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

          You specify each grantee as a type=value pair, where the type is one of the following:

          • id – if the value specified is the canonical user ID of an Amazon Web Services account

          • uri – if you are granting permissions to a predefined group

          • emailAddress – if the value specified is the email address of an Amazon Web Services account

            Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

            • US East (N. Virginia)

            • US West (N. California)

            • US West (Oregon)

            • Asia Pacific (Singapore)

            • Asia Pacific (Sydney)

            • Asia Pacific (Tokyo)

            • Europe (Ireland)

            • South America (São Paulo)

            For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

          For example, the following x-amz-grant-write header grants create, overwrite, and delete objects permission to the LogDelivery group predefined by Amazon S3 and two Amazon Web Services accounts identified by their email addresses.

          x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\", id=\"111122223333\", id=\"555566667777\"

        You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

        Grantee Values

        You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

        • By the person's ID:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

          DisplayName is optional and ignored in the request

        • By URI:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

        • By Email address:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress>&</Grantee>

          The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

          Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

          • US East (N. Virginia)

          • US West (N. California)

          • US West (Oregon)

          • Asia Pacific (Singapore)

          • Asia Pacific (Sydney)

          • Asia Pacific (Tokyo)

          • Europe (Ireland)

          • South America (São Paulo)

          For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

        The following operations are related to PutBucketAcl:

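        For illustration only (not part of the service model): a minimal SDK for Java v2 sketch that applies a canned ACL, one of the mutually exclusive options described above. The bucket name is a placeholder.

          import software.amazon.awssdk.services.s3.S3Client;
          import software.amazon.awssdk.services.s3.model.BucketCannedACL;
          import software.amazon.awssdk.services.s3.model.PutBucketAclRequest;

          public class PutBucketAclExample {
              public static void main(String[] args) {
                  try (S3Client s3 = S3Client.create()) {
                      // A canned ACL cannot be combined with the x-amz-grant-* headers.
                      s3.putBucketAcl(PutBucketAclRequest.builder()
                              .bucket("amzn-s3-demo-bucket") // placeholder bucket
                              .acl(BucketCannedACL.PRIVATE)
                              .build());
                  }
              }
          }
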
        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1007,7 +965,6 @@ "requestUri":"/{Bucket}?cors" }, "input":{"shape":"PutBucketCorsRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTcors.html", "documentation":"

        This operation is not supported for directory buckets.

        Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 replaces it.

        To use this operation, you must be allowed to perform the s3:PutBucketCORS action. By default, the bucket owner has this permission and can grant it to others.

        You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com by using the browser's XMLHttpRequest capability.

        To enable cross-origin resource sharing (CORS) on a bucket, you add the cors subresource to the bucket. The cors subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.

        When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors configuration on the bucket and uses the first CORSRule rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:

        • The request's Origin header must match AllowedOrigin elements.

        • The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method header in the case of a pre-flight OPTIONS request must be one of the AllowedMethod elements.

        • Every header specified in the Access-Control-Request-Headers request header of a pre-flight request must match an AllowedHeader element.

        For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide.

        The following operations are related to PutBucketCors:

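        For illustration only (not part of the service model): a minimal SDK for Java v2 sketch that installs a single CORS rule matching the example origin above. The bucket name is a placeholder.

          import software.amazon.awssdk.services.s3.S3Client;
          import software.amazon.awssdk.services.s3.model.CORSConfiguration;
          import software.amazon.awssdk.services.s3.model.CORSRule;
          import software.amazon.awssdk.services.s3.model.PutBucketCorsRequest;

          public class PutBucketCorsExample {
              public static void main(String[] args) {
                  try (S3Client s3 = S3Client.create()) {
                      CORSRule rule = CORSRule.builder()
                              .allowedOrigins("http://www.example.com")
                              .allowedMethods("GET", "PUT")
                              .allowedHeaders("*")
                              .build();
                      // Replaces any existing cors configuration on the bucket.
                      s3.putBucketCors(PutBucketCorsRequest.builder()
                              .bucket("amzn-s3-demo-bucket") // placeholder bucket
                              .corsConfiguration(CORSConfiguration.builder().corsRules(rule).build())
                              .build());
                  }
              }
          }
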
        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1064,7 +1021,6 @@ "requestUri":"/{Bucket}?lifecycle" }, "input":{"shape":"PutBucketLifecycleRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html", "documentation":"

        This operation is not supported for directory buckets.

        For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.

        Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon S3 User Guide.

        By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the Amazon Web Services account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration permission.

        You can also explicitly deny permissions. An explicit deny supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

        • s3:DeleteObject

        • s3:DeleteObjectVersion

        • s3:PutLifecycleConfiguration

        For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 User Guide.

        For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.

        The following operations are related to PutBucketLifecycle:

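        For illustration only (not part of the service model): a minimal SDK for Java v2 sketch that uses PutBucketLifecycleConfiguration, the updated API that the note above recommends over this deprecated operation. Bucket name, rule ID, prefix, and expiration are placeholders.

          import software.amazon.awssdk.services.s3.S3Client;
          import software.amazon.awssdk.services.s3.model.BucketLifecycleConfiguration;
          import software.amazon.awssdk.services.s3.model.ExpirationStatus;
          import software.amazon.awssdk.services.s3.model.LifecycleExpiration;
          import software.amazon.awssdk.services.s3.model.LifecycleRule;
          import software.amazon.awssdk.services.s3.model.LifecycleRuleFilter;
          import software.amazon.awssdk.services.s3.model.PutBucketLifecycleConfigurationRequest;

          public class PutBucketLifecycleExample {
              public static void main(String[] args) {
                  try (S3Client s3 = S3Client.create()) {
                      // Expire objects under the "logs/" prefix after 30 days.
                      LifecycleRule rule = LifecycleRule.builder()
                              .id("expire-logs")
                              .status(ExpirationStatus.ENABLED)
                              .filter(LifecycleRuleFilter.builder().prefix("logs/").build())
                              .expiration(LifecycleExpiration.builder().days(30).build())
                              .build();
                      s3.putBucketLifecycleConfiguration(PutBucketLifecycleConfigurationRequest.builder()
                              .bucket("amzn-s3-demo-bucket") // placeholder bucket
                              .lifecycleConfiguration(
                                      BucketLifecycleConfiguration.builder().rules(rule).build())
                              .build());
                  }
              }
          }
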
        ", "deprecated":true, "httpChecksum":{ @@ -1099,7 +1055,6 @@ "requestUri":"/{Bucket}?logging" }, "input":{"shape":"PutBucketLoggingRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlogging.html", "documentation":"

        This operation is not supported for directory buckets.

        Sets the logging parameters for a bucket and specifies permissions for who can view and modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as the source bucket. To set the logging status of a bucket, you must be the bucket owner.

        The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request element to grant access to other people. The Permissions request element specifies the kind of access the grantee has to the logs.

        If the target bucket for log delivery uses the bucket owner enforced setting for S3 Object Ownership, you can't use the Grantee request element to grant access to others. Permissions can only be granted using policies. For more information, see Permissions for server access log delivery in the Amazon S3 User Guide.

        Grantee Values

        You can specify the person (grantee) to whom you're assigning access rights (by using request elements) in the following ways:

        • By the person's ID:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

          DisplayName is optional and ignored in the request.

        • By Email address:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

          The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

        • By URI:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

        To enable logging, you use LoggingEnabled and its children request elements. To disable logging, you use an empty BucketLoggingStatus request element:

        <BucketLoggingStatus xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\" />

        For more information about server access logging, see Server Access Logging in the Amazon S3 User Guide.

        For more information about creating a bucket, see CreateBucket. For more information about returning the logging status of a bucket, see GetBucketLogging.

        The following operations are related to PutBucketLogging:

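        For illustration only (not part of the service model): a minimal SDK for Java v2 sketch that enables server access logging to a placeholder target bucket, mirroring the LoggingEnabled/BucketLoggingStatus elements described above.

          import software.amazon.awssdk.services.s3.S3Client;
          import software.amazon.awssdk.services.s3.model.BucketLoggingStatus;
          import software.amazon.awssdk.services.s3.model.LoggingEnabled;
          import software.amazon.awssdk.services.s3.model.PutBucketLoggingRequest;

          public class PutBucketLoggingExample {
              public static void main(String[] args) {
                  try (S3Client s3 = S3Client.create()) {
                      s3.putBucketLogging(PutBucketLoggingRequest.builder()
                              .bucket("amzn-s3-demo-bucket") // placeholder source bucket
                              .bucketLoggingStatus(BucketLoggingStatus.builder()
                                      .loggingEnabled(LoggingEnabled.builder()
                                              .targetBucket("amzn-s3-demo-logs") // placeholder target
                                              .targetPrefix("access-logs/")
                                              .build())
                                      .build())
                              .build());
                      // To disable logging, send an empty status instead:
                      // BucketLoggingStatus.builder().build()
                  }
              }
          }
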
        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1128,7 +1083,6 @@ "requestUri":"/{Bucket}?notification" }, "input":{"shape":"PutBucketNotificationRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTnotification.html", "documentation":"

        This operation is not supported for directory buckets.

        No longer used, see the PutBucketNotificationConfiguration operation.

        ", "deprecated":true, "httpChecksum":{ @@ -1159,7 +1113,10 @@ }, "input":{"shape":"PutBucketOwnershipControlsRequest"}, "documentation":"

        This operation is not supported for directory buckets.

        Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying permissions in a policy.

        For information about Amazon S3 Object Ownership, see Using object ownership.

        The following operations are related to PutBucketOwnershipControls:

        ", - "httpChecksum":{"requestChecksumRequired":true}, + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + }, "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} } @@ -1171,7 +1128,6 @@ "requestUri":"/{Bucket}?policy" }, "input":{"shape":"PutBucketPolicyRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTpolicy.html", "documentation":"

        Applies an Amazon S3 bucket policy to an Amazon S3 bucket.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions

        If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the PutBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

        If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

        To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.

        • General purpose bucket permissions - The s3:PutBucketPolicy permission is required in a policy. For more information about general purpose bucket policies, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.

        • Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutBucketPolicy permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

        Example bucket policies

        General purpose buckets example bucket policies - See Bucket policy examples in the Amazon S3 User Guide.

        Directory bucket example bucket policies - See Example bucket policies for S3 Express One Zone in the Amazon S3 User Guide.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com.

        The following operations are related to PutBucketPolicy:

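        For illustration only (not part of the service model): a minimal SDK for Java v2 sketch that applies a deny-insecure-transport policy. The bucket name and policy document are placeholders.

          import software.amazon.awssdk.services.s3.S3Client;
          import software.amazon.awssdk.services.s3.model.PutBucketPolicyRequest;

          public class PutBucketPolicyExample {
              public static void main(String[] args) {
                  // Placeholder policy: deny all S3 actions over non-TLS connections.
                  String policy = "{\n"
                          + "  \"Version\": \"2012-10-17\",\n"
                          + "  \"Statement\": [{\n"
                          + "    \"Sid\": \"DenyInsecureTransport\",\n"
                          + "    \"Effect\": \"Deny\",\n"
                          + "    \"Principal\": \"*\",\n"
                          + "    \"Action\": \"s3:*\",\n"
                          + "    \"Resource\": \"arn:aws:s3:::amzn-s3-demo-bucket/*\",\n"
                          + "    \"Condition\": {\"Bool\": {\"aws:SecureTransport\": \"false\"}}\n"
                          + "  }]\n"
                          + "}";
                  try (S3Client s3 = S3Client.create()) {
                      s3.putBucketPolicy(PutBucketPolicyRequest.builder()
                              .bucket("amzn-s3-demo-bucket") // placeholder bucket
                              .policy(policy)
                              .build());
                  }
              }
          }
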
        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1204,7 +1160,6 @@ "requestUri":"/{Bucket}?requestPayment" }, "input":{"shape":"PutBucketRequestPaymentRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentPUT.html", "documentation":"

        This operation is not supported for directory buckets.

        Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. For more information, see Requester Pays Buckets.

        The following operations are related to PutBucketRequestPayment:

        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1221,7 +1176,6 @@ "requestUri":"/{Bucket}?tagging" }, "input":{"shape":"PutBucketTaggingRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTtagging.html", "documentation":"

        This operation is not supported for directory buckets.

        Sets the tags for a bucket.

        Use tags to organize your Amazon Web Services bill to reflect your own cost structure. To do this, sign up to get your Amazon Web Services account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging and Using Cost Allocation in Amazon S3 Bucket Tags.

        When this operation sets the tags for a bucket, it will overwrite any current tags the bucket already has. You cannot use this operation to add tags to an existing list of tags.

        To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

        PutBucketTagging has the following special errors. For more Amazon S3 errors, see Error Responses.

        • InvalidTag - The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.

        • MalformedXML - The XML provided does not match the schema.

        • OperationAborted - A conflicting conditional action is currently in progress against this resource. Please try again.

        • InternalError - The service was unable to apply the provided tag to the bucket.

        The following operations are related to PutBucketTagging:

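        For illustration only (not part of the service model): a minimal SDK for Java v2 sketch that sets a bucket tag; note that, as described above, the call replaces the bucket's entire tag set. Bucket name and tag values are placeholders.

          import software.amazon.awssdk.services.s3.S3Client;
          import software.amazon.awssdk.services.s3.model.PutBucketTaggingRequest;
          import software.amazon.awssdk.services.s3.model.Tag;
          import software.amazon.awssdk.services.s3.model.Tagging;

          public class PutBucketTaggingExample {
              public static void main(String[] args) {
                  try (S3Client s3 = S3Client.create()) {
                      // Overwrites any existing tags on the bucket.
                      s3.putBucketTagging(PutBucketTaggingRequest.builder()
                              .bucket("amzn-s3-demo-bucket") // placeholder bucket
                              .tagging(Tagging.builder()
                                      .tagSet(Tag.builder()
                                              .key("application")    // placeholder tag key
                                              .value("example-app")  // placeholder tag value
                                              .build())
                                      .build())
                              .build());
                  }
              }
          }
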
        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1238,7 +1192,6 @@ "requestUri":"/{Bucket}?versioning" }, "input":{"shape":"PutBucketVersioningRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html", "documentation":"

        This operation is not supported for directory buckets.

        When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. While this change is propagating, you might encounter intermittent HTTP 404 NoSuchKey errors for requests to objects created or updated after enabling versioning. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations (PUT or DELETE) on objects in the bucket.

        Sets the versioning state of an existing bucket.

        You can set the versioning state with one of the following values:

        Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.

        Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.

        If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value.

        In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the bucket versioning configuration, you must include the x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning state of the bucket.

        If you have an object expiration lifecycle configuration in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning.

        The following operations are related to PutBucketVersioning:

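        For illustration only (not part of the service model): a minimal SDK for Java v2 sketch that enables versioning; passing SUSPENDED instead disables it. The bucket name is a placeholder.

          import software.amazon.awssdk.services.s3.S3Client;
          import software.amazon.awssdk.services.s3.model.BucketVersioningStatus;
          import software.amazon.awssdk.services.s3.model.PutBucketVersioningRequest;
          import software.amazon.awssdk.services.s3.model.VersioningConfiguration;

          public class PutBucketVersioningExample {
              public static void main(String[] args) {
                  try (S3Client s3 = S3Client.create()) {
                      s3.putBucketVersioning(PutBucketVersioningRequest.builder()
                              .bucket("amzn-s3-demo-bucket") // placeholder bucket
                              .versioningConfiguration(VersioningConfiguration.builder()
                                      .status(BucketVersioningStatus.ENABLED) // or SUSPENDED
                                      .build())
                              .build());
                  }
              }
          }
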
        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1255,7 +1208,6 @@ "requestUri":"/{Bucket}?website" }, "input":{"shape":"PutBucketWebsiteRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html", "documentation":"

        This operation is not supported for directory buckets.

        Sets the configuration of the website that is specified in the website subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.

        This PUT action requires the S3:PutBucketWebsite permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite permission.

        To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide an index document name for the bucket.

        • WebsiteConfiguration

        • RedirectAllRequestsTo

        • HostName

        • Protocol

        If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.

        • WebsiteConfiguration

        • IndexDocument

        • Suffix

        • ErrorDocument

        • Key

        • RoutingRules

        • RoutingRule

        • Condition

        • HttpErrorCodeReturnedEquals

        • KeyPrefixEquals

        • Redirect

        • Protocol

        • HostName

        • ReplaceKeyPrefixWith

        • ReplaceKeyWith

        • HttpRedirectCode

        Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing rules, you can use object redirect. For more information, see Configuring an Object Redirect in the Amazon S3 User Guide.

        The maximum request length is limited to 128 KB.

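        A minimal AWS SDK for Java v2 sketch of the granular-control case described above, assuming an S3Client named s3; the bucket, document names, and key prefixes are hypothetical:

        import software.amazon.awssdk.services.s3.model.Condition;
        import software.amazon.awssdk.services.s3.model.ErrorDocument;
        import software.amazon.awssdk.services.s3.model.IndexDocument;
        import software.amazon.awssdk.services.s3.model.PutBucketWebsiteRequest;
        import software.amazon.awssdk.services.s3.model.Redirect;
        import software.amazon.awssdk.services.s3.model.RoutingRule;
        import software.amazon.awssdk.services.s3.model.WebsiteConfiguration;

        // Index and error documents plus one routing rule that rewrites a key prefix.
        s3.putBucketWebsite(PutBucketWebsiteRequest.builder()
                .bucket("amzn-s3-demo-bucket")
                .websiteConfiguration(WebsiteConfiguration.builder()
                        .indexDocument(IndexDocument.builder().suffix("index.html").build())
                        .errorDocument(ErrorDocument.builder().key("error.html").build())
                        .routingRules(RoutingRule.builder()
                                .condition(Condition.builder().keyPrefixEquals("docs/").build())
                                .redirect(Redirect.builder().replaceKeyPrefixWith("documents/").build())
                                .build())
                        .build())
                .build());
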
        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1279,7 +1231,6 @@ {"shape":"TooManyParts"}, {"shape":"EncryptionTypeMismatch"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html", "documentation":"

        Adds an object to a bucket.

        • Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use PutObject to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values.

        • If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All objects written to the bucket by any account will be owned by the bucket owner.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior:

        • S3 Object Lock - To prevent objects from being deleted or overwritten, you can use Amazon S3 Object Lock in the Amazon S3 User Guide.

          This functionality is not supported for directory buckets.

        • If-None-Match - Uploads the object only if the object key name does not already exist in the specified bucket. Otherwise, Amazon S3 returns a 412 Precondition Failed error. If a conflicting operation occurs during the upload, S3 returns a 409 ConditionalRequestConflict response. On a 409 failure, retry the upload.

          Expects the * character (asterisk).

          For more information, see Add preconditions to S3 operations with conditional requests in the Amazon S3 User Guide or RFC 7232.

          This functionality is not supported for S3 on Outposts.

        • S3 Versioning - When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID of that object being stored in Amazon S3. You can retrieve, replace, or delete any version of the object. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets in the Amazon S3 User Guide. For information about returning the versioning state of a bucket, see GetBucketVersioning.

          This functionality is not supported for directory buckets.

        Permissions
        • General purpose bucket permissions - The following permissions are required in your policies when your PutObject request includes specific headers.

          • s3:PutObject - To successfully complete the PutObject request, you must always have the s3:PutObject permission on a bucket to add an object to it.

          • s3:PutObjectAcl - To successfully change the object's ACL with your PutObject request, you must have the s3:PutObjectAcl permission.

          • s3:PutObjectTagging - To successfully set the tag-set with your PutObject request, you must have the s3:PutObjectTagging permission.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI and SDKs create the session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.

          If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

        Data integrity with Content-MD5
        • General purpose bucket - To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to the calculated MD5 value.

        • Directory bucket - This functionality is not supported for directory buckets.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        For more information about related Amazon S3 APIs, see the following:

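        A minimal AWS SDK for Java v2 sketch of the If-None-Match conditional write described above, assuming an S3Client named s3; the bucket, key, and body are hypothetical:

        import software.amazon.awssdk.core.sync.RequestBody;
        import software.amazon.awssdk.services.s3.model.PutObjectRequest;

        // ifNoneMatch("*") makes the write succeed only if the key does not already
        // exist; otherwise S3 returns 412 Precondition Failed (or 409 on a conflict).
        s3.putObject(PutObjectRequest.builder()
                        .bucket("amzn-s3-demo-bucket")
                        .key("my-key")
                        .ifNoneMatch("*")
                        .build(),
                RequestBody.fromString("hello"));
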
        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1297,7 +1248,6 @@ "errors":[ {"shape":"NoSuchKey"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUTacl.html", "documentation":"

        This operation is not supported for directory buckets.

        Uses the acl subresource to set the access control list (ACL) permissions for a new or existing object in an S3 bucket. You must have the WRITE_ACP permission to set the ACL of an object. For more information, see What permissions can I grant? in the Amazon S3 User Guide.

        This functionality is not supported for Amazon S3 on Outposts.

        Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 User Guide.

        If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide.

        Permissions

        You can set access permissions using one of the following methods:

        • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.

        • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

          You specify each grantee as a type=value pair, where the type is one of the following:

          • id – if the value specified is the canonical user ID of an Amazon Web Services account

          • uri – if you are granting permissions to a predefined group

          • emailAddress – if the value specified is the email address of an Amazon Web Services account

            Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

            • US East (N. Virginia)

            • US West (N. California)

            • US West (Oregon)

            • Asia Pacific (Singapore)

            • Asia Pacific (Sydney)

            • Asia Pacific (Tokyo)

            • Europe (Ireland)

            • South America (São Paulo)

            For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

          For example, the following x-amz-grant-read header grants list objects permission to the two Amazon Web Services accounts identified by their email addresses.

          x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

        You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

        Grantee Values

        You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

        • By the person's ID:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

          DisplayName is optional and ignored in the request.

        • By URI:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

        • By Email address:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

          The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

          Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

          • US East (N. Virginia)

          • US West (N. California)

          • US West (Oregon)

          • Asia Pacific (Singapore)

          • Asia Pacific (Sydney)

          • Asia Pacific (Tokyo)

          • Europe (Ireland)

          • South America (São Paulo)

          For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

        Versioning

        The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId subresource.

        The following operations are related to PutObjectAcl:

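        A minimal AWS SDK for Java v2 sketch of the canned-ACL path described above, assuming an S3Client named s3 and a hypothetical bucket and key; remember that a canned ACL and the explicit x-amz-grant-* headers are mutually exclusive:

        import software.amazon.awssdk.services.s3.model.ObjectCannedACL;
        import software.amazon.awssdk.services.s3.model.PutObjectAclRequest;

        // Sets the ACL of the current object version via the x-amz-acl header.
        s3.putObjectAcl(PutObjectAclRequest.builder()
                .bucket("amzn-s3-demo-bucket")
                .key("my-key")
                .acl(ObjectCannedACL.PUBLIC_READ)
                .build());
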
        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1387,9 +1337,7 @@ "errors":[ {"shape":"ObjectAlreadyInActiveTierError"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectRestore.html", "documentation":"

        This operation is not supported for directory buckets.

        Restores an archived copy of an object back into Amazon S3.

        This functionality is not supported for Amazon S3 on Outposts.

        This action performs the following types of requests:

        • restore an archive - Restore an archived object

        For more information about the S3 structure in the request body, see the following:

        Permissions

        To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

        Restoring objects

        Objects that you archive to the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier.

        To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

        When restoring an archived object, you can specify one of the following data access tier options in the Tier element of the request body:

        • Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

        • Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.

        • Bulk - Bulk retrievals are free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

        For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon S3 User Guide.

        You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide.

        To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide.

        After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

        If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in the Amazon S3 User Guide.

        Responses

        A successful action returns either the 200 OK or 202 Accepted status code.

        • If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response.

        • If the object is previously restored, Amazon S3 returns 200 OK in the response.

        • Special errors:

          • Code: RestoreAlreadyInProgress

          • Cause: Object restore is already in progress.

          • HTTP Status Code: 409 Conflict

          • SOAP Fault Code Prefix: Client

          • Code: GlacierExpeditedRetrievalNotAvailable

          • Cause: expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)

          • HTTP Status Code: 503

          • SOAP Fault Code Prefix: N/A

        The following operations are related to RestoreObject:

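        A minimal AWS SDK for Java v2 sketch of a Standard-tier restore, assuming an S3Client named s3; the bucket, key, and 10-day duration are hypothetical:

        import software.amazon.awssdk.services.s3.model.GlacierJobParameters;
        import software.amazon.awssdk.services.s3.model.RestoreObjectRequest;
        import software.amazon.awssdk.services.s3.model.RestoreRequest;
        import software.amazon.awssdk.services.s3.model.Tier;

        // Request a temporary copy of an archived object for 10 days.
        s3.restoreObject(RestoreObjectRequest.builder()
                .bucket("amzn-s3-demo-bucket")
                .key("archived-key")
                .restoreRequest(RestoreRequest.builder()
                        .days(10)
                        .glacierJobParameters(GlacierJobParameters.builder()
                                .tier(Tier.STANDARD)
                                .build())
                        .build())
                .build());
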
        ", - "alias":"PostObjectRestore", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":false @@ -1417,7 +1365,6 @@ }, "input":{"shape":"UploadPartRequest"}, "output":{"shape":"UploadPartOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPart.html", "documentation":"

        Uploads a part in a multipart upload.

        In this operation, you provide new data as a part of an object in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.

        You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier that you must include in your upload part request.

        Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten.

        For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.

        After you initiate a multipart upload and upload one or more parts, you must either complete or abort the multipart upload to stop getting charged for storage of the uploaded parts. Only after you complete or abort the multipart upload does Amazon S3 free up the parts storage and stop charging you for it.

        For more information on multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - To perform a multipart upload with encryption using a Key Management Service key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs.

          These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI and SDKs create the session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.

          If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

        Data integrity

        General purpose bucket - To ensure that data is not corrupted traversing the network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. If the upload request is signed with Signature Version 4, then Amazon S3 uses the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information, see Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4).

        Directory buckets - MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity.

        Encryption
        • General purpose bucket - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C).

          Server-side encryption is supported by the S3 Multipart Upload operations. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.

          If you request server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following request headers.

          • x-amz-server-side-encryption-customer-algorithm

          • x-amz-server-side-encryption-customer-key

          • x-amz-server-side-encryption-customer-key-MD5

          For more information, see Using Server-Side Encryption in the Amazon S3 User Guide.

        • Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms).

        Special errors
        • Error Code: NoSuchUpload

          • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

          • HTTP Status Code: 404 Not Found

          • SOAP Fault Code Prefix: Client

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to UploadPart:

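        A minimal AWS SDK for Java v2 sketch of the initiate/upload/complete flow, assuming an S3Client named s3 and a byte array partBytes holding the part data; the bucket and key are hypothetical:

        import software.amazon.awssdk.core.sync.RequestBody;
        import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest;
        import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload;
        import software.amazon.awssdk.services.s3.model.CompletedPart;
        import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
        import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse;
        import software.amazon.awssdk.services.s3.model.UploadPartRequest;
        import software.amazon.awssdk.services.s3.model.UploadPartResponse;

        // 1. Initiate the upload to obtain the upload ID.
        CreateMultipartUploadResponse mpu = s3.createMultipartUpload(
                CreateMultipartUploadRequest.builder()
                        .bucket("amzn-s3-demo-bucket").key("big-object").build());

        // 2. Upload part 1; reusing a part number overwrites the earlier part.
        UploadPartResponse part1 = s3.uploadPart(UploadPartRequest.builder()
                        .bucket("amzn-s3-demo-bucket").key("big-object")
                        .uploadId(mpu.uploadId()).partNumber(1).build(),
                RequestBody.fromBytes(partBytes));

        // 3. Complete (or abort) so S3 stops charging for the stored parts.
        s3.completeMultipartUpload(CompleteMultipartUploadRequest.builder()
                .bucket("amzn-s3-demo-bucket").key("big-object")
                .uploadId(mpu.uploadId())
                .multipartUpload(CompletedMultipartUpload.builder()
                        .parts(CompletedPart.builder()
                                .partNumber(1).eTag(part1.eTag()).build())
                        .build())
                .build());
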
        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1432,7 +1379,6 @@ }, "input":{"shape":"UploadPartCopyRequest"}, "output":{"shape":"UploadPartCopyOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html", "documentation":"

        Uploads a part by copying data from an existing object as the data source. To specify the data source, you add the request header x-amz-copy-source in your request. To specify a byte range, you add the request header x-amz-copy-source-range in your request.

        For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.

        Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request.

        You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request.

        For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Authentication and authorization

        All UploadPartCopy requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

        Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy API operation, instead of using the temporary security credentials through the CreateSession API operation.

        The Amazon Web Services CLI and SDKs handle authentication and authorization on your behalf.

        Permissions

        You must have READ access to the source object and WRITE access to the destination bucket.

        • General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy operation.

          • If the source object is in a general purpose bucket, you must have the s3:GetObject permission to read the source object that is being copied.

          • If the destination bucket is a general purpose bucket, you must have the s3:PutObject permission to write the object copy to the destination bucket.

          • To perform a multipart upload with encryption using a Key Management Service key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide.

        • Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy operation.

          • If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.

          • If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key cannot be set to ReadOnly on the copy destination.

          If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

          For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.

        Encryption
        • General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart.

        • Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.

          For directory buckets, when you perform a CreateMultipartUpload operation and an UploadPartCopy operation, the request headers you provide in the CreateMultipartUpload request must match the default encryption configuration of the destination bucket.

          S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets through UploadPartCopy. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.

        Special errors
        • Error Code: NoSuchUpload

          • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

          • HTTP Status Code: 404 Not Found

        • Error Code: InvalidRequest

          • Description: The specified copy source is not supported as a byte-range copy source.

          • HTTP Status Code: 400 Bad Request

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to UploadPartCopy:

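        A minimal AWS SDK for Java v2 sketch, assuming an S3Client named s3, an uploadId from a prior CreateMultipartUpload on the destination, and the source/destination convenience setters the Java SDK provides for copy-style requests; bucket names, keys, and the byte range are hypothetical:

        import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest;

        // Copy the first 5 MiB of the source object into part 1 of the open upload.
        s3.uploadPartCopy(UploadPartCopyRequest.builder()
                .sourceBucket("amzn-s3-demo-bucket1")
                .sourceKey("source-object")
                .destinationBucket("amzn-s3-demo-bucket2")
                .destinationKey("target-object")
                .uploadId(uploadId)
                .partNumber(1)
                .copySourceRange("bytes=0-5242879")
                .build());
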
        ", "staticContextParams":{ "DisableS3ExpressSessionAuth":{"value":true} @@ -1730,16 +1676,14 @@ }, "BucketAlreadyExists":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The requested bucket name is not available. The bucket namespace is shared by all users of the system. Select a different name and try again.

        ", "error":{"httpStatusCode":409}, "exception":true }, "BucketAlreadyOwnedByYou":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The bucket you tried to create already exists, and you own it. Amazon S3 returns this error in all Amazon Web Services Regions except in the North Virginia Region. For legacy compatibility, if you re-create an existing bucket that you already own in the North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access control lists (ACLs).

        ", "error":{"httpStatusCode":409}, "exception":true @@ -2378,8 +2322,7 @@ "ContentType":{"type":"string"}, "ContinuationEvent":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        ", "event":true }, @@ -2807,7 +2750,7 @@ }, "Location":{ "shape":"LocationInfo", - "documentation":"

        Specifies the location where the bucket will be created.

        Directory buckets - The location type is Availability Zone or Local Zone. To use the Local Zone location type, your account must be enabled for Dedicated Local Zones. Otherwise, you get an HTTP 403 Forbidden error with the error code AccessDenied. To learn more, see Enable accounts for Dedicated Local Zones in the Amazon S3 User Guide.

        This functionality is only supported by directory buckets.

        " + "documentation":"

        Specifies the location where the bucket will be created.

        Directory buckets - The location type is Availability Zone or Local Zone. To use the Local Zone location type, your account must be enabled for Local Zones. Otherwise, you get an HTTP 403 Forbidden error with the error code AccessDenied. To learn more, see Enable accounts for Local Zones in the Amazon S3 User Guide.

        This functionality is only supported by directory buckets.

        " }, "Bucket":{ "shape":"BucketInfo", @@ -4031,8 +3974,7 @@ }, "EncryptionTypeMismatch":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The existing object was created with a different encryption type. Subsequent write requests must include the appropriate encryption parameters in the request or while creating the session.

        ", "error":{"httpStatusCode":400}, "exception":true @@ -4043,8 +3985,7 @@ }, "EndEvent":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        A message that indicates the request is complete and no more messages will be sent. You should not assume that the request is complete until the client receives an EndEvent.

        ", "event":true }, @@ -4137,8 +4078,7 @@ }, "EventBridgeConfiguration":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        A container for specifying the configuration for Amazon EventBridge.

        " }, "EventList":{ @@ -5099,13 +5039,13 @@ }, "MaxParts":{ "shape":"MaxParts", - "documentation":"

        Sets the maximum number of parts to return.

        ", + "documentation":"

        Sets the maximum number of parts to return. For more information, see Uploading and copying objects using multipart upload in Amazon S3 in the Amazon Simple Storage Service User Guide.

        ", "location":"header", "locationName":"x-amz-max-parts" }, "PartNumberMarker":{ "shape":"PartNumberMarker", - "documentation":"

        Specifies the part after which listing should begin. Only parts with higher part numbers will be listed.

        ", + "documentation":"

        Specifies the part after which listing should begin. Only parts with higher part numbers will be listed. For more information, see Uploading and copying objects using multipart upload in Amazon S3 in the Amazon Simple Storage Service User Guide.

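        A minimal AWS SDK for Java v2 sketch of paging through parts with these two parameters, assuming an S3Client named s3 and an open multipart upload's uploadId; the bucket and key are hypothetical:

        import software.amazon.awssdk.services.s3.model.ListPartsRequest;
        import software.amazon.awssdk.services.s3.model.ListPartsResponse;

        ListPartsResponse page = s3.listParts(ListPartsRequest.builder()
                .bucket("amzn-s3-demo-bucket").key("big-object")
                .uploadId(uploadId)
                .maxParts(100)                              // x-amz-max-parts
                .build());
        while (Boolean.TRUE.equals(page.isTruncated())) {
            // Resume after the last part returned; x-amz-part-number-marker.
            page = s3.listParts(ListPartsRequest.builder()
                    .bucket("amzn-s3-demo-bucket").key("big-object")
                    .uploadId(uploadId)
                    .maxParts(100)
                    .partNumberMarker(page.nextPartNumberMarker())
                    .build());
        }
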
        ", "location":"header", "locationName":"x-amz-part-number-marker" }, @@ -6423,16 +6363,14 @@ }, "InvalidRequest":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        You may receive this error in multiple cases. Depending on the reason for the error, you may receive one of the messages below:

        • Cannot specify both a write offset value and user-defined object metadata for existing objects.

        • Checksum Type mismatch occurred, expected checksum Type: sha1, actual checksum Type: crc32c.

        • Request body cannot be empty when 'write offset' is specified.

        ", "error":{"httpStatusCode":400}, "exception":true }, "InvalidWriteOffset":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The write offset value that you specified does not match the current object size.

        ", "error":{"httpStatusCode":400}, "exception":true @@ -6745,7 +6683,7 @@ }, "Filter":{ "shape":"LifecycleRuleFilter", - "documentation":"

        The Filter is used to identify objects that a Lifecycle Rule applies to. A Filter must have exactly one of Prefix, Tag, or And specified. Filter is required if the LifecycleRule does not contain a Prefix element.

        Tag filters are not supported for directory buckets.

        " + "documentation":"

        The Filter is used to identify objects that a Lifecycle Rule applies to. A Filter must have exactly one of Prefix, Tag, ObjectSizeGreaterThan, ObjectSizeLessThan, or And specified. Filter is required if the LifecycleRule does not contain a Prefix element.

        Tag filters are not supported for directory buckets.

        " }, "Status":{ "shape":"ExpirationStatus", @@ -8001,24 +7939,21 @@ "NextVersionIdMarker":{"type":"string"}, "NoSuchBucket":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified bucket does not exist.

        ", "error":{"httpStatusCode":404}, "exception":true }, "NoSuchKey":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified key does not exist.

        ", "error":{"httpStatusCode":404}, "exception":true }, "NoSuchUpload":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified multipart upload does not exist.

        ", "error":{"httpStatusCode":404}, "exception":true @@ -8160,8 +8095,7 @@ }, "ObjectAlreadyInActiveTierError":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        This action is not allowed against this storage tier.

        ", "error":{"httpStatusCode":403}, "exception":true @@ -8317,8 +8251,7 @@ "ObjectLockToken":{"type":"string"}, "ObjectNotInActiveTierError":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The source object of the COPY action is not in the active tier and is only stored in Amazon S3 Glacier.

        ", "error":{"httpStatusCode":403}, "exception":true @@ -8531,8 +8464,7 @@ }, "ParquetInput":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Container for Parquet.

        " }, "Part":{ @@ -9270,6 +9202,12 @@ "documentation":"

        The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) that you want to apply to this Amazon S3 bucket.

        ", "locationName":"OwnershipControls", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "documentation":"

        Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide.

        If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter.

        ", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" } }, "payload":"OwnershipControls" @@ -10609,7 +10547,7 @@ }, "RequestCharged":{ "type":"string", - "documentation":"

        If present, indicates that the requester was successfully charged for the request.

        This functionality is not supported for directory buckets.

        ", + "documentation":"

        If present, indicates that the requester was successfully charged for the request. For more information, see Using Requester Pays buckets for storage transfers and usage in the Amazon Simple Storage Service User Guide.

        This functionality is not supported for directory buckets.

        ", "enum":["requester"] }, "RequestPayer":{ @@ -10959,8 +10897,7 @@ }, "SSES3":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Specifies the use of SSE-S3 to encrypt delivered inventory reports.

        ", "locationName":"SSE-S3" }, @@ -11222,8 +11159,7 @@ }, "SimplePrefix":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        To use simple format for S3 keys for log objects, set SimplePrefix to an empty object.

        [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]

        ", "locationName":"SimplePrefix" }, @@ -11472,8 +11408,7 @@ "Token":{"type":"string"}, "TooManyParts":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        You have attempted to add more parts than the maximum of 10,000 that are allowed for this object. You can use the CopyObject operation to copy this object to another object and then add more data to the newly copied object.

        ", "error":{"httpStatusCode":400}, "exception":true diff --git a/services/s3control/pom.xml b/services/s3control/pom.xml index 8ab86520146c..ef4a72ce8ba9 100644 --- a/services/s3control/pom.xml +++ b/services/s3control/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT s3control AWS Java SDK :: Services :: Amazon S3 Control diff --git a/services/s3outposts/pom.xml b/services/s3outposts/pom.xml index c117b95fe8a4..f410d512a8fd 100644 --- a/services/s3outposts/pom.xml +++ b/services/s3outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT s3outposts AWS Java SDK :: Services :: S3 Outposts diff --git a/services/s3tables/pom.xml b/services/s3tables/pom.xml index 8021514afb13..fd2e26c64395 100644 --- a/services/s3tables/pom.xml +++ b/services/s3tables/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT s3tables AWS Java SDK :: Services :: S3 Tables diff --git a/services/s3tables/src/main/resources/codegen-resources/service-2.json b/services/s3tables/src/main/resources/codegen-resources/service-2.json index a60c1404d504..281f0dcbdac2 100644 --- a/services/s3tables/src/main/resources/codegen-resources/service-2.json +++ b/services/s3tables/src/main/resources/codegen-resources/service-2.json @@ -49,7 +49,7 @@ {"shape":"ConflictException"}, {"shape":"BadRequestException"} ], - "documentation":"

        Creates a new table associated with the given namespace in a table bucket. For more information, see Creating an Amazon S3 table in the Amazon Simple Storage Service User Guide.

        Permissions
        • You must have the s3tables:CreateTable permission to use this operation.

        • If you use this operation with the optional metadata request parameter you must have the s3tables:PutTableData permission.

        • If you use this operation with the optional encryptionConfiguration request parameter you must have the s3tables:PutTableEncryption permission.

        Additionally,

        " + "documentation":"

        Creates a new table associated with the given namespace in a table bucket. For more information, see Creating an Amazon S3 table in the Amazon Simple Storage Service User Guide.

        Permissions
        • You must have the s3tables:CreateTable permission to use this operation.

        • If you use this operation with the optional metadata request parameter you must have the s3tables:PutTableData permission.

        • If you use this operation with the optional encryptionConfiguration request parameter you must have the s3tables:PutTableEncryption permission.

        Additionally, if you choose SSE-KMS encryption, you must grant the S3 Tables maintenance principal access to your KMS key. For more information, see Permissions requirements for S3 Tables SSE-KMS encryption.

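        A minimal AWS SDK for Java v2 sketch of this operation, assuming an S3TablesClient named s3tables, an existing table bucket ARN in tableBucketArn, and the standard generated setter names; the namespace, table name, and Iceberg format choice are hypothetical:

        import software.amazon.awssdk.services.s3tables.S3TablesClient;
        import software.amazon.awssdk.services.s3tables.model.CreateTableRequest;
        import software.amazon.awssdk.services.s3tables.model.OpenTableFormat;

        S3TablesClient s3tables = S3TablesClient.create();
        // Create an Iceberg table in the given namespace of the table bucket.
        s3tables.createTable(CreateTableRequest.builder()
                .tableBucketARN(tableBucketArn)
                .namespace("my_namespace")
                .name("my_table")
                .format(OpenTableFormat.ICEBERG)
                .build());
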
        " }, "CreateTableBucket":{ "name":"CreateTableBucket", @@ -208,7 +208,7 @@ "name":"GetTable", "http":{ "method":"GET", - "requestUri":"/tables/{tableBucketARN}/{namespace}/{name}", + "requestUri":"/get-table", "responseCode":200 }, "input":{"shape":"GetTableRequest"}, @@ -471,7 +471,7 @@ {"shape":"ConflictException"}, {"shape":"BadRequestException"} ], - "documentation":"

        Sets the encryption configuration for a table bucket.

        Permissions

        You must have the s3tables:PutTableBucketEncryption permission to use this operation.

        If you choose SSE-KMS encryption you must grant the S3 Tables maintenance principal access to your KMS key. For more information, see Permissions requirements for S3 Tables SSE-KMS encryption

        ", + "documentation":"

        Sets the encryption configuration for a table bucket.

        Permissions

        You must have the s3tables:PutTableBucketEncryption permission to use this operation.

        If you choose SSE-KMS encryption, you must grant the S3 Tables maintenance principal access to your KMS key. For more information, see Permissions requirements for S3 Tables SSE-KMS encryption in the Amazon Simple Storage Service User Guide.

        ", "idempotent":true }, "PutTableBucketMaintenanceConfiguration":{ @@ -1179,7 +1179,7 @@ }, "namespace":{ "shape":"NamespaceName", - "documentation":"

        The name of the namespace the table is associated with.

        </p> 
        ", + "documentation":"

        The name of the namespace the table is associated with.

        ", "location":"uri", "locationName":"namespace" }, @@ -1297,29 +1297,30 @@ }, "GetTableRequest":{ "type":"structure", - "required":[ - "tableBucketARN", - "namespace", - "name" - ], "members":{ "tableBucketARN":{ "shape":"TableBucketARN", "documentation":"

        The Amazon Resource Name (ARN) of the table bucket associated with the table.

        ", - "location":"uri", + "location":"querystring", "locationName":"tableBucketARN" }, "namespace":{ "shape":"NamespaceName", "documentation":"

        The name of the namespace the table is associated with.

        ", - "location":"uri", + "location":"querystring", "locationName":"namespace" }, "name":{ "shape":"TableName", "documentation":"

        The name of the table.

        ", - "location":"uri", + "location":"querystring", "locationName":"name" + }, + "tableArn":{ + "shape":"TableARN", + "documentation":"

        The Amazon Resource Name (ARN) of the table.

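        Given the new querystring binding and optional members above, a minimal AWS SDK for Java v2 sketch that looks a table up directly by its ARN, assuming an S3TablesClient named s3tables and a hypothetical tableArn variable; the tableArn setter name follows standard codegen conventions, and the older tableBucketARN/namespace/name triple remains a valid alternative:

        import software.amazon.awssdk.services.s3tables.model.GetTableRequest;

        // Identify the table by its ARN instead of bucket ARN + namespace + name.
        s3tables.getTable(GetTableRequest.builder()
                .tableArn(tableArn)
                .build());
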
        ", + "location":"querystring", + "locationName":"tableArn" } } }, diff --git a/services/sagemaker/pom.xml b/services/sagemaker/pom.xml index feedd72f1d14..99b07319c32f 100644 --- a/services/sagemaker/pom.xml +++ b/services/sagemaker/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 sagemaker diff --git a/services/sagemaker/src/main/resources/codegen-resources/service-2.json b/services/sagemaker/src/main/resources/codegen-resources/service-2.json index cd292b26a09e..33421aff6691 100644 --- a/services/sagemaker/src/main/resources/codegen-resources/service-2.json +++ b/services/sagemaker/src/main/resources/codegen-resources/service-2.json @@ -7023,6 +7023,10 @@ }, "documentation":"

        The SageMaker Canvas application settings.

        " }, + "CapacityReservationPreference":{ + "type":"string", + "enum":["capacity-reservations-only"] + }, "CapacitySize":{ "type":"structure", "required":[ @@ -16578,6 +16582,10 @@ "shape":"TrackingServerStatus", "documentation":"

        The current creation status of the described MLflow Tracking Server.

        " }, + "TrackingServerMaintenanceStatus":{ + "shape":"TrackingServerMaintenanceStatus", + "documentation":"

        The current maintenance status of the described MLflow Tracking Server.

        " + }, "IsActive":{ "shape":"IsTrackingServerActive", "documentation":"

        Whether the described MLflow Tracking Server is currently active.

        " @@ -19322,6 +19330,33 @@ }, "documentation":"

        A collection of EBS storage settings that apply to both private and shared spaces.

        " }, + "Ec2CapacityReservation":{ + "type":"structure", + "members":{ + "Ec2CapacityReservationId":{ + "shape":"Ec2CapacityReservationId", + "documentation":"

        The unique identifier for an EC2 capacity reservation that's part of the ML capacity reservation.

        " + }, + "TotalInstanceCount":{ + "shape":"TaskCount", + "documentation":"

        The number of instances that you allocated to the EC2 capacity reservation.

        " + }, + "AvailableInstanceCount":{ + "shape":"TaskCount", + "documentation":"

        The number of instances that are currently available in the EC2 capacity reservation.

        " + }, + "UsedByCurrentEndpoint":{ + "shape":"TaskCount", + "documentation":"

        The number of instances from the EC2 capacity reservation that are being used by the endpoint.

        " + } + }, + "documentation":"

        The EC2 capacity reservations that are shared to an ML capacity reservation.

        " + }, + "Ec2CapacityReservationId":{"type":"string"}, + "Ec2CapacityReservationsList":{ + "type":"list", + "member":{"shape":"Ec2CapacityReservation"} + }, "Edge":{ "type":"structure", "members":{ @@ -29338,6 +29373,12 @@ "min":0, "pattern":"1|2" }, + "MlReservationArn":{ + "type":"string", + "max":258, + "min":20, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:ml-reservation/.*" + }, "MlTools":{ "type":"string", "enum":[ @@ -33886,7 +33927,34 @@ "ml.c6i.12xlarge", "ml.c6i.16xlarge", "ml.c6i.24xlarge", - "ml.c6i.32xlarge" + "ml.c6i.32xlarge", + "ml.m7i.large", + "ml.m7i.xlarge", + "ml.m7i.2xlarge", + "ml.m7i.4xlarge", + "ml.m7i.8xlarge", + "ml.m7i.12xlarge", + "ml.m7i.16xlarge", + "ml.m7i.24xlarge", + "ml.m7i.48xlarge", + "ml.c7i.large", + "ml.c7i.xlarge", + "ml.c7i.2xlarge", + "ml.c7i.4xlarge", + "ml.c7i.8xlarge", + "ml.c7i.12xlarge", + "ml.c7i.16xlarge", + "ml.c7i.24xlarge", + "ml.c7i.48xlarge", + "ml.r7i.large", + "ml.r7i.xlarge", + "ml.r7i.2xlarge", + "ml.r7i.4xlarge", + "ml.r7i.8xlarge", + "ml.r7i.12xlarge", + "ml.r7i.16xlarge", + "ml.r7i.24xlarge", + "ml.r7i.48xlarge" ] }, "ProcessingJob":{ @@ -34301,6 +34369,10 @@ "InferenceAmiVersion":{ "shape":"ProductionVariantInferenceAmiVersion", "documentation":"

        Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads.

        By selecting an AMI version, you can ensure that your inference environment is compatible with specific software requirements, such as CUDA driver versions, Linux kernel versions, or Amazon Web Services Neuron driver versions.

        The AMI version names, and their configurations, are the following:

        al2-ami-sagemaker-inference-gpu-2
        • Accelerator: GPU

        • NVIDIA driver version: 535

        • CUDA version: 12.2

        al2-ami-sagemaker-inference-gpu-2-1
        • Accelerator: GPU

        • NVIDIA driver version: 535

        • CUDA version: 12.2

        • NVIDIA Container Toolkit with disabled CUDA-compat mounting

        al2-ami-sagemaker-inference-gpu-3-1
        • Accelerator: GPU

        • NVIDIA driver version: 550

        • CUDA version: 12.4

        • NVIDIA Container Toolkit with disabled CUDA-compat mounting

        al2-ami-sagemaker-inference-neuron-2
        • Accelerator: Inferentia2 and Trainium

        • Neuron driver version: 2.19

        " + }, + "CapacityReservationConfig":{ + "shape":"ProductionVariantCapacityReservationConfig", + "documentation":"

        Settings for the capacity reservation for the compute instances that SageMaker AI reserves for an endpoint.

        " } }, "documentation":"

        Identifies a model that you want to host and the resources chosen to deploy for hosting it. If you are deploying multiple models, tell SageMaker how to distribute traffic among the models by specifying variant weights. For more information on production variants, check Production variants.

        " @@ -34316,6 +34388,50 @@ "ml.eia2.xlarge" ] }, + "ProductionVariantCapacityReservationConfig":{ + "type":"structure", + "members":{ + "CapacityReservationPreference":{ + "shape":"CapacityReservationPreference", + "documentation":"

        Options that you can choose for the capacity reservation. SageMaker AI supports the following options:

        capacity-reservations-only

        SageMaker AI launches instances only into an ML capacity reservation. If no capacity is available, the instances fail to launch.

        " + }, + "MlReservationArn":{ + "shape":"MlReservationArn", + "documentation":"

        The Amazon Resource Name (ARN) that uniquely identifies the ML capacity reservation that SageMaker AI applies when it deploys the endpoint.

        " + } + }, + "documentation":"

        Settings for the capacity reservation for the compute instances that SageMaker AI reserves for an endpoint.

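        A minimal AWS SDK for Java v2 sketch of attaching this configuration to a production variant, assuming the setter and enum names that the SDK's codegen would produce for these new shapes; the variant name, model name, instance type, and mlReservationArn are hypothetical:

        import software.amazon.awssdk.services.sagemaker.model.CapacityReservationPreference;
        import software.amazon.awssdk.services.sagemaker.model.ProductionVariant;
        import software.amazon.awssdk.services.sagemaker.model.ProductionVariantCapacityReservationConfig;

        ProductionVariant variant = ProductionVariant.builder()
                .variantName("AllTraffic")
                .modelName("my-model")
                .instanceType("ml.p6-b200.48xlarge")
                .initialInstanceCount(1)
                .capacityReservationConfig(ProductionVariantCapacityReservationConfig.builder()
                        // Launch only into the ML capacity reservation; instances
                        // fail to launch if no reserved capacity is available.
                        .capacityReservationPreference(
                                CapacityReservationPreference.CAPACITY_RESERVATIONS_ONLY)
                        .mlReservationArn(mlReservationArn)
                        .build())
                .build();
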
        " + }, + "ProductionVariantCapacityReservationSummary":{ + "type":"structure", + "members":{ + "MlReservationArn":{ + "shape":"MlReservationArn", + "documentation":"

        The Amazon Resource Name (ARN) that uniquely identifies the ML capacity reservation that SageMaker AI applies when it deploys the endpoint.

        " + }, + "CapacityReservationPreference":{ + "shape":"CapacityReservationPreference", + "documentation":"

        The option that you chose for the capacity reservation. SageMaker AI supports the following options:

        capacity-reservations-only: SageMaker AI launches instances only into an ML capacity reservation. If no capacity is available, the instances fail to launch.

        " + }, + "TotalInstanceCount":{ + "shape":"TaskCount", + "documentation":"

        The number of instances that you allocated to the ML capacity reservation.

        " + }, + "AvailableInstanceCount":{ + "shape":"TaskCount", + "documentation":"

        The number of instances that are currently available in the ML capacity reservation.

        " + }, + "UsedByCurrentEndpoint":{ + "shape":"TaskCount", + "documentation":"

        The number of instances from the ML capacity reservation that are being used by the endpoint.

        " + }, + "Ec2CapacityReservations":{ + "shape":"Ec2CapacityReservationsList", + "documentation":"

        The EC2 capacity reservations that are shared with this ML capacity reservation, if any.

        " + } + }, + "documentation":"

        Details about an ML capacity reservation.
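
        As a rough sketch, a consumer might read this summary off a deployed variant to monitor reservation usage (accessor names are inferred from the members above; how the summary is obtained is assumed, not shown in this hunk):

            // Given a ProductionVariantCapacityReservationSummary "summary",
            // e.g. from a DescribeEndpoint response, report utilization.
            System.out.printf("%d of %d reserved instances available, %d used by this endpoint%n",
                    summary.availableInstanceCount(),
                    summary.totalInstanceCount(),
                    summary.usedByCurrentEndpoint());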

        " + }, "ProductionVariantContainerStartupHealthCheckTimeoutInSeconds":{ "type":"integer", "box":true, @@ -34572,7 +34688,46 @@ "ml.r7i.12xlarge", "ml.r7i.16xlarge", "ml.r7i.24xlarge", - "ml.r7i.48xlarge" + "ml.r7i.48xlarge", + "ml.c8g.medium", + "ml.c8g.large", + "ml.c8g.xlarge", + "ml.c8g.2xlarge", + "ml.c8g.4xlarge", + "ml.c8g.8xlarge", + "ml.c8g.12xlarge", + "ml.c8g.16xlarge", + "ml.c8g.24xlarge", + "ml.c8g.48xlarge", + "ml.r7gd.medium", + "ml.r7gd.large", + "ml.r7gd.xlarge", + "ml.r7gd.2xlarge", + "ml.r7gd.4xlarge", + "ml.r7gd.8xlarge", + "ml.r7gd.12xlarge", + "ml.r7gd.16xlarge", + "ml.m8g.medium", + "ml.m8g.large", + "ml.m8g.xlarge", + "ml.m8g.2xlarge", + "ml.m8g.4xlarge", + "ml.m8g.8xlarge", + "ml.m8g.12xlarge", + "ml.m8g.16xlarge", + "ml.m8g.24xlarge", + "ml.m8g.48xlarge", + "ml.c6in.large", + "ml.c6in.xlarge", + "ml.c6in.2xlarge", + "ml.c6in.4xlarge", + "ml.c6in.8xlarge", + "ml.c6in.12xlarge", + "ml.c6in.16xlarge", + "ml.c6in.24xlarge", + "ml.c6in.32xlarge", + "ml.p6-b200.48xlarge", + "ml.p6e-gb200.36xlarge" ] }, "ProductionVariantList":{ @@ -34728,6 +34883,10 @@ "RoutingConfig":{ "shape":"ProductionVariantRoutingConfig", "documentation":"

        Settings that control how the endpoint routes incoming traffic to the instances that the endpoint hosts.

        " + }, + "CapacityReservationConfig":{ + "shape":"ProductionVariantCapacityReservationSummary", + "documentation":"

        Settings for the capacity reservation for the compute instances that SageMaker AI reserves for an endpoint.

        " } }, "documentation":"

        Describes weight and capacities for a production variant associated with an endpoint. If you sent a request to the UpdateEndpointWeightsAndCapacities API and the endpoint status is Updating, you get different desired and current values.

        " @@ -36085,7 +36244,7 @@ "ReservedCapacityInstanceCount":{ "type":"integer", "max":256, - "min":1 + "min":0 }, "ReservedCapacityInstanceType":{ "type":"string", @@ -36095,7 +36254,8 @@ "ml.p5e.48xlarge", "ml.p5en.48xlarge", "ml.trn1.32xlarge", - "ml.trn2.48xlarge" + "ml.trn2.48xlarge", + "ml.p6-b200.48xlarge" ] }, "ReservedCapacityOffering":{ @@ -36641,7 +36801,7 @@ "members":{ "S3DataType":{ "shape":"S3DataType", - "documentation":"

        If you choose S3Prefix, S3Uri identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix for model training.

        If you choose ManifestFile, S3Uri identifies an object that is a manifest file containing a list of object keys that you want SageMaker to use for model training.

        If you choose AugmentedManifestFile, S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. AugmentedManifestFile can only be used if the Channel's input mode is Pipe.

        " + "documentation":"

        If you choose S3Prefix, S3Uri identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix for model training.

        If you choose ManifestFile, S3Uri identifies an object that is a manifest file containing a list of object keys that you want SageMaker to use for model training.

        If you choose AugmentedManifestFile, S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. AugmentedManifestFile can only be used if the Channel's input mode is Pipe.

        If you choose Converse, S3Uri identifies an Amazon S3 location that contains data formatted according to the Converse format. This format structures conversational messages with specific roles and content types used for training and fine-tuning foundation models.
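
        A minimal sketch of selecting the new data type for a training channel, assuming the generated SageMaker model classes (the bucket URI is a placeholder):

            // import software.amazon.awssdk.services.sagemaker.model.*;
            S3DataSource source = S3DataSource.builder()
                    .s3DataType(S3DataType.CONVERSE)
                    .s3Uri("s3://amzn-s3-demo-bucket/converse-data/")
                    .build();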

        " }, "S3Uri":{ "shape":"S3Uri", @@ -36672,7 +36832,8 @@ "enum":[ "ManifestFile", "S3Prefix", - "AugmentedManifestFile" + "AugmentedManifestFile", + "Converse" ] }, "S3ModelDataSource":{ @@ -39146,6 +39307,14 @@ "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:mlflow-tracking-server/.*" }, + "TrackingServerMaintenanceStatus":{ + "type":"string", + "enum":[ + "MaintenanceInProgress", + "MaintenanceComplete", + "MaintenanceFailed" + ] + }, "TrackingServerName":{ "type":"string", "max":256, @@ -39472,7 +39641,35 @@ "ml.r5.8xlarge", "ml.r5.12xlarge", "ml.r5.16xlarge", - "ml.r5.24xlarge" + "ml.r5.24xlarge", + "ml.p6-b200.48xlarge", + "ml.m7i.large", + "ml.m7i.xlarge", + "ml.m7i.2xlarge", + "ml.m7i.4xlarge", + "ml.m7i.8xlarge", + "ml.m7i.12xlarge", + "ml.m7i.16xlarge", + "ml.m7i.24xlarge", + "ml.m7i.48xlarge", + "ml.c7i.large", + "ml.c7i.xlarge", + "ml.c7i.2xlarge", + "ml.c7i.4xlarge", + "ml.c7i.8xlarge", + "ml.c7i.12xlarge", + "ml.c7i.16xlarge", + "ml.c7i.24xlarge", + "ml.c7i.48xlarge", + "ml.r7i.large", + "ml.r7i.xlarge", + "ml.r7i.2xlarge", + "ml.r7i.4xlarge", + "ml.r7i.8xlarge", + "ml.r7i.12xlarge", + "ml.r7i.16xlarge", + "ml.r7i.24xlarge", + "ml.r7i.48xlarge" ] }, "TrainingInstanceTypes":{ @@ -41149,6 +41346,10 @@ "ProjectS3Path":{ "shape":"S3Uri", "documentation":"

        The location where Amazon S3 stores temporary execution data and other artifacts for the project that corresponds to the domain.

        " + }, + "SingleSignOnApplicationArn":{ + "shape":"SingleSignOnApplicationArn", + "documentation":"

        The ARN of the application managed by SageMaker AI and SageMaker Unified Studio in the Amazon Web Services IAM Identity Center.

        " } }, "documentation":"

        The settings that apply to an Amazon SageMaker AI domain when you use it in Amazon SageMaker Unified Studio.

        " diff --git a/services/sagemakera2iruntime/pom.xml b/services/sagemakera2iruntime/pom.xml index d1f7646538c7..227332fb25eb 100644 --- a/services/sagemakera2iruntime/pom.xml +++ b/services/sagemakera2iruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT sagemakera2iruntime AWS Java SDK :: Services :: SageMaker A2I Runtime diff --git a/services/sagemakeredge/pom.xml b/services/sagemakeredge/pom.xml index 681872ebefb3..6526c50a50cf 100644 --- a/services/sagemakeredge/pom.xml +++ b/services/sagemakeredge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT sagemakeredge AWS Java SDK :: Services :: Sagemaker Edge diff --git a/services/sagemakerfeaturestoreruntime/pom.xml b/services/sagemakerfeaturestoreruntime/pom.xml index 97e64ba71b61..910cec7302d9 100644 --- a/services/sagemakerfeaturestoreruntime/pom.xml +++ b/services/sagemakerfeaturestoreruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT sagemakerfeaturestoreruntime AWS Java SDK :: Services :: Sage Maker Feature Store Runtime diff --git a/services/sagemakergeospatial/pom.xml b/services/sagemakergeospatial/pom.xml index 10684cac0ec2..53cb5c8699ca 100644 --- a/services/sagemakergeospatial/pom.xml +++ b/services/sagemakergeospatial/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT sagemakergeospatial AWS Java SDK :: Services :: Sage Maker Geospatial diff --git a/services/sagemakermetrics/pom.xml b/services/sagemakermetrics/pom.xml index a6e624f4072d..74bba7369190 100644 --- a/services/sagemakermetrics/pom.xml +++ b/services/sagemakermetrics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT sagemakermetrics AWS Java SDK :: Services :: Sage Maker Metrics diff --git a/services/sagemakerruntime/pom.xml b/services/sagemakerruntime/pom.xml index a6f46bed6649..f03df880133a 100644 --- a/services/sagemakerruntime/pom.xml +++ b/services/sagemakerruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT sagemakerruntime AWS Java SDK :: Services :: SageMaker Runtime diff --git a/services/savingsplans/pom.xml b/services/savingsplans/pom.xml index 111e0472a0d4..f5ae26b31802 100644 --- a/services/savingsplans/pom.xml +++ b/services/savingsplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT savingsplans AWS Java SDK :: Services :: Savingsplans diff --git a/services/scheduler/pom.xml b/services/scheduler/pom.xml index 77b086aa1da6..8906da8be1e1 100644 --- a/services/scheduler/pom.xml +++ b/services/scheduler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT scheduler AWS Java SDK :: Services :: Scheduler diff --git a/services/schemas/pom.xml b/services/schemas/pom.xml index 95cbf4df9ccb..ff527356ae59 100644 --- a/services/schemas/pom.xml +++ b/services/schemas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT schemas AWS Java SDK :: Services :: Schemas diff --git a/services/secretsmanager/pom.xml b/services/secretsmanager/pom.xml index ab493a745da4..11aebb95af78 100644 --- a/services/secretsmanager/pom.xml +++ b/services/secretsmanager/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT secretsmanager AWS Java SDK :: Services :: AWS Secrets Manager diff --git 
a/services/securityhub/pom.xml b/services/securityhub/pom.xml index 8b28a91b1c72..673bd638638f 100644 --- a/services/securityhub/pom.xml +++ b/services/securityhub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT securityhub AWS Java SDK :: Services :: SecurityHub diff --git a/services/securityir/pom.xml b/services/securityir/pom.xml index 557792e01a62..2c18929132de 100644 --- a/services/securityir/pom.xml +++ b/services/securityir/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT securityir AWS Java SDK :: Services :: Security IR diff --git a/services/securitylake/pom.xml b/services/securitylake/pom.xml index 31839705d15a..ffec9404c36b 100644 --- a/services/securitylake/pom.xml +++ b/services/securitylake/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT securitylake AWS Java SDK :: Services :: Security Lake diff --git a/services/serverlessapplicationrepository/pom.xml b/services/serverlessapplicationrepository/pom.xml index cd6bd1facf64..f0d2a89c0ee6 100644 --- a/services/serverlessapplicationrepository/pom.xml +++ b/services/serverlessapplicationrepository/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 serverlessapplicationrepository diff --git a/services/servicecatalog/pom.xml b/services/servicecatalog/pom.xml index 22da8ad2657a..627b30979b23 100644 --- a/services/servicecatalog/pom.xml +++ b/services/servicecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT servicecatalog AWS Java SDK :: Services :: AWS Service Catalog diff --git a/services/servicecatalogappregistry/pom.xml b/services/servicecatalogappregistry/pom.xml index bab190a0d3f9..a6597f9ddd87 100644 --- a/services/servicecatalogappregistry/pom.xml +++ b/services/servicecatalogappregistry/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT servicecatalogappregistry AWS Java SDK :: Services :: Service Catalog App Registry diff --git a/services/servicediscovery/pom.xml b/services/servicediscovery/pom.xml index e11be6fdcfc4..cb3b7abe7932 100644 --- a/services/servicediscovery/pom.xml +++ b/services/servicediscovery/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 servicediscovery diff --git a/services/servicequotas/pom.xml b/services/servicequotas/pom.xml index b00ddeab9021..5b572363e157 100644 --- a/services/servicequotas/pom.xml +++ b/services/servicequotas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT servicequotas AWS Java SDK :: Services :: Service Quotas diff --git a/services/ses/pom.xml b/services/ses/pom.xml index ccbe9f7b8c57..4685ce337a67 100644 --- a/services/ses/pom.xml +++ b/services/ses/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ses AWS Java SDK :: Services :: Amazon SES diff --git a/services/sesv2/pom.xml b/services/sesv2/pom.xml index 1d161532a489..ca10d4737798 100644 --- a/services/sesv2/pom.xml +++ b/services/sesv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT sesv2 AWS Java SDK :: Services :: SESv2 diff --git a/services/sfn/pom.xml b/services/sfn/pom.xml index 2ad22b43642c..a50f4117ccc6 100644 --- a/services/sfn/pom.xml +++ b/services/sfn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 
2.31.65-SNAPSHOT sfn AWS Java SDK :: Services :: AWS Step Functions diff --git a/services/shield/pom.xml b/services/shield/pom.xml index 8cac7cb32655..95832f3d7014 100644 --- a/services/shield/pom.xml +++ b/services/shield/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT shield AWS Java SDK :: Services :: AWS Shield diff --git a/services/signer/pom.xml b/services/signer/pom.xml index 219a63c52a76..5fb1d075bf31 100644 --- a/services/signer/pom.xml +++ b/services/signer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT signer AWS Java SDK :: Services :: Signer diff --git a/services/simspaceweaver/pom.xml b/services/simspaceweaver/pom.xml index 1da106a9c8dc..99d0725179e3 100644 --- a/services/simspaceweaver/pom.xml +++ b/services/simspaceweaver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT simspaceweaver AWS Java SDK :: Services :: Sim Space Weaver diff --git a/services/sms/pom.xml b/services/sms/pom.xml index 7b46f40f2dd9..62768a55d516 100644 --- a/services/sms/pom.xml +++ b/services/sms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT sms AWS Java SDK :: Services :: AWS Server Migration diff --git a/services/snowball/pom.xml b/services/snowball/pom.xml index 89b04ced5cc3..74eeded92676 100644 --- a/services/snowball/pom.xml +++ b/services/snowball/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT snowball AWS Java SDK :: Services :: Amazon Snowball diff --git a/services/snowdevicemanagement/pom.xml b/services/snowdevicemanagement/pom.xml index bc03890c09e3..fe6dea258127 100644 --- a/services/snowdevicemanagement/pom.xml +++ b/services/snowdevicemanagement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT snowdevicemanagement AWS Java SDK :: Services :: Snow Device Management diff --git a/services/sns/pom.xml b/services/sns/pom.xml index c8f982d7f6f0..dffde232dade 100644 --- a/services/sns/pom.xml +++ b/services/sns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT sns AWS Java SDK :: Services :: Amazon SNS diff --git a/services/socialmessaging/pom.xml b/services/socialmessaging/pom.xml index cdd5d2d05aed..f4c043b2ac42 100644 --- a/services/socialmessaging/pom.xml +++ b/services/socialmessaging/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT socialmessaging AWS Java SDK :: Services :: Social Messaging diff --git a/services/sqs/pom.xml b/services/sqs/pom.xml index 91d5e984e97d..b55c94131b63 100644 --- a/services/sqs/pom.xml +++ b/services/sqs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT sqs AWS Java SDK :: Services :: Amazon SQS diff --git a/services/ssm/pom.xml b/services/ssm/pom.xml index 9fff3320b3f7..b603222cf9d5 100644 --- a/services/ssm/pom.xml +++ b/services/ssm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ssm AWS Java SDK :: Services :: AWS Simple Systems Management (SSM) diff --git a/services/ssmcontacts/pom.xml b/services/ssmcontacts/pom.xml index 9b1c3fe1f3e4..cdbb5109cd01 100644 --- a/services/ssmcontacts/pom.xml +++ b/services/ssmcontacts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ssmcontacts AWS Java SDK :: Services :: SSM Contacts diff --git 
a/services/ssmguiconnect/pom.xml b/services/ssmguiconnect/pom.xml index d06ea4cf3f31..f09ac02ca463 100644 --- a/services/ssmguiconnect/pom.xml +++ b/services/ssmguiconnect/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ssmguiconnect AWS Java SDK :: Services :: SSM Gui Connect diff --git a/services/ssmincidents/pom.xml b/services/ssmincidents/pom.xml index 26895c1687d7..a9d7cd482386 100644 --- a/services/ssmincidents/pom.xml +++ b/services/ssmincidents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ssmincidents AWS Java SDK :: Services :: SSM Incidents diff --git a/services/ssmquicksetup/pom.xml b/services/ssmquicksetup/pom.xml index ca0da7dd4f9f..23f1aa4e0637 100644 --- a/services/ssmquicksetup/pom.xml +++ b/services/ssmquicksetup/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ssmquicksetup AWS Java SDK :: Services :: SSM Quick Setup diff --git a/services/ssmsap/pom.xml b/services/ssmsap/pom.xml index ce979138631f..a21717afb0a3 100644 --- a/services/ssmsap/pom.xml +++ b/services/ssmsap/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ssmsap AWS Java SDK :: Services :: Ssm Sap diff --git a/services/sso/pom.xml b/services/sso/pom.xml index 96bf9e395b24..3f13a26baa43 100644 --- a/services/sso/pom.xml +++ b/services/sso/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT sso AWS Java SDK :: Services :: SSO diff --git a/services/ssoadmin/pom.xml b/services/ssoadmin/pom.xml index 5a27ffbcdf27..7ff27028fe5a 100644 --- a/services/ssoadmin/pom.xml +++ b/services/ssoadmin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ssoadmin AWS Java SDK :: Services :: SSO Admin diff --git a/services/ssooidc/pom.xml b/services/ssooidc/pom.xml index 53b1ac4f69a9..60d9f3a4f693 100644 --- a/services/ssooidc/pom.xml +++ b/services/ssooidc/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ssooidc AWS Java SDK :: Services :: SSO OIDC diff --git a/services/storagegateway/pom.xml b/services/storagegateway/pom.xml index c0de9d6f4ed3..f02669f73352 100644 --- a/services/storagegateway/pom.xml +++ b/services/storagegateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT storagegateway AWS Java SDK :: Services :: AWS Storage Gateway diff --git a/services/sts/pom.xml b/services/sts/pom.xml index 04bad5947dc2..5001fd41d2e9 100644 --- a/services/sts/pom.xml +++ b/services/sts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT sts AWS Java SDK :: Services :: AWS STS diff --git a/services/supplychain/pom.xml b/services/supplychain/pom.xml index b3c98171c1da..b823d79cd778 100644 --- a/services/supplychain/pom.xml +++ b/services/supplychain/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT supplychain AWS Java SDK :: Services :: Supply Chain diff --git a/services/support/pom.xml b/services/support/pom.xml index fe81c88ffaec..5a54c9ce1132 100644 --- a/services/support/pom.xml +++ b/services/support/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT support AWS Java SDK :: Services :: AWS Support diff --git a/services/supportapp/pom.xml b/services/supportapp/pom.xml index fd0ca008e181..def08a9762bf 100644 --- 
a/services/supportapp/pom.xml +++ b/services/supportapp/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT supportapp AWS Java SDK :: Services :: Support App diff --git a/services/swf/pom.xml b/services/swf/pom.xml index 49d8eb9764c1..862a75ea1084 100644 --- a/services/swf/pom.xml +++ b/services/swf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT swf AWS Java SDK :: Services :: Amazon SWF diff --git a/services/synthetics/pom.xml b/services/synthetics/pom.xml index 412cba851624..fcfc9be355a8 100644 --- a/services/synthetics/pom.xml +++ b/services/synthetics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT synthetics AWS Java SDK :: Services :: Synthetics diff --git a/services/synthetics/src/main/resources/codegen-resources/service-2.json b/services/synthetics/src/main/resources/codegen-resources/service-2.json index 0df3de3922c6..06779ae8d16d 100644 --- a/services/synthetics/src/main/resources/codegen-resources/service-2.json +++ b/services/synthetics/src/main/resources/codegen-resources/service-2.json @@ -551,26 +551,26 @@ "members":{ "S3Bucket":{ "shape":"String", - "documentation":"

        If your canary script is located in S3, specify the bucket name here. Do not include s3:// as the start of the bucket name.

        " + "documentation":"

        If your canary script is located in Amazon S3, specify the bucket name here. Do not include s3:// as the start of the bucket name.

        " }, "S3Key":{ "shape":"String", - "documentation":"

        The S3 key of your script. For more information, see Working with Amazon S3 Objects.

        " + "documentation":"

        The Amazon S3 key of your script. For more information, see Working with Amazon S3 Objects.

        " }, "S3Version":{ "shape":"String", - "documentation":"

        The S3 version ID of your script.

        " + "documentation":"

        The Amazon S3 version ID of your script.

        " }, "ZipFile":{ "shape":"Blob", - "documentation":"

        If you input your canary script directly into the canary instead of referring to an S3 location, the value of this parameter is the base64-encoded contents of the .zip file that contains the script. It must be smaller than 225 Kb.

        For large canary scripts, we recommend that you use an S3 location instead of inputting it directly with this parameter.

        " + "documentation":"

        If you input your canary script directly into the canary instead of referring to an Amazon S3 location, the value of this parameter is the base64-encoded contents of the .zip file that contains the script. It must be smaller than 225 KB.

        For large canary scripts, we recommend that you use an Amazon S3 location instead of inputting it directly with this parameter.

        " }, "Handler":{ "shape":"CodeHandler", "documentation":"

        The entry point to use for the source code when running the canary. For canaries that use the syn-python-selenium-1.0 runtime or a syn-nodejs.puppeteer runtime earlier than syn-nodejs.puppeteer-3.4, the handler must be specified as fileName.handler. For syn-python-selenium-1.1, syn-nodejs.puppeteer-3.4, and later runtimes, the handler can be specified as fileName.functionName, or you can specify a folder where canary scripts reside as folder/fileName.functionName.

        " } }, - "documentation":"

        Use this structure to input your script code for the canary. This structure contains the Lambda handler with the location where the canary should start running the script. If the script is stored in an S3 bucket, the bucket name, key, and version are also included. If the script was passed into the canary directly, the script code is contained in the value of Zipfile.

        If you are uploading your canary scripts with an Amazon S3 bucket, your zip file should include your script in a certain folder structure.

        " + "documentation":"

        Use this structure to input your script code for the canary. This structure contains the Lambda handler with the location where the canary should start running the script. If the script is stored in an Amazon S3 bucket, the bucket name, key, and version are also included. If the script was passed into the canary directly, the script code is contained in the value of ZipFile.

        If you are uploading your canary scripts with an Amazon S3 bucket, your zip file should include your script in a certain folder structure.
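
        A minimal sketch of the S3-based variant, assuming the generated Synthetics builders (the bucket and key are placeholders; all member names appear in this hunk):

            // import software.amazon.awssdk.services.synthetics.model.*;
            CanaryCodeInput code = CanaryCodeInput.builder()
                    .s3Bucket("amzn-s3-demo-bucket")   // no s3:// prefix, per the docs above
                    .s3Key("canaries/my-canary.zip")
                    .handler("index.handler")          // fileName.functionName form
                    .build();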

        " }, "CanaryCodeOutput":{ "type":"structure", @@ -672,6 +672,10 @@ "EnvironmentVariables":{ "shape":"EnvironmentVariablesMap", "documentation":"

        Specifies the keys and values to use for any environment variables used in the canary script. Use the following format:

        { \"key1\" : \"value1\", \"key2\" : \"value2\", ...}

        Keys must start with a letter and be at least two characters. The total size of your environment variables cannot exceed 4 KB. You can't specify any Lambda reserved environment variables as the keys for your environment variables. For more information about reserved keys, see Runtime environment variables.

        Environment variable keys and values are encrypted at rest using Amazon Web Services owned KMS keys. However, the environment variables are not encrypted on the client side. Do not store sensitive information in them.

        " + }, + "EphemeralStorage":{ + "shape":"EphemeralStorageSize", + "documentation":"

        Specifies the amount of ephemeral storage (in MB) to allocate for the canary run during execution. This temporary storage is used for storing canary run artifacts (which are uploaded to an Amazon S3 bucket at the end of the run) and for any canary browser operations. This temporary storage is cleared after the run is completed. The default storage value is 1024 MB.

        " } }, "documentation":"

        A structure that contains input information for a canary run.
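
        A minimal sketch combining the environment-variable format described above with the new ephemeral storage member, assuming the generated run-config input builder (the class name is inferred from this structure's role, not confirmed by this patch):

            // import java.util.Map;
            // import software.amazon.awssdk.services.synthetics.model.*;
            CanaryRunConfigInput runConfig = CanaryRunConfigInput.builder()
                    .environmentVariables(Map.of(
                            "STAGE", "prod",              // keys start with a letter, >= 2 chars
                            "API_HOST", "example.com"))
                    .ephemeralStorage(2048)               // MB; this model allows 1024-5120
                    .build();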

        " @@ -690,6 +694,10 @@ "ActiveTracing":{ "shape":"NullableBoolean", "documentation":"

        Displays whether this canary run used active X-Ray tracing.

        " + }, + "EphemeralStorage":{ + "shape":"EphemeralStorageSize", + "documentation":"

        Specifies the amount of ephemeral storage (in MB) to allocate for the canary run during execution. This temporary storage is used for storing canary run artifacts (which are uploaded to an Amazon S3 bucket at the end of the run) and for any canary browser operations. This temporary storage is cleared after the run is completed. The default storage value is 1024 MB.

        " } }, "documentation":"

        A structure that contains information about a canary run.

        " @@ -722,11 +730,23 @@ }, "StateReasonCode":{ "shape":"CanaryRunStateReasonCode", - "documentation":"

        If this value is CANARY_FAILURE, an exception occurred in the canary code. If this value is EXECUTION_FAILURE, an exception occurred in CloudWatch Synthetics.

        " + "documentation":"

        If this value is CANARY_FAILURE, either the canary script failed or Synthetics ran into a fatal error when running the canary. For example, a misconfigured canary timeout setting can cause the canary to time out before Synthetics can evaluate its status.

        If this value is EXECUTION_FAILURE, a non-critical failure occurred, such as failing to save generated debug artifacts (for example, screenshots or HAR files).

        If both types of failures occurred, CANARY_FAILURE takes precedence. To understand the exact error, use the StateReason API.

        " + }, + "TestResult":{ + "shape":"CanaryRunTestResult", + "documentation":"

        Specifies the status of the canary script for this run. When Synthetics tries to determine the status but fails, the result is marked as UNKNOWN. For the overall status of the canary run, see State.

        " } }, "documentation":"

        This structure contains the status information about a canary run.
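
        As a rough sketch, a caller inspecting a run might combine the new TestResult with the state reason fields documented above (accessor names are assumed from the members; canaryRun stands in for a value returned by GetCanaryRuns):

            CanaryRunStatus status = canaryRun.status();
            if (status.testResult() == CanaryRunTestResult.FAILED
                    || status.stateReasonCode() == CanaryRunStateReasonCode.CANARY_FAILURE) {
                System.out.println("Canary script failed: " + status.stateReason());
            }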

        " }, + "CanaryRunTestResult":{ + "type":"string", + "enum":[ + "PASSED", + "FAILED", + "UNKNOWN" + ] + }, "CanaryRunTimeline":{ "type":"structure", "members":{ @@ -861,7 +881,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^([0-9a-zA-Z_-]+\\/)*[0-9A-Za-z_\\\\-]+\\.[A-Za-z_][A-Za-z0-9_]*$" + "pattern":"^([0-9a-zA-Z_-]+(\\/|\\.))*[0-9A-Za-z_\\\\-]+(\\.|::)[A-Za-z_][A-Za-z0-9_]*$" }, "ConflictException":{ "type":"structure", @@ -889,11 +909,11 @@ }, "Code":{ "shape":"CanaryCodeInput", - "documentation":"

        A structure that includes the entry point from which the canary should start running your script. If the script is stored in an S3 bucket, the bucket name, key, and version are also included.

        " + "documentation":"

        A structure that includes the entry point from which the canary should start running your script. If the script is stored in an Amazon S3 bucket, the bucket name, key, and version are also included.

        " }, "ArtifactS3Location":{ "shape":"String", - "documentation":"

        The location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary. Artifacts include the log file, screenshots, and HAR files. The name of the S3 bucket can't include a period (.).

        " + "documentation":"

        The location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary. Artifacts include the log file, screenshots, and HAR files. The name of the Amazon S3 bucket can't include a period (.).

        " }, "ExecutionRoleArn":{ "shape":"RoleArn", @@ -1163,6 +1183,11 @@ "key":{"shape":"EnvironmentVariableName"}, "value":{"shape":"EnvironmentVariableValue"} }, + "EphemeralStorageSize":{ + "type":"integer", + "max":5120, + "min":1024 + }, "ErrorMessage":{"type":"string"}, "FunctionArn":{ "type":"string", @@ -1679,11 +1704,11 @@ }, "SuccessRetentionPeriodInDays":{ "shape":"MaxSize1024", - "documentation":"

        The number of days to retain data on the failed runs for this canary. The valid range is 1 to 455 days.

        This setting affects the range of information returned by GetCanaryRuns, as well as the range of information displayed in the Synthetics console.

        " + "documentation":"

        The number of days to retain data about successful runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.

        This setting affects the range of information returned by GetCanaryRuns, as well as the range of information displayed in the Synthetics console.

        " }, "FailureRetentionPeriodInDays":{ "shape":"MaxSize1024", - "documentation":"

        The number of days to retain data on the failed runs for this canary. The valid range is 1 to 455 days.

        This setting affects the range of information returned by GetCanaryRuns, as well as the range of information displayed in the Synthetics console.

        " + "documentation":"

        The number of days to retain data about failed runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.

        This setting affects the range of information returned by GetCanaryRuns, as well as the range of information displayed in the Synthetics console.

        " }, "VisualReference":{"shape":"VisualReferenceInput"}, "ArtifactS3Location":{ @@ -1693,7 +1718,7 @@ "ArtifactConfig":{"shape":"ArtifactConfigInput"}, "ProvisionedResourceCleanup":{ "shape":"ProvisionedResourceCleanupSetting", - "documentation":"

        Specifies whether to also delete the Lambda functions and layers used by this canary when the canary is deleted. If the value of this parameter is AUTOMATIC, it means that the Lambda functions and layers will be deleted when the canary is deleted.

        If the value of this parameter is OFF, then the value of the DeleteLambda parameter of the DeleteCanary operation determines whether the Lambda functions and layers will be deleted.

        " + "documentation":"

        Specifies whether to also delete the Lambda functions and layers used by this canary when the canary is deleted. If you omit this parameter, the default of AUTOMATIC is used, which means that the Lambda functions and layers will be deleted when the canary is deleted.

        If the value of this parameter is OFF, then the value of the DeleteLambda parameter of the DeleteCanary operation determines whether the Lambda functions and layers will be deleted.
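
        A minimal sketch of setting the retention and cleanup fields from this hunk, assuming the generated builders and that the containing shape is the update-canary request (suggested by the VisualReference member; the shape name itself is not shown in this hunk):

            // import software.amazon.awssdk.services.synthetics.model.*;
            UpdateCanaryRequest request = UpdateCanaryRequest.builder()
                    .name("my-canary")
                    .successRetentionPeriodInDays(31)    // default is 31 days if omitted
                    .failureRetentionPeriodInDays(120)
                    .provisionedResourceCleanup(ProvisionedResourceCleanupSetting.AUTOMATIC)
                    .build();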

        " } } }, @@ -1856,7 +1881,7 @@ }, "Code":{ "shape":"CanaryCodeInput", - "documentation":"

        A structure that includes the entry point from which the canary should start running your script. If the script is stored in an S3 bucket, the bucket name, key, and version are also included.

        " + "documentation":"

        A structure that includes the entry point from which the canary should start running your script. If the script is stored in an Amazon S3 bucket, the bucket name, key, and version are also included.

        " }, "ExecutionRoleArn":{ "shape":"RoleArn", @@ -1892,7 +1917,7 @@ }, "ArtifactS3Location":{ "shape":"String", - "documentation":"

        The location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary. Artifacts include the log file, screenshots, and HAR files. The name of the S3 bucket can't include a period (.).

        " + "documentation":"

        The location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary. Artifacts include the log file, screenshots, and HAR files. The name of the Amazon S3 bucket can't include a period (.).

        " }, "ArtifactConfig":{ "shape":"ArtifactConfigInput", diff --git a/services/taxsettings/pom.xml b/services/taxsettings/pom.xml index 3d8d487e6cf2..05c939b7120c 100644 --- a/services/taxsettings/pom.xml +++ b/services/taxsettings/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT taxsettings AWS Java SDK :: Services :: Tax Settings diff --git a/services/textract/pom.xml b/services/textract/pom.xml index 0a18b9a22f3c..1662fc143eb1 100644 --- a/services/textract/pom.xml +++ b/services/textract/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT textract AWS Java SDK :: Services :: Textract diff --git a/services/timestreaminfluxdb/pom.xml b/services/timestreaminfluxdb/pom.xml index a12dbecc1985..19067cca3ac7 100644 --- a/services/timestreaminfluxdb/pom.xml +++ b/services/timestreaminfluxdb/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT timestreaminfluxdb AWS Java SDK :: Services :: Timestream Influx DB diff --git a/services/timestreamquery/pom.xml b/services/timestreamquery/pom.xml index bb40d073aa14..283a6c03465a 100644 --- a/services/timestreamquery/pom.xml +++ b/services/timestreamquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT timestreamquery AWS Java SDK :: Services :: Timestream Query diff --git a/services/timestreamwrite/pom.xml b/services/timestreamwrite/pom.xml index 708dec5596df..6c37850163e8 100644 --- a/services/timestreamwrite/pom.xml +++ b/services/timestreamwrite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT timestreamwrite AWS Java SDK :: Services :: Timestream Write diff --git a/services/tnb/pom.xml b/services/tnb/pom.xml index d04cfe31ba17..69c821a5fd5a 100644 --- a/services/tnb/pom.xml +++ b/services/tnb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT tnb AWS Java SDK :: Services :: Tnb diff --git a/services/transcribe/pom.xml b/services/transcribe/pom.xml index 24fce1e71953..232d273772d7 100644 --- a/services/transcribe/pom.xml +++ b/services/transcribe/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT transcribe AWS Java SDK :: Services :: Transcribe diff --git a/services/transcribe/src/main/resources/codegen-resources/service-2.json b/services/transcribe/src/main/resources/codegen-resources/service-2.json index 6d85ea6d12b2..aa83cc0a120e 100644 --- a/services/transcribe/src/main/resources/codegen-resources/service-2.json +++ b/services/transcribe/src/main/resources/codegen-resources/service-2.json @@ -1015,7 +1015,7 @@ "members":{ "NoteTemplate":{ "shape":"MedicalScribeNoteTemplate", - "documentation":"

        Specify one of the following templates to use for the clinical note summary. The default is HISTORY_AND_PHYSICAL.

        • HISTORY_AND_PHYSICAL: Provides summaries for key sections of the clinical documentation. Examples of sections include Chief Complaint, History of Present Illness, Review of Systems, Past Medical History, Assessment, and Plan.

        • GIRPP: Provides summaries based on the patients progress toward goals. Examples of sections include Goal, Intervention, Response, Progress, and Plan.

        " + "documentation":"

        Specify one of the following templates to use for the clinical note summary. The default is HISTORY_AND_PHYSICAL.

        • HISTORY_AND_PHYSICAL: Provides summaries for key sections of the clinical documentation. Examples of sections include Chief Complaint, History of Present Illness, Review of Systems, Past Medical History, Assessment, and Plan.

        • GIRPP: Provides summaries based on the patient's progress toward goals. Examples of sections include Goal, Intervention, Response, Progress, and Plan.

        • BIRP: Focuses on the patient's behavioral patterns and responses. Examples of sections include Behavior, Intervention, Response, and Plan.

        • SIRP: Emphasizes the situational context of therapy. Examples of sections include Situation, Intervention, Response, and Plan.

        • DAP: Provides a simplified format for clinical documentation. Examples of sections include Data, Assessment, and Plan.

        • BEHAVIORAL_SOAP: Behavioral health focused documentation format. Examples of sections include Subjective, Objective, Assessment, and Plan.

        • PHYSICAL_SOAP: Physical health focused documentation format. Examples of sections include Subjective, Objective, Assessment, and Plan.

        " } }, "documentation":"

        The output configuration for clinical note generation.
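
        For illustration, a minimal sketch of selecting one of the new templates through the generated Transcribe client; the settings class name is inferred from this structure's description and is not confirmed by this patch:

            ClinicalNoteGenerationSettings noteSettings = ClinicalNoteGenerationSettings.builder()
                    .noteTemplate(MedicalScribeNoteTemplate.BIRP)
                    .build();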

        " @@ -1318,8 +1318,7 @@ }, "DeleteCallAnalyticsCategoryResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteCallAnalyticsJobRequest":{ "type":"structure", @@ -1333,8 +1332,7 @@ }, "DeleteCallAnalyticsJobResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteLanguageModelRequest":{ "type":"structure", @@ -2466,7 +2464,12 @@ "type":"string", "enum":[ "HISTORY_AND_PHYSICAL", - "GIRPP" + "GIRPP", + "BIRP", + "SIRP", + "DAP", + "BEHAVIORAL_SOAP", + "PHYSICAL_SOAP" ] }, "MedicalScribeOutput":{ @@ -3367,8 +3370,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -3675,8 +3677,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateCallAnalyticsCategoryRequest":{ "type":"structure", diff --git a/services/transcribestreaming/pom.xml b/services/transcribestreaming/pom.xml index f461c2a58010..6183f8ccebd3 100644 --- a/services/transcribestreaming/pom.xml +++ b/services/transcribestreaming/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT transcribestreaming AWS Java SDK :: Services :: AWS Transcribe Streaming diff --git a/services/transcribestreaming/src/main/resources/codegen-resources/paginators-1.json b/services/transcribestreaming/src/main/resources/codegen-resources/paginators-1.json index 5677bd8e4a2d..ea142457a6a7 100644 --- a/services/transcribestreaming/src/main/resources/codegen-resources/paginators-1.json +++ b/services/transcribestreaming/src/main/resources/codegen-resources/paginators-1.json @@ -1,4 +1,3 @@ { - "pagination": { - } + "pagination": {} } diff --git a/services/transcribestreaming/src/main/resources/codegen-resources/service-2.json b/services/transcribestreaming/src/main/resources/codegen-resources/service-2.json index 551077b31ef1..6a57f1ac5512 100644 --- a/services/transcribestreaming/src/main/resources/codegen-resources/service-2.json +++ b/services/transcribestreaming/src/main/resources/codegen-resources/service-2.json @@ -361,7 +361,7 @@ }, "NoteTemplate":{ "shape":"MedicalScribeNoteTemplate", - "documentation":"

        Specify one of the following templates to use for the clinical note summary. The default is HISTORY_AND_PHYSICAL.

        • HISTORY_AND_PHYSICAL: Provides summaries for key sections of the clinical documentation. Sections include Chief Complaint, History of Present Illness, Review of Systems, Past Medical History, Assessment, and Plan.

        • GIRPP: Provides summaries based on the patients progress toward goals. Sections include Goal, Intervention, Response, Progress, and Plan.

        " + "documentation":"

        Specify one of the following templates to use for the clinical note summary. The default is HISTORY_AND_PHYSICAL.

        • HISTORY_AND_PHYSICAL: Provides summaries for key sections of the clinical documentation. Examples of sections include Chief Complaint, History of Present Illness, Review of Systems, Past Medical History, Assessment, and Plan.

        • GIRPP: Provides summaries based on the patient's progress toward goals. Examples of sections include Goal, Intervention, Response, Progress, and Plan.

        • BIRP: Focuses on the patient's behavioral patterns and responses. Examples of sections include Behavior, Intervention, Response, and Plan.

        • SIRP: Emphasizes the situational context of therapy. Examples of sections include Situation, Intervention, Response, and Plan.

        • DAP: Provides a simplified format for clinical documentation. Examples of sections include Data, Assessment, and Plan.

        • BEHAVIORAL_SOAP: Behavioral health focused documentation format. Examples of sections include Subjective, Objective, Assessment, and Plan.

        • PHYSICAL_SOAP: Physical health focused documentation format. Examples of sections include Subjective, Objective, Assessment, and Plan.

        " } }, "documentation":"

        The output configuration for aggregated transcript and clinical note generation.

        " @@ -421,11 +421,11 @@ "members":{ "StartTime":{ "shape":"Double", - "documentation":"

        The start time, in milliseconds, of the utterance that was identified as PII.

        " + "documentation":"

        The start time, in seconds with millisecond precision (e.g., 1.056), of the utterance that was identified as PII.

        " }, "EndTime":{ "shape":"Double", - "documentation":"

        The end time, in milliseconds, of the utterance that was identified as PII.

        " + "documentation":"

        The end time, in seconds with millisecond precision (e.g., 1.056), of the utterance that was identified as PII.

        " }, "Category":{ "shape":"String", @@ -507,11 +507,11 @@ "members":{ "StartTime":{ "shape":"Double", - "documentation":"

        The start time, in milliseconds, of the transcribed item.

        " + "documentation":"

        The start time, in seconds with millisecond precision (e.g., 1.056), of the transcribed item.

        " }, "EndTime":{ "shape":"Double", - "documentation":"

        The end time, in milliseconds, of the transcribed item.

        " + "documentation":"

        The end time, in seconds with millisecond precision (e.g., 1.056), of the transcribed item.

        " }, "Type":{ "shape":"ItemType", @@ -706,11 +706,11 @@ "members":{ "StartTime":{ "shape":"Double", - "documentation":"

        The start time, in milliseconds, of the utterance that was identified as PHI.

        " + "documentation":"

        The start time, in seconds, of the utterance that was identified as PHI.

        " }, "EndTime":{ "shape":"Double", - "documentation":"

        The end time, in milliseconds, of the utterance that was identified as PHI.

        " + "documentation":"

        The end time, in seconds, of the utterance that was identified as PHI.

        " }, "Category":{ "shape":"String", @@ -736,11 +736,11 @@ "members":{ "StartTime":{ "shape":"Double", - "documentation":"

        The start time, in milliseconds, of the transcribed item.

        " + "documentation":"

        The start time, in seconds, of the transcribed item.

        " }, "EndTime":{ "shape":"Double", - "documentation":"

        The end time, in milliseconds, of the transcribed item.

        " + "documentation":"

        The end time, in seconds, of the transcribed item.

        " }, "Type":{ "shape":"ItemType", @@ -774,11 +774,11 @@ }, "StartTime":{ "shape":"Double", - "documentation":"

        The start time, in milliseconds, of the Result.

        " + "documentation":"

        The start time, in seconds, of the Result.

        " }, "EndTime":{ "shape":"Double", - "documentation":"

        The end time, in milliseconds, of the Result.

        " + "documentation":"

        The end time, in seconds, of the Result.

        " }, "IsPartial":{ "shape":"Boolean", @@ -932,7 +932,12 @@ "type":"string", "enum":[ "HISTORY_AND_PHYSICAL", - "GIRPP" + "GIRPP", + "DAP", + "SIRP", + "BIRP", + "BEHAVIORAL_SOAP", + "PHYSICAL_SOAP" ] }, "MedicalScribeParticipantRole":{ @@ -1297,11 +1302,11 @@ }, "StartTime":{ "shape":"Double", - "documentation":"

        The start time, in milliseconds, of the Result.

        " + "documentation":"

        The start time, in seconds with millisecond precision (e.g., 1.056), of the Result.

        " }, "EndTime":{ "shape":"Double", - "documentation":"

        The end time, in milliseconds, of the Result.

        " + "documentation":"

        The end time, in seconds with millisecond precision (e.g., 1.056), of the Result.
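
        Because these timestamps are seconds carrying millisecond precision rather than integer milliseconds, converting for millisecond-based consumers is a multiplication; a small sketch (accessor names assumed from the Result shape):

            long startMillis = Math.round(result.startTime() * 1000); // e.g., 1.056 -> 1056
            long endMillis = Math.round(result.endTime() * 1000);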

        " }, "IsPartial":{ "shape":"Boolean", diff --git a/services/transfer/pom.xml b/services/transfer/pom.xml index 8e0c2b7bf7c2..d20b4ff83f5a 100644 --- a/services/transfer/pom.xml +++ b/services/transfer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT transfer AWS Java SDK :: Services :: Transfer diff --git a/services/translate/pom.xml b/services/translate/pom.xml index f2228b559884..100c37c9539c 100644 --- a/services/translate/pom.xml +++ b/services/translate/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 translate diff --git a/services/trustedadvisor/pom.xml b/services/trustedadvisor/pom.xml index 5959d13e38d6..54bb4b9075d6 100644 --- a/services/trustedadvisor/pom.xml +++ b/services/trustedadvisor/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT trustedadvisor AWS Java SDK :: Services :: Trusted Advisor diff --git a/services/verifiedpermissions/pom.xml b/services/verifiedpermissions/pom.xml index b4f99199d81a..b796006438bb 100644 --- a/services/verifiedpermissions/pom.xml +++ b/services/verifiedpermissions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT verifiedpermissions AWS Java SDK :: Services :: Verified Permissions diff --git a/services/voiceid/pom.xml b/services/voiceid/pom.xml index 12e1c28dcb47..0bd1b79a97ce 100644 --- a/services/voiceid/pom.xml +++ b/services/voiceid/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT voiceid AWS Java SDK :: Services :: Voice ID diff --git a/services/vpclattice/pom.xml b/services/vpclattice/pom.xml index 08184107c13e..5ab8c221a874 100644 --- a/services/vpclattice/pom.xml +++ b/services/vpclattice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT vpclattice AWS Java SDK :: Services :: VPC Lattice diff --git a/services/waf/pom.xml b/services/waf/pom.xml index 62ff9d892d3d..8e510f5f3dbd 100644 --- a/services/waf/pom.xml +++ b/services/waf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT waf AWS Java SDK :: Services :: AWS WAF diff --git a/services/wafv2/pom.xml b/services/wafv2/pom.xml index d450811b41b3..3863f3c45e72 100644 --- a/services/wafv2/pom.xml +++ b/services/wafv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT wafv2 AWS Java SDK :: Services :: WAFV2 diff --git a/services/wafv2/src/main/resources/codegen-resources/paginators-1.json b/services/wafv2/src/main/resources/codegen-resources/paginators-1.json index 5677bd8e4a2d..ea142457a6a7 100644 --- a/services/wafv2/src/main/resources/codegen-resources/paginators-1.json +++ b/services/wafv2/src/main/resources/codegen-resources/paginators-1.json @@ -1,4 +1,3 @@ { - "pagination": { - } + "pagination": {} } diff --git a/services/wafv2/src/main/resources/codegen-resources/service-2.json b/services/wafv2/src/main/resources/codegen-resources/service-2.json index 813353a9b906..d3ef609ea261 100644 --- a/services/wafv2/src/main/resources/codegen-resources/service-2.json +++ b/services/wafv2/src/main/resources/codegen-resources/service-2.json @@ -997,6 +997,11 @@ "type":"integer", "min":0 }, + "ASN":{ + "type":"long", + "max":4294967295, + "min":0 + }, "AWSManagedRulesACFPRuleSet":{ "type":"structure", "required":[ @@ -1026,7 +1031,7 @@ "documentation":"

        Allow the use of regular expressions in the registration page path and the account creation path.

        " } }, - "documentation":"

        Details for your use of the account creation fraud prevention managed rule group, AWSManagedRulesACFPRuleSet. This configuration is used in ManagedRuleGroupConfig.

        " + "documentation":"

        Details for your use of the account creation fraud prevention managed rule group, AWSManagedRulesACFPRuleSet. This configuration is used in ManagedRuleGroupConfig.

        For additional information about this and the other intelligent threat mitigation rule groups, see Intelligent threat mitigation in WAF and Amazon Web Services Managed Rules rule groups list in the WAF Developer Guide.

        " }, "AWSManagedRulesATPRuleSet":{ "type":"structure", @@ -1049,7 +1054,22 @@ "documentation":"

        Allow the use of regular expressions in the login page path.

        " } }, - "documentation":"

        Details for your use of the account takeover prevention managed rule group, AWSManagedRulesATPRuleSet. This configuration is used in ManagedRuleGroupConfig.

        " + "documentation":"

        Details for your use of the account takeover prevention managed rule group, AWSManagedRulesATPRuleSet. This configuration is used in ManagedRuleGroupConfig.

        For additional information about this and the other intelligent threat mitigation rule groups, see Intelligent threat mitigation in WAF and Amazon Web Services Managed Rules rule groups list in the WAF Developer Guide.

        " + }, + "AWSManagedRulesAntiDDoSRuleSet":{ + "type":"structure", + "required":["ClientSideActionConfig"], + "members":{ + "ClientSideActionConfig":{ + "shape":"ClientSideActionConfig", + "documentation":"

        Configures the request handling that's applied by the managed rule group rules ChallengeAllDuringEvent and ChallengeDDoSRequests during a distributed denial of service (DDoS) attack.

        " + }, + "SensitivityToBlock":{ + "shape":"SensitivityToAct", + "documentation":"

        The sensitivity that the rule group rule DDoSRequests uses when matching against the DDoS suspicion labeling on a request. The managed rule group adds the labeling during DDoS events, before the DDoSRequests rule runs.

        The higher the sensitivity, the more levels of labeling that the rule matches:

        • Low sensitivity is less sensitive, causing the rule to match only on the most likely participants in an attack, which are the requests with the high suspicion label awswaf:managed:aws:anti-ddos:high-suspicion-ddos-request.

        • Medium sensitivity causes the rule to match on the medium and high suspicion labels.

        • High sensitivity causes the rule to match on all of the suspicion labels: low, medium, and high.

        Default: LOW

        " + } + }, + "documentation":"

        Configures the use of the anti-DDoS managed rule group, AWSManagedRulesAntiDDoSRuleSet. This configuration is used in ManagedRuleGroupConfig.

        The configuration that you provide here determines whether and how the rules in the rule group are used.

        For additional information about this and the other intelligent threat mitigation rule groups, see Intelligent threat mitigation in WAF and Amazon Web Services Managed Rules rule groups list in the WAF Developer Guide.
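
        A rough sketch of wiring this rule set into a managed rule group configuration with the generated wafv2 builders; the challenge member name on ClientSideActionConfig and the UsageOfAction constant are assumptions based on the rule descriptions in this model, while the LOW/MEDIUM/HIGH sensitivities are documented above:

            // import software.amazon.awssdk.services.wafv2.model.*;
            ManagedRuleGroupConfig antiDdos = ManagedRuleGroupConfig.builder()
                    .awsManagedRulesAntiDDoSRuleSet(AWSManagedRulesAntiDDoSRuleSet.builder()
                            .clientSideActionConfig(ClientSideActionConfig.builder()
                                    .challenge(ClientSideAction.builder()      // member name assumed
                                            .usageOfAction(UsageOfAction.ENABLED)
                                            .sensitivity(SensitivityToAct.HIGH)
                                            .build())
                                    .build())
                            .sensitivityToBlock(SensitivityToAct.LOW)
                            .build())
                    .build();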

        " }, "AWSManagedRulesBotControlRuleSet":{ "type":"structure", @@ -1064,7 +1084,7 @@ "documentation":"

        Applies only to the targeted inspection level.

        Determines whether to use machine learning (ML) to analyze your web traffic for bot-related activity. Machine learning is required for the Bot Control rules TGT_ML_CoordinatedActivityLow and TGT_ML_CoordinatedActivityMedium, which inspect for anomalous behavior that might indicate distributed, coordinated bot activity.

        For more information about this choice, see the listing for these rules in the table at Bot Control rules listing in the WAF Developer Guide.

        Default: TRUE

        " } }, - "documentation":"

        Details for your use of the Bot Control managed rule group, AWSManagedRulesBotControlRuleSet. This configuration is used in ManagedRuleGroupConfig.

        " + "documentation":"

        Details for your use of the Bot Control managed rule group, AWSManagedRulesBotControlRuleSet. This configuration is used in ManagedRuleGroupConfig.

        For additional information about this and the other intelligent threat mitigation rule groups, see Intelligent threat mitigation in WAF and Amazon Web Services Managed Rules rule groups list in the WAF Developer Guide.

        " }, "Action":{"type":"string"}, "ActionCondition":{ @@ -1106,14 +1126,12 @@ }, "All":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Inspect all of the elements that WAF has parsed and extracted from the web request component that you've identified in your FieldToMatch specifications.

        This is used in the FieldToMatch specification for some web request component types.

        JSON specification: \"All\": {}

        " }, "AllQueryArguments":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Inspect all query arguments of the web request.

        This is used in the FieldToMatch specification for some web request component types.

        JSON specification: \"AllQueryArguments\": {}

        " }, "AllowAction":{ @@ -1137,6 +1155,27 @@ }, "documentation":"

        A logical rule statement used to combine other rule statements with AND logic. You provide more than one Statement within the AndStatement.

        " }, + "AsnList":{ + "type":"list", + "member":{"shape":"ASN"}, + "max":100, + "min":1 + }, + "AsnMatchStatement":{ + "type":"structure", + "required":["AsnList"], + "members":{ + "AsnList":{ + "shape":"AsnList", + "documentation":"

        Contains one or more Autonomous System Numbers (ASNs). ASNs are unique identifiers assigned to large internet networks managed by organizations such as internet service providers, enterprises, universities, or government agencies.

        " + }, + "ForwardedIPConfig":{ + "shape":"ForwardedIPConfig", + "documentation":"

        The configuration for inspecting IP addresses to match against an ASN in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

        " + } + }, + "documentation":"

        A rule statement that inspects web traffic based on the Autonomous System Number (ASN) associated with the request's IP address.

        For additional details, see ASN match rule statement in the WAF Developer Guide.
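
        As a rough illustration (not part of the service model), building this statement through the Java SDK builders generated from this model could look like the following; the member names are assumed from standard codegen naming, and the ASN values are placeholders assuming the ASN shape maps to Long:

        // import software.amazon.awssdk.services.wafv2.model.*;
        Statement asnStatement = Statement.builder()
            .asnMatchStatement(AsnMatchStatement.builder()
                .asnList(16509L, 64496L) // illustrative ASNs only
                .build())
            .build();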

        " + }, "AssociateWebACLRequest":{ "type":"structure", "required":[ @@ -1156,8 +1195,7 @@ }, "AssociateWebACLResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "AssociatedResourceType":{ "type":"string", @@ -1342,6 +1380,36 @@ } } }, + "ClientSideAction":{ + "type":"structure", + "required":["UsageOfAction"], + "members":{ + "UsageOfAction":{ + "shape":"UsageOfAction", + "documentation":"

        Determines whether to use the AWSManagedRulesAntiDDoSRuleSet rules ChallengeAllDuringEvent and ChallengeDDoSRequests in the rule group evaluation and the related label awswaf:managed:aws:anti-ddos:challengeable-request.

        • If usage is enabled:

          • The managed rule group adds the label awswaf:managed:aws:anti-ddos:challengeable-request to any web request whose URL does NOT match the regular expressions provided in the ClientSideAction setting ExemptUriRegularExpressions.

          • The two rules are evaluated against web requests for protected resources that are experiencing a DDoS attack. The two rules only apply their action to matching requests that have the label awswaf:managed:aws:anti-ddos:challengeable-request.

        • If usage is disabled:

          • The managed rule group doesn't add the label awswaf:managed:aws:anti-ddos:challengeable-request to any web requests.

          • The two rules are not evaluated.

          • None of the other ClientSideAction settings have any effect.

        This setting only enables or disables the use of the two anti-DDoS rules ChallengeAllDuringEvent and ChallengeDDoSRequests in the anti-DDoS managed rule group.

        This setting doesn't alter the action setting in the two rules. To override the actions used by the rules ChallengeAllDuringEvent and ChallengeDDoSRequests, enable this setting, and then override the rule actions in the usual way, in your managed rule group configuration.

        " + }, + "Sensitivity":{ + "shape":"SensitivityToAct", + "documentation":"

        The sensitivity that the rule group rule ChallengeDDoSRequests uses when matching against the DDoS suspicion labeling on a request. The managed rule group adds the labeling during DDoS events, before the ChallengeDDoSRequests rule runs.

        The higher the sensitivity, the more levels of labeling that the rule matches:

        • Low sensitivity is less sensitive, causing the rule to match only on the most likely participants in an attack, which are the requests with the high suspicion label awswaf:managed:aws:anti-ddos:high-suspicion-ddos-request.

        • Medium sensitivity causes the rule to match on the medium and high suspicion labels.

        • High sensitivity causes the rule to match on all of the suspicion labels: low, medium, and high.

        Default: HIGH

        " + }, + "ExemptUriRegularExpressions":{ + "shape":"RegularExpressionList", + "documentation":"

        The regular expression to match against the web request URI, used to identify requests that can't handle a silent browser challenge. When the ClientSideAction setting UsageOfAction is enabled, the managed rule group uses this setting to determine which requests to label with awswaf:managed:aws:anti-ddos:challengeable-request. If UsageOfAction is disabled, this setting has no effect and the managed rule group doesn't add the label to any requests.

        The anti-DDoS managed rule group doesn't evaluate the rules ChallengeDDoSRequests or ChallengeAllDuringEvent for web requests whose URIs match this regex. This is true regardless of whether you override the rule action for either of the rules in your web ACL configuration.

        Amazon Web Services recommends using a regular expression.

        This setting is required if UsageOfAction is set to ENABLED, in which case you provide between 1 and 5 regex objects in the array of settings.

        Amazon Web Services recommends starting with the following setting. Review and update it for your application's needs:

        \\/api\\/|\\.(acc|avi|css|gif|jpe?g|js|mp[34]|ogg|otf|pdf|png|tiff?|ttf|webm|webp|woff2?)$

        " + } + }, + "documentation":"

        This is part of the AWSManagedRulesAntiDDoSRuleSet ClientSideActionConfig configuration in ManagedRuleGroupConfig.

        " + }, + "ClientSideActionConfig":{ + "type":"structure", + "required":["Challenge"], + "members":{ + "Challenge":{ + "shape":"ClientSideAction", + "documentation":"

        Configuration for the use of the AWSManagedRulesAntiDDoSRuleSet rules ChallengeAllDuringEvent and ChallengeDDoSRequests.

        This setting isn't related to the configuration of the Challenge action itself. It only configures the use of the two anti-DDoS rules named here.

        You can enable or disable the use of these rules, and you can configure how to use them when they are enabled.

        " + } + }, + "documentation":"

        This is part of the configuration for the managed rules AWSManagedRulesAntiDDoSRuleSet in ManagedRuleGroupConfig.
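
        For illustration only, a minimal sketch of assembling this configuration with the Java SDK builders generated from this model (all class and member names here are assumptions based on standard codegen naming; the regex value is a placeholder):

        // import software.amazon.awssdk.services.wafv2.model.*;
        ClientSideActionConfig challengeConfig = ClientSideActionConfig.builder()
            .challenge(ClientSideAction.builder()
                .usageOfAction(UsageOfAction.ENABLED)
                .sensitivity(SensitivityToAct.MEDIUM)        // match medium and high suspicion labels
                .exemptUriRegularExpressions(Regex.builder() // URIs that can't handle a silent challenge
                    .regexString("\\/api\\/|\\.(css|gif|jpe?g|js|png|woff2?)$")
                    .build())
                .build())
            .build();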

        " + }, "ComparisonOperator":{ "type":"string", "enum":[ @@ -1915,6 +1983,10 @@ "AssociationConfig":{ "shape":"AssociationConfig", "documentation":"

        Specifies custom configurations for the associations between the web ACL and protected resources.

        Use this to customize the maximum size of the request body that your protected resources forward to WAF for inspection. You can customize this setting for CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access resources. The default setting is 16 KB (16,384 bytes).

        You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see WAF Pricing.

        For Application Load Balancer and AppSync, the limit is fixed at 8 KB (8,192 bytes).
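
        For illustration, a minimal sketch of raising the body inspection limit for a CloudFront association using the existing Java SDK wafv2 builders (the 32 KB limit here is a placeholder choice):

        // import software.amazon.awssdk.services.wafv2.model.*;
        // import java.util.Collections;
        AssociationConfig associationConfig = AssociationConfig.builder()
            .requestBody(Collections.singletonMap(
                AssociatedResourceType.CLOUDFRONT,
                RequestBodyAssociatedResourceTypeConfig.builder()
                    .defaultSizeInspectionLimit(SizeInspectionLimit.KB_32) // raise from the 16 KB default
                    .build()))
            .build();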

        " + }, + "OnSourceDDoSProtectionConfig":{ + "shape":"OnSourceDDoSProtectionConfig", + "documentation":"

        Specifies the type of DDoS protection to apply to web request data for a web ACL. For most scenarios, it is recommended to use the default protection level, ACTIVE_UNDER_DDOS. If a web ACL is associated with multiple Application Load Balancers, the changes you make to DDoS protection in that web ACL will apply to all associated Application Load Balancers.

        " } } }, @@ -2105,8 +2177,7 @@ }, "DeleteAPIKeyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteFirewallManagerRuleGroupsRequest":{ "type":"structure", @@ -2163,8 +2234,7 @@ }, "DeleteIPSetResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteLoggingConfigurationRequest":{ "type":"structure", @@ -2186,8 +2256,7 @@ }, "DeleteLoggingConfigurationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeletePermissionPolicyRequest":{ "type":"structure", @@ -2201,8 +2270,7 @@ }, "DeletePermissionPolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteRegexPatternSetRequest":{ "type":"structure", @@ -2233,8 +2301,7 @@ }, "DeleteRegexPatternSetResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteRuleGroupRequest":{ "type":"structure", @@ -2265,8 +2332,7 @@ }, "DeleteRuleGroupResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteWebACLRequest":{ "type":"structure", @@ -2297,8 +2363,7 @@ }, "DeleteWebACLResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeAllManagedProductsRequest":{ "type":"structure", @@ -2416,8 +2481,7 @@ }, "DisassociateWebACLResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DownloadUrl":{"type":"string"}, "EmailField":{ @@ -2715,7 +2779,7 @@ "documentation":"

        The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.

        If the specified header isn't present in the request, WAF doesn't apply the rule to the web request at all.

        You can specify the following fallback behaviors:

        • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

        • NO_MATCH - Treat the web request as not matching the rule statement.

        " } }, - "documentation":"

        The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

        If the specified header isn't present in the request, WAF doesn't apply the rule to the web request at all.

        This configuration is used for GeoMatchStatement and RateBasedStatement. For IPSetReferenceStatement, use IPSetForwardedIPConfig instead.

        WAF only evaluates the first IP address found in the specified HTTP header.

        " + "documentation":"

        The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

        If the specified header isn't present in the request, WAF doesn't apply the rule to the web request at all.

        This configuration is used for GeoMatchStatement, AsnMatchStatement, and RateBasedStatement. For IPSetReferenceStatement, use IPSetForwardedIPConfig instead.

        WAF only evaluates the first IP address found in the specified HTTP header.
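
        For illustration, a minimal sketch of this configuration using the existing Java SDK wafv2 builders (the header name is the common XFF case, not a requirement):

        // import software.amazon.awssdk.services.wafv2.model.*;
        ForwardedIPConfig xffConfig = ForwardedIPConfig.builder()
            .headerName("X-Forwarded-For")
            .fallbackBehavior(FallbackBehavior.NO_MATCH) // requests without a valid IP in the header don't match
            .build();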

        " }, "ForwardedIPHeaderName":{ "type":"string", @@ -3243,7 +3307,7 @@ "members":{ "OversizeHandling":{ "shape":"OversizeHandling", - "documentation":"

        What WAF should do if the headers of the request are more numerous or larger than WAF can inspect. WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to WAF.

        The options for oversize handling are the following:

        • CONTINUE - Inspect the available headers normally, according to the rule inspection criteria.

        • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

        • NO_MATCH - Treat the web request as not matching the rule statement.

        " + "documentation":"

        What WAF should do if the headers determined by your match scope are more numerous or larger than WAF can inspect. WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to WAF.

        The options for oversize handling are the following:

        • CONTINUE - Inspect the available headers normally, according to the rule inspection criteria.

        • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

        • NO_MATCH - Treat the web request as not matching the rule statement.

        " } }, "documentation":"

        Inspect a string containing the list of the request's header names, ordered as they appear in the web request that WAF receives for inspection. WAF generates the string and then uses that as the field to match component in its inspection. WAF separates the header names in the string using colons and no added spaces, for example host:user-agent:accept:authorization:referer.

        " @@ -3267,7 +3331,7 @@ }, "OversizeHandling":{ "shape":"OversizeHandling", - "documentation":"

        What WAF should do if the headers of the request are more numerous or larger than WAF can inspect. WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to WAF.

        The options for oversize handling are the following:

        • CONTINUE - Inspect the available headers normally, according to the rule inspection criteria.

        • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

        • NO_MATCH - Treat the web request as not matching the rule statement.

        " + "documentation":"

        What WAF should do if the headers determined by your match scope are more numerous or larger than WAF can inspect. WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to WAF.

        The options for oversize handling are the following:

        • CONTINUE - Inspect the available headers normally, according to the rule inspection criteria.

        • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

        • NO_MATCH - Treat the web request as not matching the rule statement.

        " } }, "documentation":"

        Inspect all headers in the web request. You can specify the parts of the headers to inspect and you can narrow the set of headers to inspect by including or excluding specific keys.

        This is used to indicate the web request component to inspect, in the FieldToMatch specification.

        If you want to inspect just the value of a single header, use the SingleHeader FieldToMatch setting instead.

        Example JSON: \"Headers\": { \"MatchPattern\": { \"All\": {} }, \"MatchScope\": \"KEY\", \"OversizeHandling\": \"MATCH\" }
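
        For illustration, the equivalent of that JSON example expressed with the existing Java SDK wafv2 builders:

        // import software.amazon.awssdk.services.wafv2.model.*;
        FieldToMatch allHeaderKeys = FieldToMatch.builder()
            .headers(Headers.builder()
                .matchPattern(HeaderMatchPattern.builder().all(All.builder().build()).build())
                .matchScope(MapMatchScope.KEY)
                .oversizeHandling(OversizeHandling.MATCH)
                .build())
            .build();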

        " @@ -4059,6 +4123,13 @@ "min":1, "pattern":".*\\S.*" }, + "LowReputationMode":{ + "type":"string", + "enum":[ + "ACTIVE_UNDER_DDOS", + "ALWAYS_ON" + ] + }, "ManagedProductDescriptor":{ "type":"structure", "members":{ @@ -4143,9 +4214,13 @@ "AWSManagedRulesACFPRuleSet":{ "shape":"AWSManagedRulesACFPRuleSet", "documentation":"

        Additional configuration for using the account creation fraud prevention (ACFP) managed rule group, AWSManagedRulesACFPRuleSet. Use this to provide account creation request information to the rule group. For web ACLs that protect CloudFront distributions, use this to also provide the information about how your distribution responds to account creation requests.

        For information about using the ACFP managed rule group, see WAF Fraud Control account creation fraud prevention (ACFP) rule group and WAF Fraud Control account creation fraud prevention (ACFP) in the WAF Developer Guide.

        " + }, + "AWSManagedRulesAntiDDoSRuleSet":{ + "shape":"AWSManagedRulesAntiDDoSRuleSet", + "documentation":"

        Additional configuration for using the anti-DDoS managed rule group, AWSManagedRulesAntiDDoSRuleSet. Use this to configure anti-DDoS behavior for the rule group.

        For information about using the anti-DDoS managed rule group, see WAF Anti-DDoS rule group and Distributed Denial of Service (DDoS) prevention in the WAF Developer Guide.

        " } }, - "documentation":"

        Additional information that's used by a managed rule group. Many managed rule groups don't require this.

        The rule groups used for intelligent threat mitigation require additional configuration:

        • Use the AWSManagedRulesACFPRuleSet configuration object to configure the account creation fraud prevention managed rule group. The configuration includes the registration and sign-up pages of your application and the locations in the account creation request payload of data, such as the user email and phone number fields.

        • Use the AWSManagedRulesATPRuleSet configuration object to configure the account takeover prevention managed rule group. The configuration includes the sign-in page of your application and the locations in the login request payload of data such as the username and password.

        • Use the AWSManagedRulesBotControlRuleSet configuration object to configure the protection level that you want the Bot Control rule group to use.

        For example specifications, see the examples section of CreateWebACL.

        " + "documentation":"

        Additional information that's used by a managed rule group. Many managed rule groups don't require this.

        The rule groups used for intelligent threat mitigation require additional configuration:

        • Use the AWSManagedRulesACFPRuleSet configuration object to configure the account creation fraud prevention managed rule group. The configuration includes the registration and sign-up pages of your application and the locations in the account creation request payload of data, such as the user email and phone number fields.

        • Use the AWSManagedRulesAntiDDoSRuleSet configuration object to configure the anti-DDoS managed rule group. The configuration includes the sensitivity levels to use in the rules that typically block and challenge requests that might be participating in DDoS attacks and the specification to use to indicate whether a request can handle a silent browser challenge.

        • Use the AWSManagedRulesATPRuleSet configuration object to configure the account takeover prevention managed rule group. The configuration includes the sign-in page of your application and the locations in the login request payload of data such as the username and password.

        • Use the AWSManagedRulesBotControlRuleSet configuration object to configure the protection level that you want the Bot Control rule group to use.

        For example specifications, see the examples section of CreateWebACL.

        " }, "ManagedRuleGroupConfigs":{ "type":"list", @@ -4180,11 +4255,11 @@ }, "ManagedRuleGroupConfigs":{ "shape":"ManagedRuleGroupConfigs", - "documentation":"

        Additional information that's used by a managed rule group. Many managed rule groups don't require this.

        The rule groups used for intelligent threat mitigation require additional configuration:

        • Use the AWSManagedRulesACFPRuleSet configuration object to configure the account creation fraud prevention managed rule group. The configuration includes the registration and sign-up pages of your application and the locations in the account creation request payload of data, such as the user email and phone number fields.

        • Use the AWSManagedRulesATPRuleSet configuration object to configure the account takeover prevention managed rule group. The configuration includes the sign-in page of your application and the locations in the login request payload of data such as the username and password.

        • Use the AWSManagedRulesBotControlRuleSet configuration object to configure the protection level that you want the Bot Control rule group to use.

        " + "documentation":"

        Additional information that's used by a managed rule group. Many managed rule groups don't require this.

        The rule groups used for intelligent threat mitigation require additional configuration:

        • Use the AWSManagedRulesACFPRuleSet configuration object to configure the account creation fraud prevention managed rule group. The configuration includes the registration and sign-up pages of your application and the locations in the account creation request payload of data, such as the user email and phone number fields.

        • Use the AWSManagedRulesAntiDDoSRuleSet configuration object to configure the anti-DDoS managed rule group. The configuration includes the sensitivity levels to use in the rules that typically block and challenge requests that might be participating in DDoS attacks and the specification to use to indicate whether a request can handle a silent browser challenge.

        • Use the AWSManagedRulesATPRuleSet configuration object to configure the account takeover prevention managed rule group. The configuration includes the sign-in page of your application and the locations in the login request payload of data such as the username and password.

        • Use the AWSManagedRulesBotControlRuleSet configuration object to configure the protection level that you want the Bot Control rule group to use.

        " }, "RuleActionOverrides":{ "shape":"RuleActionOverrides", - "documentation":"

        Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.

        Take care to verify the rule names in your overrides. If you provide a rule name that doesn't match the name of any rule in the rule group, WAF doesn't return an error and doesn't apply the override setting.

        You can use overrides for testing, for example you can override all of rule actions to Count and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.

        " + "documentation":"

        Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.

        Verify the rule names in your overrides carefully. With managed rule groups, WAF silently ignores any override that uses an invalid rule name. With customer-owned rule groups, invalid rule names in your overrides will cause web ACL updates to fail. An invalid rule name is any name that doesn't exactly match the case-sensitive name of an existing rule in the rule group.

        You can use overrides for testing, for example, you can override all of the rule actions to Count and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.
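
        For illustration, a single override expressed with the existing Java SDK wafv2 builders (the rule name is a placeholder; it must exactly match the case-sensitive name of a rule in the rule group):

        // import software.amazon.awssdk.services.wafv2.model.*;
        RuleActionOverride countOverride = RuleActionOverride.builder()
            .name("CategoryAdvertising") // placeholder rule name
            .actionToUse(RuleAction.builder().count(CountAction.builder().build()).build())
            .build();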

        " } }, "documentation":"

        A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.

        You cannot nest a ManagedRuleGroupStatement, for example for use inside a NotStatement or OrStatement. You cannot use a managed rule group inside another rule group. You can only reference a managed rule group as a top-level statement within a rule that you define in a web ACL.

        You are charged additional fees when you use the WAF Bot Control managed rule group AWSManagedRulesBotControlRuleSet, the WAF Fraud Control account takeover prevention (ATP) managed rule group AWSManagedRulesATPRuleSet, or the WAF Fraud Control account creation fraud prevention (ACFP) managed rule group AWSManagedRulesACFPRuleSet. For more information, see WAF Pricing.

        " @@ -4346,8 +4421,7 @@ }, "Method":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Inspect the HTTP method of the web request. The method indicates the type of operation that the request is asking the origin to perform.

        This is used in the FieldToMatch specification for some web request component types.

        JSON specification: \"Method\": {}

        " }, "MetricName":{ @@ -4386,8 +4460,7 @@ }, "NoneAction":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Specifies that WAF should do nothing. This is used for the OverrideAction setting on a Rule when the rule uses a rule group reference statement.

        This is used in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.

        JSON specification: \"None\": {}

        " }, "NotStatement":{ @@ -4401,6 +4474,17 @@ }, "documentation":"

        A logical rule statement used to negate the results of another rule statement. You provide one Statement within the NotStatement.

        " }, + "OnSourceDDoSProtectionConfig":{ + "type":"structure", + "required":["ALBLowReputationMode"], + "members":{ + "ALBLowReputationMode":{ + "shape":"LowReputationMode", + "documentation":"

        The level of DDoS protection that applies to web ACLs associated with Application Load Balancers. ACTIVE_UNDER_DDOS protection is enabled by default whenever a web ACL is associated with an Application Load Balancer. When an Application Load Balancer experiences high-load conditions or suspected DDoS attacks, the ACTIVE_UNDER_DDOS protection automatically rate limits traffic from known low reputation sources without disrupting Application Load Balancer availability. ALWAYS_ON protection provides constant, always-on monitoring of known low reputation sources for suspected DDoS attacks. While this provides a higher level of protection, it may impact some legitimate traffic.
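
        For illustration only, a minimal sketch of this configuration using the Java SDK builders generated from this model (the member name albLowReputationMode is an assumption based on standard codegen naming for the ALBLowReputationMode member):

        // import software.amazon.awssdk.services.wafv2.model.*;
        OnSourceDDoSProtectionConfig ddosProtection = OnSourceDDoSProtectionConfig.builder()
            .albLowReputationMode(LowReputationMode.ACTIVE_UNDER_DDOS) // the default protection level
            .build();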

        " + } + }, + "documentation":"

        Configures the level of DDoS protection that applies to web ACLs associated with Application Load Balancers.

        " + }, "OrStatement":{ "type":"structure", "required":["Statements"], @@ -4513,7 +4597,8 @@ "SCOPE_DOWN", "CUSTOM_KEYS", "ACP_RULE_SET_RESPONSE_INSPECTION", - "DATA_PROTECTION_CONFIG" + "DATA_PROTECTION_CONFIG", + "LOW_REPUTATION_MODE" ] }, "ParameterExceptionParameter":{ @@ -4686,13 +4771,11 @@ }, "PutPermissionPolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "QueryString":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Inspect the query string of the web request. This is the part of a URL that appears after a ? character, if any.

        This is used in the FieldToMatch specification for some web request component types.

        JSON specification: \"QueryString\": {}

        " }, "RateBasedStatement":{ @@ -4784,6 +4867,10 @@ "JA4Fingerprint":{ "shape":"RateLimitJA4Fingerprint", "documentation":"

        Use the request's JA4 fingerprint as an aggregate key. If you use a single JA4 fingerprint as your custom key, then each value fully defines an aggregation instance.

        " + }, + "ASN":{ + "shape":"RateLimitAsn", + "documentation":"

        Use an Autonomous System Number (ASN) derived from the request's originating or forwarded IP address as an aggregate key. Each distinct ASN contributes to the aggregation instance.

        " } }, "documentation":"

        Specifies a single custom aggregate key for a rate-based rule.

        Web requests that are missing any of the components specified in the aggregation keys are omitted from the rate-based rule evaluation and handling.

        " @@ -4812,6 +4899,11 @@ "max":2000000000, "min":10 }, + "RateLimitAsn":{ + "type":"structure", + "members":{}, + "documentation":"

        Specifies an Autonomous System Number (ASN) derived from the request's originating or forwarded IP address as an aggregate key for a rate-based rule. Each distinct ASN contributes to the aggregation instance. If you use a single ASN as your custom key, then each ASN fully defines an aggregation instance.
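
        For illustration only, a minimal sketch of using this key with the Java SDK builders generated from this model (class and member names assumed from standard codegen naming):

        // import software.amazon.awssdk.services.wafv2.model.*;
        RateBasedStatementCustomKey asnKey = RateBasedStatementCustomKey.builder()
            .asn(RateLimitAsn.builder().build()) // empty structure; the request's ASN itself is the key
            .build();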

        " + }, "RateLimitCookie":{ "type":"structure", "required":[ @@ -4832,14 +4924,12 @@ }, "RateLimitForwardedIP":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Specifies the first IP address in an HTTP header as an aggregate key for a rate-based rule. Each distinct forwarded IP address contributes to the aggregation instance.

        This setting is used only in the RateBasedStatementCustomKey specification of a rate-based rule statement. When you specify an IP or forwarded IP in the custom key settings, you must also specify at least one other key to use. You can aggregate on only the forwarded IP address by specifying FORWARDED_IP in your rate-based statement's AggregateKeyType.

        This data type supports using the forwarded IP address in the web request aggregation for a rate-based rule, in RateBasedStatementCustomKey. The JSON specification for using the forwarded IP address doesn't explicitly use this data type.

        JSON specification: \"ForwardedIP\": {}

        When you use this specification, you must also configure the forwarded IP address in the rate-based statement's ForwardedIPConfig.

        " }, "RateLimitHTTPMethod":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Specifies the request's HTTP method as an aggregate key for a rate-based rule. Each distinct HTTP method contributes to the aggregation instance. If you use just the HTTP method as your custom key, then each method fully defines an aggregation instance.

        JSON specification: \"RateLimitHTTPMethod\": {}

        " }, "RateLimitHeader":{ @@ -4862,8 +4952,7 @@ }, "RateLimitIP":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Specifies the IP address in the web request as an aggregate key for a rate-based rule. Each distinct IP address contributes to the aggregation instance.

        This setting is used only in the RateBasedStatementCustomKey specification of a rate-based rule statement. To use this in the custom key settings, you must specify at least one other key to use, along with the IP address. To aggregate on only the IP address, in your rate-based statement's AggregateKeyType, specify IP.

        JSON specification: \"RateLimitIP\": {}

        " }, "RateLimitJA3Fingerprint":{ @@ -4952,7 +5041,7 @@ "documentation":"

        The string representing the regular expression.

        " } }, - "documentation":"

        A single regular expression. This is used in a RegexPatternSet.

        " + "documentation":"

        A single regular expression. This is used in a RegexPatternSet and also in the configuration for the Amazon Web Services Managed Rules rule group AWSManagedRulesAntiDDoSRuleSet.

        " }, "RegexMatchStatement":{ "type":"structure", @@ -5529,7 +5618,7 @@ }, "RuleActionOverrides":{ "shape":"RuleActionOverrides", - "documentation":"

        Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.

        Take care to verify the rule names in your overrides. If you provide a rule name that doesn't match the name of any rule in the rule group, WAF doesn't return an error and doesn't apply the override setting.

        You can use overrides for testing, for example you can override all of rule actions to Count and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.

        " + "documentation":"

        Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.

        Verify the rule names in your overrides carefully. With managed rule groups, WAF silently ignores any override that uses an invalid rule name. With customer-owned rule groups, invalid rule names in your overrides will cause web ACL updates to fail. An invalid rule name is any name that doesn't exactly match the case-sensitive name of an existing rule in the rule group.

        You can use overrides for testing, for example, you can override all of the rule actions to Count and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.

        " } }, "documentation":"

        A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.

        You cannot nest a RuleGroupReferenceStatement, for example for use inside a NotStatement or OrStatement. You cannot use a rule group reference statement inside another rule group. You can only reference a rule group as a top-level statement within a rule that you define in a web ACL.

        " @@ -5667,6 +5756,14 @@ "HIGH" ] }, + "SensitivityToAct":{ + "type":"string", + "enum":[ + "LOW", + "MEDIUM", + "HIGH" + ] + }, "SingleCookieName":{ "type":"string", "max":60, @@ -5823,6 +5920,10 @@ "RegexMatchStatement":{ "shape":"RegexMatchStatement", "documentation":"

        A rule statement used to search web request components for a match against a single regular expression.

        " + }, + "AsnMatchStatement":{ + "shape":"AsnMatchStatement", + "documentation":"

        A rule statement that inspects web traffic based on the Autonomous System Number (ASN) associated with the request's IP address.

        For additional details, see ASN match rule statement in the WAF Developer Guide.

        " } }, "documentation":"

        The processing guidance for a Rule, used by WAF to determine whether a web request matches the rule.

        For example specifications, see the examples section of CreateWebACL.

        " @@ -5910,8 +6011,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -6030,8 +6130,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateIPSetRequest":{ "type":"structure", @@ -6296,6 +6395,10 @@ "AssociationConfig":{ "shape":"AssociationConfig", "documentation":"

        Specifies custom configurations for the associations between the web ACL and protected resources.

        Use this to customize the maximum size of the request body that your protected resources forward to WAF for inspection. You can customize this setting for CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access resources. The default setting is 16 KB (16,384 bytes).

        You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see WAF Pricing.

        For Application Load Balancer and AppSync, the limit is fixed at 8 KB (8,192 bytes).

        " + }, + "OnSourceDDoSProtectionConfig":{ + "shape":"OnSourceDDoSProtectionConfig", + "documentation":"

        Specifies the type of DDoS protection to apply to web request data for a web ACL. For most scenarios, it is recommended to use the default protection level, ACTIVE_UNDER_DDOS. If a web ACL is associated with multiple Application Load Balancers, the changes you make to DDoS protection in that web ACL will apply to all associated Application Load Balancers.

        " } } }, @@ -6320,10 +6423,16 @@ }, "UriPath":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Inspect the path component of the URI of the web request. This is the part of the web request that identifies a resource. For example, /images/daily-ad.jpg.

        This is used in the FieldToMatch specification for some web request component types.

        JSON specification: \"UriPath\": {}

        " }, + "UsageOfAction":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "UsernameField":{ "type":"structure", "required":["Identifier"], @@ -6644,6 +6753,10 @@ "RetrofittedByFirewallManager":{ "shape":"Boolean", "documentation":"

        Indicates whether this web ACL was created by a customer account and then retrofitted by Firewall Manager. If true, then the web ACL is currently being managed by a Firewall Manager WAF policy, and only Firewall Manager can manage any Firewall Manager rule groups in the web ACL. See also the properties ManagedByFirewallManager, PreProcessFirewallManagerRuleGroups, and PostProcessFirewallManagerRuleGroups.

        " + }, + "OnSourceDDoSProtectionConfig":{ + "shape":"OnSourceDDoSProtectionConfig", + "documentation":"

        Configures the level of DDoS protection that applies to web ACLs associated with Application Load Balancers.

        " } }, "documentation":"

        A web ACL defines a collection of rules to use to inspect and control web requests. Each rule has a statement that defines what to look for in web requests and an action that WAF applies to requests that match the statement. In the web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one or more Amazon Web Services resources to protect. The resource types include Amazon CloudFront distribution, Amazon API Gateway REST API, Application Load Balancer, AppSync GraphQL API, Amazon Cognito user pool, App Runner service, Amplify application, and Amazon Web Services Verified Access instance.

        " diff --git a/services/wellarchitected/pom.xml b/services/wellarchitected/pom.xml index f0bba282893a..c7376a139f7d 100644 --- a/services/wellarchitected/pom.xml +++ b/services/wellarchitected/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT wellarchitected AWS Java SDK :: Services :: Well Architected diff --git a/services/wisdom/pom.xml b/services/wisdom/pom.xml index 47b2abf6a5e2..bd7f213d22eb 100644 --- a/services/wisdom/pom.xml +++ b/services/wisdom/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT wisdom AWS Java SDK :: Services :: Wisdom diff --git a/services/workdocs/pom.xml b/services/workdocs/pom.xml index ec2e237dcc60..5ee53d49a64a 100644 --- a/services/workdocs/pom.xml +++ b/services/workdocs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT workdocs AWS Java SDK :: Services :: Amazon WorkDocs diff --git a/services/workmail/pom.xml b/services/workmail/pom.xml index 9b3c1e9d12f0..cdc7b5d85b95 100644 --- a/services/workmail/pom.xml +++ b/services/workmail/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT 4.0.0 workmail diff --git a/services/workmailmessageflow/pom.xml b/services/workmailmessageflow/pom.xml index c9df0ec6c4d5..012c5c9c3df7 100644 --- a/services/workmailmessageflow/pom.xml +++ b/services/workmailmessageflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT workmailmessageflow AWS Java SDK :: Services :: WorkMailMessageFlow diff --git a/services/workspaces/pom.xml b/services/workspaces/pom.xml index 2d0d320dc949..5cbd82fbefea 100644 --- a/services/workspaces/pom.xml +++ b/services/workspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT workspaces AWS Java SDK :: Services :: Amazon WorkSpaces diff --git a/services/workspacesthinclient/pom.xml b/services/workspacesthinclient/pom.xml index 87fe553e63c2..345a5a2dc865 100644 --- a/services/workspacesthinclient/pom.xml +++ b/services/workspacesthinclient/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT workspacesthinclient AWS Java SDK :: Services :: Work Spaces Thin Client diff --git a/services/workspacesthinclient/src/main/resources/codegen-resources/service-2.json b/services/workspacesthinclient/src/main/resources/codegen-resources/service-2.json index 4769a2e652cd..e80fd3f4c615 100644 --- a/services/workspacesthinclient/src/main/resources/codegen-resources/service-2.json +++ b/services/workspacesthinclient/src/main/resources/codegen-resources/service-2.json @@ -26,9 +26,9 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], "documentation":"

        Creates an environment for your thin client devices.

        ", @@ -46,9 +46,9 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], "documentation":"

        Deletes a thin client device.

        ", @@ -67,9 +67,9 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], "documentation":"

        Deletes an environment.

        ", @@ -88,9 +88,9 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], "documentation":"

        Deregisters a thin client device.

        ", @@ -239,9 +239,9 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], "documentation":"

        Assigns one or more tags (key-value pairs) to the specified resource.

        ", @@ -259,9 +259,9 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], "documentation":"

        Removes a tag or tags from a resource.

        ", @@ -300,6 +300,7 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} diff --git a/services/workspacesweb/pom.xml b/services/workspacesweb/pom.xml index cd048764f495..40b591e74bf6 100644 --- a/services/workspacesweb/pom.xml +++ b/services/workspacesweb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT workspacesweb AWS Java SDK :: Services :: Work Spaces Web diff --git a/services/xray/pom.xml b/services/xray/pom.xml index 69cbc470ddf4..b8ebfb6fe0e6 100644 --- a/services/xray/pom.xml +++ b/services/xray/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT xray AWS Java SDK :: Services :: AWS X-Ray diff --git a/test/architecture-tests/pom.xml b/test/architecture-tests/pom.xml index 225ec789cdf1..3b06a96cd2f4 100644 --- a/test/architecture-tests/pom.xml +++ b/test/architecture-tests/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ../../pom.xml diff --git a/test/architecture-tests/src/test/java/software/amazon/awssdk/archtests/ArchUtils.java b/test/architecture-tests/src/test/java/software/amazon/awssdk/archtests/ArchUtils.java index d3b3af42cb20..acae7b3b4a0e 100644 --- a/test/architecture-tests/src/test/java/software/amazon/awssdk/archtests/ArchUtils.java +++ b/test/architecture-tests/src/test/java/software/amazon/awssdk/archtests/ArchUtils.java @@ -26,6 +26,11 @@ public static Pattern classNameToPattern(Class clazz) { return Pattern.compile(".*/" + clazz.getCanonicalName().replace('.', '/') + ".class"); } + public static Pattern classWithInnerClassesToPattern(Class clazz) { + // inner or inline/anonymous classes have $ followed by a name or number eg "$Inner" or "$1" + return Pattern.compile(".*/" + clazz.getCanonicalName().replace('.', '/') + "(\\$.*)?.class"); + } + public static Pattern classNameToPattern(String className) { return Pattern.compile(".*/" + className.replace('.', '/') + ".class"); } diff --git a/test/architecture-tests/src/test/java/software/amazon/awssdk/archtests/CodingConventionWithSuppressionTest.java b/test/architecture-tests/src/test/java/software/amazon/awssdk/archtests/CodingConventionWithSuppressionTest.java index fd921257f5f5..b54955cc56f2 100644 --- a/test/architecture-tests/src/test/java/software/amazon/awssdk/archtests/CodingConventionWithSuppressionTest.java +++ b/test/architecture-tests/src/test/java/software/amazon/awssdk/archtests/CodingConventionWithSuppressionTest.java @@ -33,6 +33,7 @@ import java.util.regex.Pattern; import org.junit.jupiter.api.Test; import software.amazon.awssdk.core.internal.http.pipeline.stages.MakeHttpRequestStage; +import software.amazon.awssdk.core.sync.ResponseTransformer; import software.amazon.awssdk.metrics.publishers.emf.EmfMetricLoggingPublisher; import software.amazon.awssdk.metrics.publishers.emf.internal.MetricEmfConverter; import software.amazon.awssdk.utils.Logger; @@ -52,7 +53,9 @@ public class CodingConventionWithSuppressionTest { ArchUtils.classNameToPattern("software.amazon.awssdk.services.s3.internal.crt.S3CrtResponseHandlerAdapter"))); private static final Set ALLOWED_ERROR_LOG_SUPPRESSION = new HashSet<>( - Arrays.asList(ArchUtils.classNameToPattern(EmfMetricLoggingPublisher.class))); + Arrays.asList( + ArchUtils.classNameToPattern(EmfMetricLoggingPublisher.class), + 
ArchUtils.classWithInnerClassesToPattern(ResponseTransformer.class))); @Test void shouldNotAbuseWarnLog() { diff --git a/test/auth-tests/pom.xml b/test/auth-tests/pom.xml index 19d2afa603b3..e1909fdec568 100644 --- a/test/auth-tests/pom.xml +++ b/test/auth-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/bundle-logging-bridge-binding-test/pom.xml b/test/bundle-logging-bridge-binding-test/pom.xml index a9c2ad72914b..d1e1802448dd 100644 --- a/test/bundle-logging-bridge-binding-test/pom.xml +++ b/test/bundle-logging-bridge-binding-test/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/bundle-shading-tests/pom.xml b/test/bundle-shading-tests/pom.xml index 996ed8129599..9bdb03771a42 100644 --- a/test/bundle-shading-tests/pom.xml +++ b/test/bundle-shading-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/codegen-generated-classes-test/pom.xml b/test/codegen-generated-classes-test/pom.xml index 796e62ac7bba..234dda323541 100644 --- a/test/codegen-generated-classes-test/pom.xml +++ b/test/codegen-generated-classes-test/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.52-SNAPSHOT + 2.31.65-SNAPSHOT ../../pom.xml diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/customization.config b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/customization.config new file mode 100644 index 000000000000..86839537eeab --- /dev/null +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/customization.config @@ -0,0 +1,4 @@ +{ + "skipEndpointTestGeneration": true, + "enableEnvironmentBearerToken": true +} diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/endpoint-rule-set.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/endpoint-rule-set.json new file mode 100644 index 000000000000..cc38f1ffb165 --- /dev/null +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/endpoint-rule-set.json @@ -0,0 +1,355 @@ +{ + "version": "1.3", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": true, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{Region}", + "signingName": "environment-token" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://environment-token-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{Region}", + "signingName": "environment-token" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://environment-token-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{Region}", + "signingName": "environment-token" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": 
[ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://environment-token.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{Region}", + "signingName": "environment-token" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://environment-token.{Region}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{Region}", + "signingName": "environment-token" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] +} \ No newline at end of file diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/endpoint-tests.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/endpoint-tests.json new file mode 100644 index 000000000000..f94902ff9d99 --- /dev/null +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/endpoint-tests.json @@ -0,0 +1,5 @@ +{ + "testCases": [ + ], + "version": "1.0" +} \ No newline at end of file diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/service-2.json new file mode 100644 index 000000000000..c70811a87f80 --- /dev/null +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/service-2.json @@ -0,0 +1,38 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2016-03-11", + "endpointPrefix":"environment-token", + "auth":["aws.auth#sigv4", "smithy.api#httpBearerAuth"], + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"EnvironmentTokenProviderService", + "serviceFullName":"Environment Token Provider Service", + "serviceId":"EnvironmentTokenProviderService", + "signatureVersion":"v4", + "targetPrefix":"EnvironmentTokenProviderService", + "timestampFormat":"unixTimestamp", + "uid":"restjson-2016-03-11" + }, + "operations":{ + "OneOperation":{ + "name":"OneOperation", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/oneoperation" + }, + "input":{"shape":"OneShape"} + } + }, + "shapes": { + "OneShape": { + "type": "structure", + "members": { + "StringMember": { + "shape": "String" + } + } + }, + "String":{"type":"string"} + } +} diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/multiauth/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/multiauth/service-2.json index b5047e2734fc..6fd1f03e1fd1 100644 --- a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/multiauth/service-2.json +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/multiauth/service-2.json @@ -13,7 +13,7 @@ "timestampFormat":"unixTimestamp", "uid":"restjson-2016-03-11" }, - "operations":{ + "operations": { "multiAuthWithOnlySigv4a":{ "name":"multiAuthWithOnlySigv4a", "http":{ @@ -26,6 +26,18 @@ 
"ApiType":{"value":"NoEndpointSigningProperties"} } }, + "multiAuthWithOnlySigv4":{ + "name":"multiAuthWithOnlySigv4", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/multiAuthWithOnlySigv4" + }, + "input":{"shape":"SampleRequest"}, + "auth": ["aws.auth#sigv4"], + "staticContextParams":{ + "ApiType":{"value":"NoEndpointSigningProperties"} + } + }, "multiAuthWithOnlySigv4aAndSigv4":{ "name":"multiAuthWithOnlySigv4aAndSigv4", "http":{ @@ -72,6 +84,17 @@ "value": "onlySigv4a" } } + }, + "multiAuthWithoutAuthScheme":{ + "name":"multiAuthWithoutAuthScheme", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/multiAuthWithoutAuthScheme" + }, + "input":{"shape":"SampleRequest"}, + "staticContextParams":{ + "ApiType":{"value":"NoEndpointSigningProperties"} + } } }, "shapes": { diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/PreferredAuthSchemeProviderTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/PreferredAuthSchemeProviderTest.java new file mode 100644 index 000000000000..1868848bdf3c --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/PreferredAuthSchemeProviderTest.java @@ -0,0 +1,121 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+package software.amazon.awssdk.services;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Stream;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.multiauth.auth.scheme.MultiauthAuthSchemeParams;
+import software.amazon.awssdk.services.multiauth.auth.scheme.MultiauthAuthSchemeProvider;
+
+public class PreferredAuthSchemeProviderTest {
+
+    private static final String OPERATION_SIGV4_ONLY = "multiAuthWithOnlySigv4";
+    private static final String OPERATION_SIGV4A_ONLY = "multiAuthWithOnlySigv4a";
+    private static final String OPERATION_SIGV4A_AND_SIGV4 = "multiAuthWithOnlySigv4aAndSigv4";
+    private static final String OPERATION_NOAUTH = "multiAuthNoAuth";
+
+    private static final String SIGV4 = "sigv4";
+    private static final String PREFIXED_SIGV4 = "aws.auth#sigv4";
+    private static final String PREFIXED_SIGV4A = "aws.auth#sigv4a";
+    private static final String SIGV4A = "sigv4a";
+    private static final String BEARER = "bearer";
+    private static final String ANONYMOUS = "noauth";
+
+    @ParameterizedTest(name = "{3}")
+    @MethodSource("authSchemeTestCases")
+    void testAuthSchemePreference(List<String> preferredAuthSchemes, String operation,
+                                  String expectedFirstScheme, String testName) {
+        MultiauthAuthSchemeProvider provider = MultiauthAuthSchemeProvider.defaultProvider(preferredAuthSchemes);
+
+        MultiauthAuthSchemeParams params = MultiauthAuthSchemeParams
+            .builder()
+            .region(Region.US_WEST_2)
+            .operation(operation)
+            .build();
+
+        List<AuthSchemeOption> authSchemes = provider.resolveAuthScheme(params);
+
+        Assertions.assertFalse(authSchemes.isEmpty());
+        Assertions.assertEquals(expectedFirstScheme, authSchemes.get(0).schemeId());
+    }
+
+    static Stream<Arguments> authSchemeTestCases() {
+        return Stream.of(
+            Arguments.of(
+                Arrays.asList(BEARER, ANONYMOUS),
+                OPERATION_SIGV4A_AND_SIGV4,
+                PREFIXED_SIGV4A,
+                "Unsupported auth schemes only"
+            ),
+
+            Arguments.of(
+                Arrays.asList(SIGV4, SIGV4A),
+                OPERATION_NOAUTH,
+                PREFIXED_SIGV4,
+                "Operation with no auth scheme should default to Sigv4"
+            ),
+
+            Arguments.of(
+                Arrays.asList(BEARER, SIGV4, ANONYMOUS),
+                OPERATION_SIGV4A_AND_SIGV4,
+                PREFIXED_SIGV4,
+                "Mix of supported and unsupported schemes"
+            ),
+
+            Arguments.of(
+                Arrays.asList(SIGV4, SIGV4A),
+                OPERATION_SIGV4A_AND_SIGV4,
+                PREFIXED_SIGV4,
+                "All supported schemes in reverse order"
+            ),
+
+            Arguments.of(
+                Arrays.asList(SIGV4A),
+                OPERATION_SIGV4_ONLY,
+                PREFIXED_SIGV4,
+                "Operation with only sigv4 supported scheme"
+            ),
+
+            Arguments.of(
+                Arrays.asList(SIGV4, SIGV4A),
+                OPERATION_SIGV4A_ONLY,
+                PREFIXED_SIGV4A,
+                "Operation with only sigv4a supported scheme"
+            ),
+
+            Arguments.of(
+                Collections.emptyList(),
+                OPERATION_SIGV4A_AND_SIGV4,
+                PREFIXED_SIGV4A,
+                "Empty preference list"
+            ),
+
+            Arguments.of(
+                Arrays.asList(SIGV4A, SIGV4, BEARER),
+                OPERATION_SIGV4A_AND_SIGV4,
+                PREFIXED_SIGV4A,
+                "First preference is supported"
+            )
+        );
+    }
+}
diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/bearerauth/ClientBuilderTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/bearerauth/ClientBuilderTest.java
index e9ff4da41fa1..371d089dea2b 100644
--- 
a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/bearerauth/ClientBuilderTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/bearerauth/ClientBuilderTest.java @@ -57,6 +57,9 @@ public void syncClient_customTokenIdentityProviderSet_presentInFinalConfig() { assertThat(config.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER)) .isSameAs(mockProvider); + + assertThat(builder.buildClient().serviceClientConfiguration().tokenProvider()) + .isSameAs(mockProvider); } @Test diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/environmenttokenprovider/EnvironmentTokenProviderTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/environmenttokenprovider/EnvironmentTokenProviderTest.java new file mode 100644 index 000000000000..c16069a30dd5 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/environmenttokenprovider/EnvironmentTokenProviderTest.java @@ -0,0 +1,304 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.environmenttokenprovider; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.HashMap; +import java.util.Map; +import java.util.stream.Stream; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.auth.token.credentials.StaticTokenProvider; +import software.amazon.awssdk.core.SdkSystemSetting; +import software.amazon.awssdk.core.useragent.BusinessMetricFeatureId; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.services.environmenttokenprovider.auth.scheme.EnvironmentTokenProviderAuthSchemeProvider; +import software.amazon.awssdk.services.environmenttokenprovider.model.OneOperationRequest; +import software.amazon.awssdk.testutils.EnvironmentVariableHelper; +import software.amazon.awssdk.testutils.service.http.MockAsyncHttpClient; +import software.amazon.awssdk.testutils.service.http.MockSyncHttpClient; + +public class EnvironmentTokenProviderTest { + private static final String ENV_NAME = "AWS_BEARER_TOKEN_ENVIRONMENT_TOKEN"; + private static final String SYSTEM_PROPERTY_NAME = "aws.bearerTokenEnvironmentToken"; + public static final String ENV_TOKEN = "env-test-token"; + public static final String SYSTEM_TEST_TOKEN = "system-test-token"; + + private MockSyncHttpClient mockHttpClient; + private MockAsyncHttpClient mockAsyncHttpClient; + private String systemPropertyBeforeTest; + + private final EnvironmentVariableHelper environmentVariableHelper = new EnvironmentVariableHelper(); + + @BeforeEach + void setUp() { + mockHttpClient = new MockSyncHttpClient(); + 
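// Editorial note (sketch, not part of the original patch): these tests exercise the
+        // generated "environment token provider" client, which is expected to resolve a
+        // bearer token from the service-specific environment variable or system property.
+        // A rough hand-wired equivalent (illustrative only; identifiers reused from this test):
+        //
+        //   EnvironmentTokenProviderClient client = EnvironmentTokenProviderClient.builder()
+        //       .tokenProvider(StaticTokenProvider.create(() -> System.getenv(ENV_NAME)))
+        //       .build();
+        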
mockAsyncHttpClient = new MockAsyncHttpClient();
+        systemPropertyBeforeTest = System.getProperty(SYSTEM_PROPERTY_NAME);
+    }
+
+    @AfterEach
+    void tearDown() {
+        mockHttpClient.reset();
+        mockAsyncHttpClient.reset();
+        environmentVariableHelper.reset();
+        if (systemPropertyBeforeTest != null) {
+            System.setProperty(SYSTEM_PROPERTY_NAME, systemPropertyBeforeTest);
+        } else {
+            System.clearProperty(SYSTEM_PROPERTY_NAME);
+        }
+    }
+
+    @ParameterizedTest
+    @MethodSource("testCases")
+    void testAsyncClient(TestCase testCase) {
+        setupSystemAndEnv(testCase);
+
+        mockAsyncHttpClient.stubNextResponse(mockResponse());
+
+        EnvironmentTokenProviderAsyncClientBuilder clientBuilder = EnvironmentTokenProviderAsyncClient
+            .builder()
+            .httpClient(mockAsyncHttpClient);
+
+        if (testCase.authSchemeProvider != null) {
+            clientBuilder.authSchemeProvider(testCase.authSchemeProvider);
+        }
+
+        EnvironmentTokenProviderAsyncClient client = clientBuilder.build();
+
+        if (testCase.operationToken == null) {
+            client.oneOperation(b -> {}).join();
+        } else {
+            client.oneOperation(requestWithOperationToken(testCase)).join();
+        }
+
+        SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest();
+
+        verifyRequest(testCase, loggedRequest);
+    }
+
+    @ParameterizedTest
+    @MethodSource("testCases")
+    void testSyncClient(TestCase testCase) {
+        setupSystemAndEnv(testCase);
+
+        mockHttpClient.stubNextResponse(mockResponse());
+
+        EnvironmentTokenProviderClientBuilder clientBuilder = EnvironmentTokenProviderClient
+            .builder()
+            .httpClient(mockHttpClient);
+
+        if (testCase.authSchemeProvider != null) {
+            clientBuilder.authSchemeProvider(testCase.authSchemeProvider);
+        }
+
+        EnvironmentTokenProviderClient client = clientBuilder.build();
+
+        if (testCase.operationToken == null) {
+            client.oneOperation(b -> {});
+        } else {
+            client.oneOperation(requestWithOperationToken(testCase));
+        }
+
+        SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockHttpClient.getLastRequest();
+
+        verifyRequest(testCase, loggedRequest);
+    }
+
+    private static void verifyRequest(TestCase testCase, SdkHttpFullRequest loggedRequest) {
+        if (testCase.expectBearerAuth) {
+            assertThat(loggedRequest.firstMatchingHeader("Authorization").get())
+                .startsWith("Bearer");
+        } else {
+            assertThat(loggedRequest.firstMatchingHeader("Authorization").get())
+                .startsWith("AWS4-HMAC-SHA256");
+        }
+
+        if (testCase.expectBusinessMetricSet) {
+            assertThat(loggedRequest.firstMatchingHeader("User-Agent").get())
+                .matches(".*m\\/[A-Za-z0-9,]+" + BusinessMetricFeatureId.BEARER_SERVICE_ENV_VARS);
+        } else {
+            assertThat(loggedRequest.firstMatchingHeader("User-Agent").get())
+                .doesNotMatch(".*m\\/[A-Za-z0-9,]+" + BusinessMetricFeatureId.BEARER_SERVICE_ENV_VARS);
+        }
+    }
+
+    static Stream<TestCase> testCases() {
+        return Stream.of(
+            TestCase.builder()
+                    .description("Does not use bearer auth when ENV token is unset")
+                    .expectBearerAuth(false)
+                    .build(),
+
+            TestCase.builder()
+                    .description("Uses bearer auth when ENV token is set")
+                    .envVar(ENV_NAME, ENV_TOKEN)
+                    .expectBearerAuth(true)
+                    .expectedBearerToken(ENV_TOKEN)
+                    .expectBusinessMetricSet(true)
+                    .build(),
+
+            TestCase.builder()
+                    .description("Uses bearer auth when system property token is set")
+                    .envVar(ENV_NAME, "some-other-token")
+                    .systemProperty(SYSTEM_TEST_TOKEN)
+                    .expectBearerAuth(true)
+                    .expectedBearerToken(SYSTEM_TEST_TOKEN)
+                    .expectBusinessMetricSet(true)
+                    .build(),
+
+            TestCase.builder()
+                    .description("Uses bearer auth from environment over auth scheme preference")
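+                    // Editorial note: this case pins down precedence. A service-specific bearer
+                    // token from the environment is expected to win even when the shared
+                    // AWS_AUTH_SCHEME_PREFERENCE setting asks for sigv4.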
+                    .envVar(ENV_NAME, ENV_TOKEN)
+                    .envVar(
+                        SdkSystemSetting.AWS_AUTH_SCHEME_PREFERENCE.environmentVariable(),
+                        "sigv4")
+                    .expectBearerAuth(true)
+                    .expectedBearerToken(ENV_TOKEN)
+                    .expectBusinessMetricSet(true)
+                    .build(),
+
+            TestCase.builder()
+                    .description("Doesn't use bearer when AuthSchemeProvider is manually configured on the client")
+                    .envVar(ENV_NAME, ENV_TOKEN)
+                    .authSchemeProvider(EnvironmentTokenProviderAuthSchemeProvider.defaultProvider())
+                    .expectBearerAuth(false)
+                    .expectBusinessMetricSet(false)
+                    .build(),
+
+            TestCase.builder()
+                    .description("Business metric is not set when the token is overridden on the operation")
+                    .envVar(ENV_NAME, ENV_TOKEN)
+                    .operationToken("operation-token")
+                    .expectBearerAuth(true)
+                    .expectedBearerToken("operation-token")
+                    .expectBusinessMetricSet(false)
+                    .build()
+        );
+    }
+
+    private static OneOperationRequest requestWithOperationToken(TestCase testCase) {
+        return OneOperationRequest.builder()
+                                  .overrideConfiguration(c -> c.tokenIdentityProvider(
+                                      StaticTokenProvider.create(() -> testCase.operationToken)))
+                                  .build();
+    }
+
+    private void setupSystemAndEnv(TestCase testCase) {
+        testCase.envVars.forEach(environmentVariableHelper::set);
+        if (testCase.systemProperty != null) {
+            System.setProperty(SYSTEM_PROPERTY_NAME, testCase.systemProperty);
+        }
+    }
+
+    private HttpExecuteResponse mockResponse() {
+        return HttpExecuteResponse.builder()
+                                  .response(SdkHttpResponse.builder().statusCode(200).build())
+                                  .build();
+    }
+
+    static final class TestCase {
+        final String description;
+        final Map<String, String> envVars;
+        final String systemProperty;
+        final EnvironmentTokenProviderAuthSchemeProvider authSchemeProvider;
+        final String operationToken;
+        final boolean expectBearerAuth;
+        final String expectedBearerToken;
+        final boolean expectBusinessMetricSet;
+
+        private TestCase(Builder builder) {
+            this.description = builder.description;
+            this.envVars = builder.envVars;
+            this.systemProperty = builder.systemProperty;
+            this.authSchemeProvider = builder.authSchemeProvider;
+            this.operationToken = builder.operationToken;
+            this.expectBearerAuth = builder.expectBearerAuth;
+            this.expectedBearerToken = builder.expectedBearerToken;
+            this.expectBusinessMetricSet = builder.expectBusinessMetricSet;
+        }
+
+        @Override
+        public String toString() {
+            return description;
+        }
+
+        static Builder builder() {
+            return new Builder();
+        }
+
+        static class Builder {
+            private String description;
+            private Map<String, String> envVars = new HashMap<>();
+            private String systemProperty;
+            private EnvironmentTokenProviderAuthSchemeProvider authSchemeProvider;
+            private String operationToken;
+            private boolean expectBearerAuth;
+            private String expectedBearerToken;
+            private boolean expectBusinessMetricSet;
+
+            public Builder description(String description) {
+                this.description = description;
+                return this;
+            }
+
+            public Builder envVar(String key, String value) {
+                this.envVars.put(key, value);
+                return this;
+            }
+
+            public Builder systemProperty(String systemProperty) {
+                this.systemProperty = systemProperty;
+                return this;
+            }
+
+            public Builder authSchemeProvider(EnvironmentTokenProviderAuthSchemeProvider authSchemeProvider) {
+                this.authSchemeProvider = authSchemeProvider;
+                return this;
+            }
+
+            public Builder operationToken(String operationToken) {
+                this.operationToken = operationToken;
+                return this;
+            }
+
+            public Builder expectBearerAuth(boolean expectBearerAuth) {
+                this.expectBearerAuth = expectBearerAuth;
+                return this;
+            }
+
+            public Builder expectedBearerToken(String 
expectedBearerToken) { + this.expectedBearerToken = expectedBearerToken; + return this; + } + + public Builder expectBusinessMetricSet(boolean expectBusinessMetricSet) { + this.expectBusinessMetricSet = expectBusinessMetricSet; + return this; + } + + public TestCase build() { + return new TestCase(this); + } + } + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/multiauth/AuthSchemePreferenceResolverFunctionalTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/multiauth/AuthSchemePreferenceResolverFunctionalTest.java new file mode 100644 index 000000000000..502f5aa20be9 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/multiauth/AuthSchemePreferenceResolverFunctionalTest.java @@ -0,0 +1,258 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.multiauth; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Stream; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.core.SdkSystemSetting; +import software.amazon.awssdk.core.SelectedAuthScheme; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme; +import software.amazon.awssdk.http.auth.spi.signer.AsyncSignRequest; +import software.amazon.awssdk.http.auth.spi.signer.AsyncSignedRequest; +import software.amazon.awssdk.http.auth.spi.signer.HttpSigner; +import software.amazon.awssdk.http.auth.spi.signer.SignRequest; +import software.amazon.awssdk.http.auth.spi.signer.SignedRequest; +import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; +import software.amazon.awssdk.identity.spi.IdentityProvider; +import software.amazon.awssdk.identity.spi.IdentityProviders; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.profiles.ProfileProperty; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.multiauth.auth.scheme.MultiauthAuthSchemeProvider; +import software.amazon.awssdk.services.multiauth.model.MultiAuthWithOnlySigv4AAndSigv4Request; +import software.amazon.awssdk.testutils.EnvironmentVariableHelper; +import software.amazon.awssdk.utils.StringInputStream; + +public class 
AuthSchemePreferenceResolverFunctionalTest {
+    private final EnvironmentVariableHelper helper = new EnvironmentVariableHelper();
+
+    @AfterEach
+    void tearDown() {
+        System.clearProperty(SdkSystemSetting.AWS_AUTH_SCHEME_PREFERENCE.property());
+        helper.reset();
+    }
+
+    @ParameterizedTest
+    @MethodSource("testCases")
+    void resolvesAuthSchemePreference(TestCase testCase) {
+        try {
+            MultiauthClientBuilder builder =
+                MultiauthClient.builder()
+                               .region(Region.US_WEST_2)
+                               .credentialsProvider(AnonymousCredentialsProvider.create());
+
+            builder.putAuthScheme(authScheme("aws.auth#sigv4a", new SkipCrtNoOpSigner()));
+
+            if (testCase.clientSetting != null) {
+                builder.authSchemeProvider(MultiauthAuthSchemeProvider.defaultProvider(testCase.clientSetting));
+            }
+
+            if (testCase.systemPropSetting != null) {
+                System.setProperty(SdkSystemSetting.AWS_AUTH_SCHEME_PREFERENCE.property(), testCase.systemPropSetting);
+            }
+
+            if (testCase.envVarSetting != null) {
+                helper.set(SdkSystemSetting.AWS_AUTH_SCHEME_PREFERENCE.environmentVariable(), testCase.envVarSetting);
+            }
+
+            ProfileFile.Builder profileFile = ProfileFile.builder().type(ProfileFile.Type.CONFIGURATION);
+
+            if (testCase.profileSetting != null) {
+                profileFile.content(new StringInputStream("[default]\n" +
+                                                          ProfileProperty.AUTH_SCHEME_PREFERENCE + " = " + testCase.profileSetting));
+            } else {
+                profileFile.content(new StringInputStream(""));
+            }
+
+            AuthSchemeCapturingInterceptor interceptor = new AuthSchemeCapturingInterceptor();
+
+            builder.overrideConfiguration(c -> c.defaultProfileFile(profileFile.build())
+                                                .defaultProfileName("default")
+                                                .addExecutionInterceptor(interceptor));
+
+            MultiauthClient client = builder.build();
+
+            assertThatThrownBy(() ->
+                client.multiAuthWithOnlySigv4aAndSigv4(MultiAuthWithOnlySigv4AAndSigv4Request.builder().build())
+            ).isInstanceOf(AuthSchemeCapturingInterceptor.CaptureException.class);
+
+            assertThat(interceptor.authScheme()).isEqualTo(testCase.resolvedAuthScheme);
+        } finally {
+            tearDown();
+        }
+    }
+
+    private static AuthScheme<AwsCredentialsIdentity> authScheme(String schemeId, HttpSigner<AwsCredentialsIdentity> signer) {
+        return new AuthScheme<AwsCredentialsIdentity>() {
+            @Override
+            public String schemeId() {
+                return schemeId;
+            }
+
+            @Override
+            public IdentityProvider<AwsCredentialsIdentity> identityProvider(IdentityProviders providers) {
+                return providers.identityProvider(AwsCredentialsIdentity.class);
+            }
+
+            @Override
+            public HttpSigner<AwsCredentialsIdentity> signer() {
+                return signer;
+            }
+        };
+    }
+
+    static Stream<Arguments> testCases() {
+        return Stream.of(
+            Arguments.of(new TestCase(
+                null,
+                null,
+                null,
+                Arrays.asList("sigv4", "noauth"),
+                "sigv4",
+                "Client config is used when set")),
+
+            Arguments.of(new TestCase(
+                null,
+                null,
+                "sigv4,sigv4a,bearer",
+                null,
+                "sigv4",
+                "System property value is used")),
+
+            Arguments.of(new TestCase(
+                null,
+                "sigv4a,sigv4,bearer",
+                null,
+                null,
+                "sigv4a",
+                "Environment variable is used when other settings are null")),
+
+            Arguments.of(new TestCase(
+                "bearer,sigv4,sigv4a",
+                null,
+                null,
+                null,
+                "sigv4",
+                "Profile setting is used when others are null")),
+
+            Arguments.of(new TestCase(
+                "",
+                null,
+                null,
+                null,
+                "sigv4a",
+                "Profile setting is used when explicit empty string is supplied")),
+
+            Arguments.of(new TestCase(
+                "bearer,sigv4,sigv4a",
+                "sigv4a,sigv4,bearer",
+                "sigv4,sigv4a,bearer",
+                null,
+                "sigv4",
+                "JVM system property has precedence over env var and profile")),
+
+            Arguments.of(new TestCase(
+                "bearer,sigv4,sigv4a",
+                "sigv4,sigv4a,bearer",
+                "sigv4,sigv4a,bearer",
+                Arrays.asList("sigv4a", "noauth", "bearer"),
+                "sigv4a",
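+                // Editorial note: taken together, these cases assume the resolution order
+                // client config > JVM system property > environment variable > profile file.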
"Client config has highest precedence")) + ); + } + + public static class TestCase { + private final String profileSetting; + private final String envVarSetting; + private final String systemPropSetting; + private final List clientSetting; + private final String resolvedAuthScheme; + private final String caseName; + + public TestCase(String profileSetting, String envVarSetting, String systemPropSetting, List clientSetting, + String resolvedAuthScheme, String caseName) { + this.profileSetting = profileSetting; + this.envVarSetting = envVarSetting; + this.systemPropSetting = systemPropSetting; + this.clientSetting = clientSetting; + this.resolvedAuthScheme = resolvedAuthScheme; + + this.caseName = caseName; + } + + @Override + public String toString() { + return caseName; + } + } + + public static class AutSchemeCapturingInterceptor implements ExecutionInterceptor { + private final AtomicReference authScheme = new AtomicReference<>(); + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + SelectedAuthScheme scheme = executionAttributes.getAttribute(SdkInternalExecutionAttribute.SELECTED_AUTH_SCHEME); + String schemeId = scheme.authSchemeOption().schemeId(); + authScheme.set(schemeId.replace("aws.auth#", "")); + throw new CaptureException(); + } + + + public String authScheme() { + return this.authScheme.get(); + } + + public static class CaptureException extends RuntimeException { + } + } + + public static class SkipCrtNoOpSigner implements HttpSigner { + + @Override + public SignedRequest sign(SignRequest request) { + return SignedRequest + .builder() + .request(request.request()) + .build(); + } + + @Override + public CompletableFuture signAsync( + AsyncSignRequest request) { + return CompletableFuture.completedFuture( + AsyncSignedRequest.builder() + .request(request.request()) + .build() + ); + } + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/useragent/StreamingBodyAndTransformerImplTrackingTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/useragent/StreamingBodyAndTransformerImplTrackingTest.java new file mode 100644 index 000000000000..6f93bff6a7d9 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/useragent/StreamingBodyAndTransformerImplTrackingTest.java @@ -0,0 +1,187 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.useragent; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.util.concurrent.Executors; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.core.sync.ResponseTransformer; +import software.amazon.awssdk.services.protocolrestjsonwithconfig.ProtocolRestJsonWithConfigAsyncClient; +import software.amazon.awssdk.services.protocolrestjsonwithconfig.ProtocolRestJsonWithConfigClient; +import software.amazon.awssdk.services.protocolrestjsonwithconfig.model.StreamingOutputOperationResponse; +import software.amazon.awssdk.testutils.RandomTempFile; + +public class StreamingBodyAndTransformerImplTrackingTest { + + private CapturingInterceptor interceptor; + + @BeforeEach + public void setup() { + this.interceptor = new CapturingInterceptor(); + } + + @Test + public void streamingInputOperation_syncClient_stringBody_recordsMetadata() { + callStreamingInputOperation(syncClient(), RequestBody.fromString("body")); + assertThat(interceptor.userAgent()).contains("md/rb#b"); + } + + @Test + public void streamingInputOperation_syncClient_fileBody_recordsMetadata() throws IOException { + callStreamingInputOperation(syncClient(), RequestBody.fromFile(new RandomTempFile(64))); + assertThat(interceptor.userAgent()).contains("md/rb#f"); + } + + @Test + public void streamingInputOperation_syncClient_streamBody_recordsMetadata() throws IOException { + callStreamingInputOperation( + syncClient(), + RequestBody.fromInputStream(new ByteArrayInputStream(new byte[64]), 64)); + assertThat(interceptor.userAgent()).contains("md/rb#s"); + } + + @Test + public void streamingInputOperation_asyncClient_stringBody_recordsMetadata() { + callStreamingInputOperation(asyncClient(), AsyncRequestBody.fromString("body")); + assertThat(interceptor.userAgent()).contains("md/rb#b"); + } + + @Test + public void streamingInputOperation_asyncClient_fileBody_recordsMetadata() throws IOException { + callStreamingInputOperation(asyncClient(), AsyncRequestBody.fromFile(new RandomTempFile(64))); + assertThat(interceptor.userAgent()).contains("md/rb#f"); + } + + @Test + public void streamingInputOperation_asyncClient_streamBody_recordsMetadata() throws IOException { + callStreamingInputOperation( + asyncClient(), + AsyncRequestBody.fromInputStream(new ByteArrayInputStream(new byte[64]), 64L, Executors.newSingleThreadExecutor()) + ); + assertThat(interceptor.userAgent()).contains("md/rb#s"); + } + + @Test + public void streamingOutputOperation_syncClient_bytes_recordsMetadata() { + callStreamingOutputOperation(syncClient(), ResponseTransformer.toBytes()); + assertThat(interceptor.userAgent()).contains("md/rt#b"); + } + + @Test + public void streamingOutputOperation_syncClient_file_recordsMetadata() throws IOException { + callStreamingOutputOperation(syncClient(), ResponseTransformer.toFile(new RandomTempFile(0))); + assertThat(interceptor.userAgent()).contains("md/rt#f"); + } + + @Test + 
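// Editorial note: the "md/rb#x" and "md/rt#x" user-agent fragments are read here as
+    // request-body / response-transformer implementation markers, per the assertions in
+    // this class: b = bytes or string, f = file, s = stream, p = publisher.
+    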
public void streamingOutputOperation_syncClient_stream_recordsMetadata() {
+        callStreamingOutputOperation(syncClient(), ResponseTransformer.toOutputStream(new OutputStream() {
+            @Override
+            public void write(int b) {
+                // no-op
+            }
+        }));
+        assertThat(interceptor.userAgent()).contains("md/rt#s");
+    }
+
+    @Test
+    public void streamingOutputOperation_asyncClient_bytes_recordsMetadata() {
+        callStreamingOutputOperation(asyncClient(), AsyncResponseTransformer.toBytes());
+        assertThat(interceptor.userAgent()).contains("md/rt#b");
+    }
+
+    @Test
+    public void streamingOutputOperation_asyncClient_file_recordsMetadata() throws IOException {
+        callStreamingOutputOperation(asyncClient(), AsyncResponseTransformer.toFile(new RandomTempFile(0)));
+        assertThat(interceptor.userAgent()).contains("md/rt#f");
+    }
+
+    @Test
+    public void streamingOutputOperation_asyncClient_publisher_recordsMetadata() {
+        callStreamingOutputOperation(asyncClient(), AsyncResponseTransformer.toPublisher());
+        assertThat(interceptor.userAgent()).contains("md/rt#p");
+    }
+
+    private ProtocolRestJsonWithConfigClient syncClient() {
+        return ProtocolRestJsonWithConfigClient
+            .builder()
+            .overrideConfiguration(c -> c.addExecutionInterceptor(interceptor))
+            .build();
+    }
+
+    private static void callStreamingInputOperation(ProtocolRestJsonWithConfigClient client, RequestBody requestBody) {
+        assertThatThrownBy(() -> client.streamingInputOperation(r -> {}, requestBody))
+            .hasMessageContaining("stop");
+    }
+
+    private void callStreamingOutputOperation(
+        ProtocolRestJsonWithConfigClient client, ResponseTransformer<StreamingOutputOperationResponse, ?> transformer) {
+        assertThatThrownBy(() -> client.streamingOutputOperation(r -> {}, transformer))
+            .hasMessageContaining("stop");
+    }
+
+    private ProtocolRestJsonWithConfigAsyncClient asyncClient() {
+        return ProtocolRestJsonWithConfigAsyncClient
+            .builder()
+            .overrideConfiguration(c -> c.addExecutionInterceptor(interceptor))
+            .build();
+    }
+
+    private static void callStreamingInputOperation(ProtocolRestJsonWithConfigAsyncClient client, AsyncRequestBody requestBody) {
+        assertThatThrownBy(() -> {
+            client.streamingInputOperation(
+                r -> {
+                    r.overrideConfiguration(
+                        c -> c.putHeader("x-amz-content-sha256", "value"));
+                },
+                requestBody).join();
+        }).hasMessageContaining("stop");
+    }
+
+    private void callStreamingOutputOperation(
+        ProtocolRestJsonWithConfigAsyncClient client, AsyncResponseTransformer<StreamingOutputOperationResponse, ?> transformer) {
+        assertThatThrownBy(() -> client.streamingOutputOperation(r -> {}, transformer).join())
+            .hasMessageContaining("stop");
+    }
+
+    public static class CapturingInterceptor implements ExecutionInterceptor {
+        private Context.BeforeTransmission context;
+
+        @Override
+        public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) {
+            this.context = context;
+            throw new RuntimeException("stop");
+        }
+
+        public String userAgent() {
+            return context.httpRequest().headers().get("User-Agent").get(0);
+        }
+    }
+}
diff --git a/test/crt-unavailable-tests/pom.xml b/test/crt-unavailable-tests/pom.xml
index 99513c39d8aa..840f58e44367 100644
--- a/test/crt-unavailable-tests/pom.xml
+++ b/test/crt-unavailable-tests/pom.xml
@@ -20,7 +20,7 @@
         <artifactId>aws-sdk-java-pom</artifactId>
         <groupId>software.amazon.awssdk</groupId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
diff --git a/test/http-client-tests/pom.xml b/test/http-client-tests/pom.xml
index 05946a9bbf64..fe947b600e21 100644
--- a/test/http-client-tests/pom.xml
+++ b/test/http-client-tests/pom.xml
@@ -21,7 +21,7 @@
         <artifactId>aws-sdk-java-pom</artifactId>
         <groupId>software.amazon.awssdk</groupId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <artifactId>http-client-tests</artifactId>
diff --git a/test/module-path-tests/pom.xml b/test/module-path-tests/pom.xml
index 389f5866a453..e22fb676efe0 100644
--- a/test/module-path-tests/pom.xml
+++ b/test/module-path-tests/pom.xml
@@ -20,7 +20,7 @@
         <artifactId>aws-sdk-java-pom</artifactId>
         <groupId>software.amazon.awssdk</groupId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
diff --git a/test/old-client-version-compatibility-test/pom.xml b/test/old-client-version-compatibility-test/pom.xml
index 68918ec57097..3086ba033df5 100644
--- a/test/old-client-version-compatibility-test/pom.xml
+++ b/test/old-client-version-compatibility-test/pom.xml
@@ -21,7 +21,7 @@
         <artifactId>aws-sdk-java-pom</artifactId>
         <groupId>software.amazon.awssdk</groupId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
diff --git a/test/protocol-tests-core/pom.xml b/test/protocol-tests-core/pom.xml
index 9c71eb2da750..1b55375350b4 100644
--- a/test/protocol-tests-core/pom.xml
+++ b/test/protocol-tests-core/pom.xml
@@ -20,7 +20,7 @@
         <artifactId>aws-sdk-java-pom</artifactId>
         <groupId>software.amazon.awssdk</groupId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
diff --git a/test/protocol-tests/pom.xml b/test/protocol-tests/pom.xml
index 5db9c0485994..746d7235a307 100644
--- a/test/protocol-tests/pom.xml
+++ b/test/protocol-tests/pom.xml
@@ -20,7 +20,7 @@
         <artifactId>aws-sdk-java-pom</artifactId>
         <groupId>software.amazon.awssdk</groupId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
diff --git a/test/region-testing/pom.xml b/test/region-testing/pom.xml
index 2fc77665773c..fd35afedf15c 100644
--- a/test/region-testing/pom.xml
+++ b/test/region-testing/pom.xml
@@ -20,7 +20,7 @@
         <artifactId>aws-sdk-java-pom</artifactId>
         <groupId>software.amazon.awssdk</groupId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
diff --git a/test/ruleset-testing-core/pom.xml b/test/ruleset-testing-core/pom.xml
index be43aa7d69f2..8773f9f72030 100644
--- a/test/ruleset-testing-core/pom.xml
+++ b/test/ruleset-testing-core/pom.xml
@@ -20,7 +20,7 @@
         <artifactId>aws-sdk-java-pom</artifactId>
         <groupId>software.amazon.awssdk</groupId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
diff --git a/test/s3-benchmarks/pom.xml b/test/s3-benchmarks/pom.xml
index 66a6e2bafd86..3ab8c6eef3ff 100644
--- a/test/s3-benchmarks/pom.xml
+++ b/test/s3-benchmarks/pom.xml
@@ -20,7 +20,7 @@
         <artifactId>aws-sdk-java-pom</artifactId>
         <groupId>software.amazon.awssdk</groupId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
diff --git a/test/s3-tests/pom.xml b/test/s3-tests/pom.xml
index 41c621b87b71..1719c1b90c99 100644
--- a/test/s3-tests/pom.xml
+++ b/test/s3-tests/pom.xml
@@ -20,7 +20,7 @@
         <artifactId>aws-sdk-java-pom</artifactId>
         <groupId>software.amazon.awssdk</groupId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
@@ -162,12 +162,6 @@
             <artifactId>jetty-server</artifactId>
             <scope>test</scope>
         </dependency>
-        <dependency>
-            <groupId>software.amazon.awssdk</groupId>
-            <artifactId>bundle-sdk</artifactId>
-            <version>${project.version}</version>
-            <scope>test</scope>
-        </dependency>
diff --git a/test/sdk-benchmarks/pom.xml b/test/sdk-benchmarks/pom.xml
index c3b23b9dedc0..21a19a23cf60 100644
--- a/test/sdk-benchmarks/pom.xml
+++ b/test/sdk-benchmarks/pom.xml
@@ -19,7 +19,7 @@
         <groupId>software.amazon.awssdk</groupId>
        <artifactId>aws-sdk-java-pom</artifactId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
        <relativePath>../../pom.xml</relativePath>
     </parent>
diff --git a/test/sdk-native-image-test/pom.xml b/test/sdk-native-image-test/pom.xml
index 5f5a7394f09f..6f66c35f02dc 100644
--- a/test/sdk-native-image-test/pom.xml
+++ b/test/sdk-native-image-test/pom.xml
@@ -20,7 +20,7 @@
         <artifactId>aws-sdk-java-pom</artifactId>
         <groupId>software.amazon.awssdk</groupId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
diff --git a/test/service-test-utils/pom.xml b/test/service-test-utils/pom.xml
index ca32d1ced01e..9e558dc54ee7 100644
--- a/test/service-test-utils/pom.xml
+++ b/test/service-test-utils/pom.xml
@@ -21,7 +21,7 @@
         <groupId>software.amazon.awssdk</groupId>
         <artifactId>aws-sdk-java-pom</artifactId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <artifactId>service-test-utils</artifactId>
diff --git a/test/stability-tests/pom.xml b/test/stability-tests/pom.xml
index 9effd5e1905b..f636a5854233 100644
--- a/test/stability-tests/pom.xml
+++ b/test/stability-tests/pom.xml
@@ -20,7 +20,7 @@
         <artifactId>aws-sdk-java-pom</artifactId>
         <groupId>software.amazon.awssdk</groupId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
diff --git a/test/test-utils/pom.xml b/test/test-utils/pom.xml
index f3b3d5191b2c..f204e47031f7 100644
--- a/test/test-utils/pom.xml
+++ b/test/test-utils/pom.xml
@@ -21,7 +21,7 @@
         <groupId>software.amazon.awssdk</groupId>
         <artifactId>aws-sdk-java-pom</artifactId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <artifactId>test-utils</artifactId>
diff --git a/test/tests-coverage-reporting/pom.xml b/test/tests-coverage-reporting/pom.xml
index 76fdb637993e..b8d826d220a1 100644
--- a/test/tests-coverage-reporting/pom.xml
+++ b/test/tests-coverage-reporting/pom.xml
@@ -20,7 +20,7 @@
         <artifactId>aws-sdk-java-pom</artifactId>
         <groupId>software.amazon.awssdk</groupId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
diff --git a/test/v2-migration-tests/pom.xml b/test/v2-migration-tests/pom.xml
index 023ca3fbd4b3..ba991e64e8db 100644
--- a/test/v2-migration-tests/pom.xml
+++ b/test/v2-migration-tests/pom.xml
@@ -22,7 +22,7 @@
         <groupId>software.amazon.awssdk</groupId>
         <artifactId>aws-sdk-java-pom</artifactId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
         <relativePath>../..</relativePath>
     </parent>
diff --git a/test/v2-migration-tests/src/test/resources/software/amazon/awssdk/v2migrationtests/maven-tm/after/src/main/java/foo/bar/TransferManagerS3.java b/test/v2-migration-tests/src/test/resources/software/amazon/awssdk/v2migrationtests/maven-tm/after/src/main/java/foo/bar/TransferManagerS3.java index 03a160657e8c..974e9627a1b2 100644 --- a/test/v2-migration-tests/src/test/resources/software/amazon/awssdk/v2migrationtests/maven-tm/after/src/main/java/foo/bar/TransferManagerS3.java +++ b/test/v2-migration-tests/src/test/resources/software/amazon/awssdk/v2migrationtests/maven-tm/after/src/main/java/foo/bar/TransferManagerS3.java @@ -15,16 +15,25 @@ package foo.bar; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.model.CopyObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.transfer.s3.S3TransferManager; import software.amazon.awssdk.transfer.s3.model.Copy; import software.amazon.awssdk.transfer.s3.model.CopyRequest; +import software.amazon.awssdk.transfer.s3.model.DirectoryDownload; +import software.amazon.awssdk.transfer.s3.model.DownloadDirectoryRequest; import software.amazon.awssdk.transfer.s3.model.DownloadFileRequest; import software.amazon.awssdk.transfer.s3.model.FileDownload; +import software.amazon.awssdk.transfer.s3.model.FileUpload; +import software.amazon.awssdk.transfer.s3.model.ResumableFileDownload; +import software.amazon.awssdk.transfer.s3.model.ResumableFileUpload; import software.amazon.awssdk.transfer.s3.model.UploadFileRequest; import software.amazon.awssdk.transfer.s3.model.UploadRequest; @@ -35,11 +44,13 @@ public class TransferManagerS3 { File file = new File("path/to/file.txt"); - void tmConstructor() { + void tmConstructor(AwsCredentials credentials, AwsCredentialsProvider credentialsProvider) { S3TransferManager tm = S3TransferManager.builder() .build(); S3TransferManager tmBuilderDefault = S3TransferManager.create(); S3TransferManager tmBuilderWithS3 = S3TransferManager.builder().build(); + S3TransferManager tmConstructorWithCred = S3TransferManager.builder().s3Client(S3AsyncClient.builder().credentialsProvider(StaticCredentialsProvider.create(credentials)).build()).build(); + S3TransferManager tmConstructorWithCredProvider = S3TransferManager.builder().s3Client(S3AsyncClient.builder().credentialsProvider(credentialsProvider).build()).build(); } void download(S3TransferManager tm, String bucket, String key) { @@ -76,4 +87,14 @@ void copy(S3TransferManager tm, String sourceBucket, String sourceKey, String de .build(); Copy copy2 = tm.copy(CopyRequest.builder().copyObjectRequest(copyRequest).build()); } + + void downloadDirectory(S3TransferManager tm, File destination) { + DirectoryDownload fileDownload = tm.downloadDirectory(DownloadDirectoryRequest.builder().bucket("bucket").listObjectsV2RequestTransformer(builder -> builder.prefix("key")).destination(destination.toPath()).build()); + tm.close(); + } + + void resume(S3TransferManager tm, ResumableFileDownload persistableDownload, ResumableFileUpload persistableUpload) { + FileDownload download = 
tm.resumeDownloadFile(persistableDownload);
+        FileUpload upload = tm.resumeUploadFile(persistableUpload);
+    }
 }
diff --git a/test/v2-migration-tests/src/test/resources/software/amazon/awssdk/v2migrationtests/maven-tm/before/src/main/java/foo/bar/TransferManagerS3.java b/test/v2-migration-tests/src/test/resources/software/amazon/awssdk/v2migrationtests/maven-tm/before/src/main/java/foo/bar/TransferManagerS3.java
index 49975abfefed..89c8f427955b 100644
--- a/test/v2-migration-tests/src/test/resources/software/amazon/awssdk/v2migrationtests/maven-tm/before/src/main/java/foo/bar/TransferManagerS3.java
+++ b/test/v2-migration-tests/src/test/resources/software/amazon/awssdk/v2migrationtests/maven-tm/before/src/main/java/foo/bar/TransferManagerS3.java
@@ -15,23 +15,31 @@
 package foo.bar;
 
+import com.amazonaws.auth.AWSCredentials;
+import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.services.s3.model.CopyObjectRequest;
 import com.amazonaws.services.s3.model.GetObjectRequest;
 import com.amazonaws.services.s3.model.PutObjectRequest;
 import com.amazonaws.services.s3.transfer.Copy;
 import com.amazonaws.services.s3.transfer.Download;
+import com.amazonaws.services.s3.transfer.MultipleFileDownload;
+import com.amazonaws.services.s3.transfer.PersistableDownload;
+import com.amazonaws.services.s3.transfer.PersistableUpload;
 import com.amazonaws.services.s3.transfer.TransferManager;
 import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
+import com.amazonaws.services.s3.transfer.Upload;
 import java.io.File;
 
 public class TransferManagerS3 {
 
     File file = new File("path/to/file.txt");
 
-    void tmConstructor() {
+    void tmConstructor(AWSCredentials credentials, AWSCredentialsProvider credentialsProvider) {
         TransferManager tm = new TransferManager();
         TransferManager tmBuilderDefault = TransferManagerBuilder.defaultTransferManager();
         TransferManager tmBuilderWithS3 = TransferManagerBuilder.standard().build();
+        TransferManager tmConstructorWithCred = new TransferManager(credentials);
+        TransferManager tmConstructorWithCredProvider = new TransferManager(credentialsProvider);
     }
 
     void download(TransferManager tm, String bucket, String key) {
@@ -64,4 +72,14 @@ void copy(TransferManager tm, String sourceBucket, String sourceKey, String dest
         CopyObjectRequest copyRequest = new CopyObjectRequest(sourceBucket, sourceKey, destinationBucket, destinationKey);
         Copy copy2 = tm.copy(copyRequest);
     }
+
+    void downloadDirectory(TransferManager tm, File destination) {
+        MultipleFileDownload fileDownload = tm.downloadDirectory("bucket", "key", destination);
+        tm.shutdownNow();
+    }
+
+    void resume(TransferManager tm, PersistableDownload persistableDownload, PersistableUpload persistableUpload) {
+        Download download = tm.resumeDownload(persistableDownload);
+        Upload upload = tm.resumeUpload(persistableUpload);
+    }
 }
diff --git a/third-party/pom.xml b/third-party/pom.xml
index 39b9e31bae49..830550ce258f 100644
--- a/third-party/pom.xml
+++ b/third-party/pom.xml
@@ -21,7 +21,7 @@
         <artifactId>aws-sdk-java-pom</artifactId>
         <groupId>software.amazon.awssdk</groupId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
     </parent>
     <artifactId>third-party</artifactId>
diff --git a/third-party/third-party-jackson-core/pom.xml b/third-party/third-party-jackson-core/pom.xml
index 5d29b0e8a172..8601f2c6e769 100644
--- a/third-party/third-party-jackson-core/pom.xml
+++ b/third-party/third-party-jackson-core/pom.xml
@@ -20,7 +20,7 @@
         <artifactId>third-party</artifactId>
         <groupId>software.amazon.awssdk</groupId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
     </parent>
     <modelVersion>4.0.0</modelVersion>
diff --git a/third-party/third-party-jackson-dataformat-cbor/pom.xml b/third-party/third-party-jackson-dataformat-cbor/pom.xml
index 6f1e8ab56615..190b25ca62a2 100644
--- a/third-party/third-party-jackson-dataformat-cbor/pom.xml
+++ b/third-party/third-party-jackson-dataformat-cbor/pom.xml
@@ -20,7 +20,7 @@
         <artifactId>third-party</artifactId>
         <groupId>software.amazon.awssdk</groupId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
     </parent>
     <modelVersion>4.0.0</modelVersion>
diff --git a/third-party/third-party-slf4j-api/pom.xml b/third-party/third-party-slf4j-api/pom.xml
index f7c564533241..8137995f68c7 100644
--- a/third-party/third-party-slf4j-api/pom.xml
+++ b/third-party/third-party-slf4j-api/pom.xml
@@ -20,7 +20,7 @@
         <artifactId>third-party</artifactId>
         <groupId>software.amazon.awssdk</groupId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
     </parent>
     <modelVersion>4.0.0</modelVersion>
diff --git a/utils/pom.xml b/utils/pom.xml
index 09cd4afc7715..01c21262b3a1 100644
--- a/utils/pom.xml
+++ b/utils/pom.xml
@@ -20,7 +20,7 @@
         <artifactId>aws-sdk-java-pom</artifactId>
         <groupId>software.amazon.awssdk</groupId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
     </parent>
     <modelVersion>4.0.0</modelVersion>
diff --git a/v2-migration/pom.xml b/v2-migration/pom.xml
index df495dbdfdd8..668ea5d63c49 100644
--- a/v2-migration/pom.xml
+++ b/v2-migration/pom.xml
@@ -21,7 +21,7 @@
         <groupId>software.amazon.awssdk</groupId>
         <artifactId>aws-sdk-java-pom</artifactId>
-        <version>2.31.52-SNAPSHOT</version>
+        <version>2.31.65-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
diff --git a/v2-migration/src/main/java/software/amazon/awssdk/v2migration/TransferManagerMethodsToV2.java b/v2-migration/src/main/java/software/amazon/awssdk/v2migration/TransferManagerMethodsToV2.java
index c2c12ff9ed5d..161a3387171f 100644
--- a/v2-migration/src/main/java/software/amazon/awssdk/v2migration/TransferManagerMethodsToV2.java
+++ b/v2-migration/src/main/java/software/amazon/awssdk/v2migration/TransferManagerMethodsToV2.java
@@ -16,17 +16,21 @@
 package software.amazon.awssdk.v2migration;
 
 import static software.amazon.awssdk.v2migration.internal.utils.S3TransformUtils.V2_S3_MODEL_PKG;
+import static software.amazon.awssdk.v2migration.internal.utils.S3TransformUtils.V2_TM_CLIENT;
 import static software.amazon.awssdk.v2migration.internal.utils.S3TransformUtils.V2_TM_MODEL_PKG;
 import static software.amazon.awssdk.v2migration.internal.utils.S3TransformUtils.v2TmMethodMatcher;
 
+import java.util.regex.Pattern;
 import org.openrewrite.ExecutionContext;
 import org.openrewrite.Recipe;
 import org.openrewrite.TreeVisitor;
 import org.openrewrite.java.AddImport;
-import org.openrewrite.java.JavaIsoVisitor;
 import org.openrewrite.java.JavaTemplate;
+import org.openrewrite.java.JavaVisitor;
 import org.openrewrite.java.MethodMatcher;
+import org.openrewrite.java.tree.Expression;
 import org.openrewrite.java.tree.J;
+import org.openrewrite.java.tree.JavaType;
 import software.amazon.awssdk.annotations.SdkInternalApi;
 
 @SdkInternalApi
@@ -47,6 +51,13 @@ public class TransferManagerMethodsToV2 extends Recipe {
 
     private static final MethodMatcher COPY_BUCKET_KEY = v2TmMethodMatcher("copy(String, String, String, String");
 
+    private static final MethodMatcher DOWNLOAD_DIR = v2TmMethodMatcher("downloadDirectory(String, String, java.io.File)");
+
+    private static final Pattern S3_TM_CREDENTIAL = Pattern.compile(V2_TM_CLIENT);
+    private static final Pattern V2_AWSCREDENTAIL = Pattern.compile("software.amazon.awssdk.auth.credentials.AwsCredentials");
+    private static final Pattern V2_CREDENTIAL_PROVIDER = Pattern.compile("software.amazon.awssdk.auth.credentials"
+                                                                          + ".AwsCredentialsProvider");
+
     @Override
     public String getDisplayName() {
         return "Transfer Manager Methods to V2";
@@ -62,10 +73,10 @@ public TreeVisitor<?, ExecutionContext> getVisitor() {
         return new Visitor();
     }
 
-    private static final class Visitor extends JavaIsoVisitor<ExecutionContext> {
+    private static final class Visitor extends JavaVisitor<ExecutionContext> {
 
         @Override
-        public 
J.MethodInvocation visitMethodInvocation(J.MethodInvocation method, ExecutionContext executionContext) { + public J visitMethodInvocation(J.MethodInvocation method, ExecutionContext executionContext) { if (DOWNLOAD_BUCKET_KEY_FILE.matches(method, false)) { method = transformDownloadWithBucketKeyFile(method); @@ -95,10 +106,70 @@ public J.MethodInvocation visitMethodInvocation(J.MethodInvocation method, Execu method = transformUploadWithBucketKeyFile(method); return super.visitMethodInvocation(method, executionContext); } + if (DOWNLOAD_DIR.matches(method, false)) { + method = transformDownloadDirectory(method); + return super.visitMethodInvocation(method, executionContext); + } return super.visitMethodInvocation(method, executionContext); } + @Override + public J visitNewClass(J.NewClass newClass, ExecutionContext executionContext) { + JavaType type = newClass.getType(); + if (!(type instanceof JavaType.FullyQualified)) { + return newClass; + } + + if (type.isAssignableFrom(S3_TM_CREDENTIAL) && + newClass.getArguments().size() == 1 && + newClass.getArguments().get(0).getType() != null) { + Expression arg = newClass.getArguments().get(0); + if (arg.getType().isAssignableFrom(V2_AWSCREDENTAIL)) { + addS3AsyncClientImport(); + addStaticCredentialsProviderImport(); + + return JavaTemplate + .builder("S3TransferManager.builder()" + + ".s3Client(S3AsyncClient.builder()" + + ".credentialsProvider(StaticCredentialsProvider.create(#{any()}))" + + ".build())" + + ".build()") + .build() + .apply(getCursor(), newClass.getCoordinates().replace(), arg); + } + if (arg.getType().isAssignableFrom(V2_CREDENTIAL_PROVIDER)) { + addS3AsyncClientImport(); + + return JavaTemplate + .builder("S3TransferManager.builder()" + + ".s3Client(S3AsyncClient.builder()" + + ".credentialsProvider(#{any()})" + + ".build())" + + ".build()") + .build() + .apply(getCursor(), newClass.getCoordinates().replace(), arg); + } + } + + return super.visitNewClass(newClass, executionContext); + } + + private J.MethodInvocation transformDownloadDirectory(J.MethodInvocation method) { + String v2Method = "#{any()}.downloadDirectory(DownloadDirectoryRequest.builder()" + + ".bucket(#{any()}).listObjectsV2RequestTransformer(builder -> builder.prefix(#{any()}))" + + ".destination(#{any()}.toPath()).build())"; + + method = JavaTemplate.builder(v2Method).build() + .apply(getCursor(), method.getCoordinates().replace(), method.getSelect(), + method.getArguments().get(0), method.getArguments().get(1), + method.getArguments().get(2)); + + addTmImport("DirectoryDownload"); + addTmImport("DownloadDirectoryRequest"); + return method; + } + private J.MethodInvocation transformUploadWithBucketKeyFile(J.MethodInvocation method) { String v2Method = "#{any()}.uploadFile(UploadFileRequest.builder()" + ".putObjectRequest(PutObjectRequest.builder().bucket(#{any()}).key(#{any()}).build())" @@ -220,5 +291,13 @@ private void addDurationImport() { private void addRequestOverrideConfigImport() { doAfterVisit(new AddImport<>("software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration", null, false)); } + + private void addS3AsyncClientImport() { + doAfterVisit(new AddImport<>("software.amazon.awssdk.services.s3.S3AsyncClient", null, false)); + } + + private void addStaticCredentialsProviderImport() { + doAfterVisit(new AddImport<>("software.amazon.awssdk.auth.credentials.StaticCredentialsProvider", null, false)); + } } } diff --git a/v2-migration/src/main/resources/META-INF/rewrite/aws-sdk-java-v1-to-v2-with-tm.yml 
b/v2-migration/src/main/resources/META-INF/rewrite/aws-sdk-java-v1-to-v2-with-tm.yml index 1409d4f853c0..5068d764e29a 100644 --- a/v2-migration/src/main/resources/META-INF/rewrite/aws-sdk-java-v1-to-v2-with-tm.yml +++ b/v2-migration/src/main/resources/META-INF/rewrite/aws-sdk-java-v1-to-v2-with-tm.yml @@ -53,4 +53,5 @@ recipeList: - software.amazon.awssdk.v2migration.S3NonStreamingRequestToV2Complex - software.amazon.awssdk.v2migration.S3PutObjectRequestToV2 - software.amazon.awssdk.v2migration.SettersToBuilderV2 + - software.amazon.awssdk.v2migration.ChangeTransferManagerSimpleMethods - software.amazon.awssdk.v2migration.TransferManagerMethodsToV2 \ No newline at end of file diff --git a/v2-migration/src/main/resources/META-INF/rewrite/change-transfer-manager-simple-methods.yml b/v2-migration/src/main/resources/META-INF/rewrite/change-transfer-manager-simple-methods.yml new file mode 100644 index 000000000000..e06beb03f632 --- /dev/null +++ b/v2-migration/src/main/resources/META-INF/rewrite/change-transfer-manager-simple-methods.yml @@ -0,0 +1,28 @@ +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +--- +type: specs.openrewrite.org/v1beta/recipe +name: software.amazon.awssdk.v2migration.ChangeTransferManagerSimpleMethods +displayName: Change TransferManager simple methods to v2. +description: Change TransferManager simple methods to v2. +recipeList: + - org.openrewrite.java.ChangeMethodName: + methodPattern: software.amazon.awssdk.transfer.s3.S3TransferManager resumeDownload(..) + newMethodName: resumeDownloadFile + - org.openrewrite.java.ChangeMethodName: + methodPattern: software.amazon.awssdk.transfer.s3.S3TransferManager resumeUpload(..) + newMethodName: resumeUploadFile + - org.openrewrite.java.ChangeMethodName: + methodPattern: software.amazon.awssdk.transfer.s3.S3TransferManager shutdownNow() + newMethodName: close \ No newline at end of file diff --git a/v2-migration/src/main/resources/META-INF/rewrite/change-transfer-manager-types.yml b/v2-migration/src/main/resources/META-INF/rewrite/change-transfer-manager-types.yml index e546ecff46d6..159f7b123262 100644 --- a/v2-migration/src/main/resources/META-INF/rewrite/change-transfer-manager-types.yml +++ b/v2-migration/src/main/resources/META-INF/rewrite/change-transfer-manager-types.yml @@ -18,6 +18,12 @@ name: software.amazon.awssdk.v2migration.ChangeTransferManagerTypes displayName: Change SDK TransferManager types from v1 to v2 description: Change SDK TransferManager types from v1 to v2. recipeList: + - software.amazon.awssdk.v2migration.openrewrite.ChangeMethodInvocationReturnType: + methodPattern: com.amazonaws.services.s3.transfer.TransferManager resumeDownload(..) + newReturnType: software.amazon.awssdk.transfer.s3.model.FileDownload + - software.amazon.awssdk.v2migration.openrewrite.ChangeMethodInvocationReturnType: + methodPattern: com.amazonaws.services.s3.transfer.TransferManager resumeUpload(..) 
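+      # Editorial note (comment added in review, not in the original recipe file): the custom
+      # ChangeMethodInvocationReturnType recipe is assumed to retype declarations assigned from
+      # the matched v1 call, e.g.
+      #   Download download = tm.resumeDownload(d);  =>  FileDownload download = tm.resumeDownload(d);
+      # after which ChangeTransferManagerSimpleMethods renames the method itself.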
+      newReturnType: software.amazon.awssdk.transfer.s3.model.FileUpload
   - org.openrewrite.java.ChangeType:
       oldFullyQualifiedTypeName: com.amazonaws.services.s3.transfer.TransferManager
       newFullyQualifiedTypeName: software.amazon.awssdk.transfer.s3.S3TransferManager

From 2bdc89695f2d409a393957347100c99bda06ffc6 Mon Sep 17 00:00:00 2001
From: Dongie Agnir <261310+dagnir@users.noreply.github.com>
Date: Thu, 19 Jun 2025 13:52:56 -0700
Subject: [PATCH 07/10] Fix POM

---
 codegen/pom.xml | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/codegen/pom.xml b/codegen/pom.xml
index 79cfa0a3785d..c16362dc58b8 100644
--- a/codegen/pom.xml
+++ b/codegen/pom.xml
@@ -239,5 +239,10 @@
             <artifactId>mockito-core</artifactId>
             <scope>compile</scope>
         </dependency>
+        <dependency>
+            <groupId>nl.jqno.equalsverifier</groupId>
+            <artifactId>equalsverifier</artifactId>
+            <scope>test</scope>
+        </dependency>
     </dependencies>

From 47b3b4dd427f0d349a24a5089da5841663b3b6e9 Mon Sep 17 00:00:00 2001
From: Dongie Agnir <261310+dagnir@users.noreply.github.com>
Date: Thu, 19 Jun 2025 14:00:42 -0700
Subject: [PATCH 08/10] Remove released changelog file

---
 .changes/next-release/bugfix-AWSSDKforJavaV2-a136845.json | 6 ------
 1 file changed, 6 deletions(-)
 delete mode 100644 .changes/next-release/bugfix-AWSSDKforJavaV2-a136845.json

diff --git a/.changes/next-release/bugfix-AWSSDKforJavaV2-a136845.json b/.changes/next-release/bugfix-AWSSDKforJavaV2-a136845.json
deleted file mode 100644
index 9c3ed4965c7b..000000000000
--- a/.changes/next-release/bugfix-AWSSDKforJavaV2-a136845.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "type": "bugfix",
-    "category": "AWS SDK for Java V2",
-    "contributor": "",
-    "description": "Fix a bug in ConstructorCache when classes are GC'ed but not removed from cache"
-}

From 769a152358b60b6f66ba37d6ec5586466f55aecb Mon Sep 17 00:00:00 2001
From: Dongie Agnir <261310+dagnir@users.noreply.github.com>
Date: Thu, 19 Jun 2025 15:34:07 -0700
Subject: [PATCH 09/10] Fix dependency

---
 codegen-maven-plugin/pom.xml | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/codegen-maven-plugin/pom.xml b/codegen-maven-plugin/pom.xml
index 786e920c1c70..43ca740818a9 100644
--- a/codegen-maven-plugin/pom.xml
+++ b/codegen-maven-plugin/pom.xml
@@ -57,6 +57,11 @@
             <groupId>software.amazon.awssdk</groupId>
             <version>${awsjavasdk.version}</version>
         </dependency>
+        <dependency>
+            <artifactId>utils</artifactId>
+            <groupId>software.amazon.awssdk</groupId>
+            <version>${awsjavasdk.version}</version>
+        </dependency>
         <dependency>
             <groupId>org.junit.jupiter</groupId>
             <artifactId>junit-jupiter</artifactId>

From 544fd05e93fce443810f04d4fbeba3a7bf92ea3b Mon Sep 17 00:00:00 2001
From: Dongie Agnir <261310+dagnir@users.noreply.github.com>
Date: Tue, 24 Jun 2025 11:24:18 -0700
Subject: [PATCH 10/10] Use try-with-resources

---
 .../amazon/awssdk/codegen/maven/plugin/GenerationMojo.java | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/codegen-maven-plugin/src/main/java/software/amazon/awssdk/codegen/maven/plugin/GenerationMojo.java b/codegen-maven-plugin/src/main/java/software/amazon/awssdk/codegen/maven/plugin/GenerationMojo.java
index cead2cbe24d9..3d17b4d84bb2 100644
--- a/codegen-maven-plugin/src/main/java/software/amazon/awssdk/codegen/maven/plugin/GenerationMojo.java
+++ b/codegen-maven-plugin/src/main/java/software/amazon/awssdk/codegen/maven/plugin/GenerationMojo.java
@@ -236,9 +236,9 @@ private <T> Optional<T> loadOptionalModel(Class<T> clzz, Path location) {
 
     private void emitValidationReport(ModelValidationReport report) {
         Path modelsDir = sourcesDirectory.resolve("models");
-        try {
-            Writer writer = Files.newBufferedWriter(modelsDir.resolve("validation-report.json"),
-                                                    StandardCharsets.UTF_8);
+        try (Writer writer = Files.newBufferedWriter(modelsDir.resolve("validation-report.json"),
+                                                     StandardCharsets.UTF_8)) {
+
             Jackson.writeWithObjectMapper(report, writer);
         } catch (IOException e) {
             getLog().warn("Failed to write validation report to " + modelsDir, e);
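
// Editorial note (sketch, not part of the patch): the try-with-resources form above closes
// the writer on both the success and the exception path; the previous version never closed
// it at all. The equivalent explicit form would be:
//
//     Writer writer = Files.newBufferedWriter(reportPath, StandardCharsets.UTF_8);
//     try {
//         Jackson.writeWithObjectMapper(report, writer);
//     } finally {
//         writer.close();
//     }
//
// where reportPath stands for modelsDir.resolve("validation-report.json").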